code stringlengths 2-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2-1.05M |
---|---|---|---|---|---|
import sys
import yaml
def fetch_table_data(table_path):
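# Parse a tab-separated table: the first line holds the column headers and each
# following line becomes a {header: value} dict keyed by a zero-based row id.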
data_dict = {}
with open(table_path) as table_to_load:
# load headers
headers = table_to_load.readline().strip('\n').split('\t')
row_id = 0
for line in table_to_load.readlines():
# print(line)
line_data = line.strip('\n').split('\t')
row_dict = {}
for col_num in range(len(headers)):
col_name = headers[col_num]
row_dict[col_name] = line_data[col_num]
data_dict[row_id] = row_dict
row_id += 1
return data_dict
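# Read the four ENA tables given on the command line (studies, samples, experiments,
# runs) and print them as a single YAML document framed by marker lines.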
all_data_dict = {}
print('YAML -------------')
studies_table_path = sys.argv[1]
table_data = fetch_table_data(studies_table_path)
all_data_dict['ENA_study'] = table_data
samples_table_path = sys.argv[2]
table_data = fetch_table_data(samples_table_path)
all_data_dict['ENA_sample'] = table_data
experiments_table_path = sys.argv[3]
table_data = fetch_table_data(experiments_table_path)
all_data_dict['ENA_experiment'] = table_data
runs_table_path = sys.argv[4]
table_data = fetch_table_data(runs_table_path)
all_data_dict['ENA_run'] = table_data
# print(all_data_dict)
print(yaml.dump(all_data_dict))
print('YAML -------------')
| gregvonkuster/tools-iuc | tools/ena_upload/dump_yaml.py | Python | mit | 1,252 |
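A hedged usage sketch for the script above, not part of the repository: dump_yaml.py is invoked with four tab-separated table paths (studies, samples, experiments, runs) as command-line arguments; the snippet below only illustrates the shape of what it prints, using a hypothetical one-row studies table.

import yaml
# One {column header: cell value} dict per zero-based row id, one table per ENA
# object type; the script builds this structure with fetch_table_data() per file.
all_data_dict = {
    'ENA_study': {0: {'alias': 'study_1', 'title': 'Example study'}},
}
print('YAML -------------')
print(yaml.dump(all_data_dict))
print('YAML -------------')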
# -*- coding: utf-8 -*-
import unittest
from datetime import datetime
from wechatpy import parse_message
class EventsTestCase(unittest.TestCase):
def test_scan_code_push_event(self):
from wechatpy.events import ScanCodePushEvent
xml = """<xml>
<ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
<FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
<CreateTime>1408090502</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[scancode_push]]></Event>
<EventKey><![CDATA[6]]></EventKey>
<ScanCodeInfo><ScanType><![CDATA[qrcode]]></ScanType>
<ScanResult><![CDATA[1]]></ScanResult>
</ScanCodeInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, ScanCodePushEvent))
self.assertEqual("qrcode", event.scan_type)
self.assertEqual("1", event.scan_result)
def test_scan_code_waitmsg_event(self):
from wechatpy.events import ScanCodeWaitMsgEvent
xml = """<xml>
<ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
<FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
<CreateTime>1408090606</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[scancode_waitmsg]]></Event>
<EventKey><![CDATA[6]]></EventKey>
<ScanCodeInfo><ScanType><![CDATA[qrcode]]></ScanType>
<ScanResult><![CDATA[2]]></ScanResult>
</ScanCodeInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, ScanCodeWaitMsgEvent))
self.assertEqual("qrcode", event.scan_type)
self.assertEqual("2", event.scan_result)
def test_pic_sysphoto_event(self):
from wechatpy.events import PicSysPhotoEvent
xml = """<xml>
<ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
<FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
<CreateTime>1408090651</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[pic_sysphoto]]></Event>
<EventKey><![CDATA[6]]></EventKey>
<SendPicsInfo><Count>1</Count>
<PicList>
<item>
<PicMd5Sum><![CDATA[1b5f7c23b5bf75682a53e7b6d163e185]]></PicMd5Sum>
</item>
</PicList>
</SendPicsInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, PicSysPhotoEvent))
self.assertEqual(1, event.count)
self.assertEqual("1b5f7c23b5bf75682a53e7b6d163e185", event.pictures[0]["PicMd5Sum"])
def test_pic_photo_or_album_event(self):
from wechatpy.events import PicPhotoOrAlbumEvent
xml = """<xml>
<ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
<FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
<CreateTime>1408090816</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[pic_photo_or_album]]></Event>
<EventKey><![CDATA[6]]></EventKey>
<SendPicsInfo><Count>1</Count>
<PicList>
<item>
<PicMd5Sum><![CDATA[5a75aaca956d97be686719218f275c6b]]></PicMd5Sum>
</item>
</PicList>
</SendPicsInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, PicPhotoOrAlbumEvent))
self.assertEqual(1, event.count)
self.assertEqual("5a75aaca956d97be686719218f275c6b", event.pictures[0]["PicMd5Sum"])
def test_pic_wechat_event(self):
from wechatpy.events import PicWeChatEvent
xml = """<xml>
<ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
<FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
<CreateTime>1408090816</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[pic_weixin]]></Event>
<EventKey><![CDATA[6]]></EventKey>
<SendPicsInfo><Count>1</Count>
<PicList>
<item>
<PicMd5Sum><![CDATA[5a75aaca956d97be686719218f275c6b]]></PicMd5Sum>
</item>
</PicList>
</SendPicsInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, PicWeChatEvent))
self.assertEqual(1, event.count)
self.assertEqual("5a75aaca956d97be686719218f275c6b", event.pictures[0]["PicMd5Sum"])
def test_location_select_event(self):
from wechatpy.events import LocationSelectEvent
xml = """<xml>
<ToUserName><![CDATA[gh_e136c6e50636]]></ToUserName>
<FromUserName><![CDATA[oMgHVjngRipVsoxg6TuX3vz6glDg]]></FromUserName>
<CreateTime>1408091189</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[location_select]]></Event>
<EventKey><![CDATA[6]]></EventKey>
<SendLocationInfo><Location_X><![CDATA[23]]></Location_X>
<Location_Y><![CDATA[113]]></Location_Y>
<Scale><![CDATA[15]]></Scale>
<Label><![CDATA[广州市海珠区客村艺苑路 106号]]></Label>
<Poiname><![CDATA[]]></Poiname>
</SendLocationInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, LocationSelectEvent))
self.assertEqual(("23", "113"), event.location)
self.assertEqual("15", event.scale)
self.assertTrue(event.poiname is None)
self.assertEqual("广州市海珠区客村艺苑路 106号", event.label)
def test_merchant_order_event(self):
from wechatpy.events import MerchantOrderEvent
xml = """<xml>
<ToUserName><![CDATA[weixin_media1]]></ToUserName>
<FromUserName><![CDATA[oDF3iYyVlek46AyTBbMRVV8VZVlI]]></FromUserName>
<CreateTime>1398144192</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[merchant_order]]></Event>
<OrderId><![CDATA[test_order_id]]></OrderId>
<OrderStatus>2</OrderStatus>
<ProductId><![CDATA[test_product_id]]></ProductId>
<SkuInfo><![CDATA[10001:1000012;10002:100021]]></SkuInfo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, MerchantOrderEvent))
self.assertEqual("test_order_id", event.order_id)
self.assertEqual(2, event.order_status)
self.assertEqual("test_product_id", event.product_id)
self.assertEqual("10001:1000012;10002:100021", event.sku_info)
def test_kf_create_session_event(self):
from wechatpy.events import KfCreateSessionEvent
xml = """<xml>
<ToUserName><![CDATA[touser]]></ToUserName>
<FromUserName><![CDATA[fromuser]]></FromUserName>
<CreateTime>1399197672</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[kf_create_session]]></Event>
<KfAccount><![CDATA[test1@test]]></KfAccount>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, KfCreateSessionEvent))
self.assertEqual("test1@test", event.account)
def test_kf_close_session_event(self):
from wechatpy.events import KfCloseSessionEvent
xml = """<xml>
<ToUserName><![CDATA[touser]]></ToUserName>
<FromUserName><![CDATA[fromuser]]></FromUserName>
<CreateTime>1399197672</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[kf_close_session]]></Event>
<KfAccount><![CDATA[test1@test]]></KfAccount>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, KfCloseSessionEvent))
self.assertEqual("test1@test", event.account)
def test_kf_switch_session_event(self):
from wechatpy.events import KfSwitchSessionEvent
xml = """<xml>
<ToUserName><![CDATA[touser]]></ToUserName>
<FromUserName><![CDATA[fromuser]]></FromUserName>
<CreateTime>1399197672</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[kf_switch_session]]></Event>
<FromKfAccount><![CDATA[test1@test]]></FromKfAccount>
<ToKfAccount><![CDATA[test2@test]]></ToKfAccount>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, KfSwitchSessionEvent))
self.assertEqual("test1@test", event.from_account)
self.assertEqual("test2@test", event.to_account)
def test_template_send_job_finish_event(self):
from wechatpy.events import TemplateSendJobFinishEvent
xml = """<xml>
<ToUserName><![CDATA[touser]]></ToUserName>
<FromUserName><![CDATA[fromuser]]></FromUserName>
<CreateTime>1395658920</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[TEMPLATESENDJOBFINISH]]></Event>
<MsgID>200163836</MsgID>
<Status><![CDATA[success]]></Status>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, TemplateSendJobFinishEvent))
self.assertEqual(200163836, event.id)
self.assertEqual("success", event.status)
def test_template_subscribe_msg_popup_event(self):
from wechatpy.events import SubscribeMsgPopupEvent
xml = """<xml>
<ToUserName><![CDATA[gh_123456789abc]]></ToUserName>
<FromUserName><![CDATA[otFpruAK8D-E6EfStSYonYSBZ8_4]]></FromUserName>
<CreateTime>1610969440</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe_msg_popup_event]]></Event>
<SubscribeMsgPopupEvent>
<List>
<TemplateId><![CDATA[VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc]]></TemplateId>
<SubscribeStatusString><![CDATA[accept]]></SubscribeStatusString>
<PopupScene>2</PopupScene>
</List>
<List>
<TemplateId><![CDATA[9nLIlbOQZC5Y89AZteFEux3WCXRRRG5Wfzkpssu4bLI]]></TemplateId>
<SubscribeStatusString><![CDATA[reject]]></SubscribeStatusString>
<PopupScene>2</PopupScene>
</List>
</SubscribeMsgPopupEvent>
</xml>"""
event = parse_message(xml)
self.assertIsInstance(event, SubscribeMsgPopupEvent)
self.assertEqual(2, len(event.subscribes))
self.assertEqual("VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc", event.subscribes[0]["TemplateId"])
def test_template_subscribe_msg_change_event(self):
from wechatpy.events import SubscribeMsgChangeEvent
xml = """<xml>
<ToUserName><![CDATA[gh_123456789abc]]></ToUserName>
<FromUserName><![CDATA[otFpruAK8D-E6EfStSYonYSBZ8_4]]></FromUserName>
<CreateTime>1610969440</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe_msg_change_event]]></Event>
<SubscribeMsgChangeEvent>
<List>
<TemplateId><![CDATA[VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc]]></TemplateId>
<SubscribeStatusString><![CDATA[reject]]></SubscribeStatusString>
</List>
</SubscribeMsgChangeEvent>
</xml>"""
event = parse_message(xml)
self.assertIsInstance(event, SubscribeMsgChangeEvent)
self.assertEqual(1, len(event.subscribes))
self.assertEqual("VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc", event.subscribes[0]["TemplateId"])
self.assertEqual("reject", event.subscribes[0]["SubscribeStatusString"])
def test_template_subscribe_msg_sent_event(self):
from wechatpy.events import SubscribeMsgSentEvent
xml = """<xml>
<ToUserName><![CDATA[gh_123456789abc]]></ToUserName>
<FromUserName><![CDATA[otFpruAK8D-E6EfStSYonYSBZ8_4]]></FromUserName>
<CreateTime>1610969468</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe_msg_sent_event]]></Event>
<SubscribeMsgSentEvent>
<List>
<TemplateId><![CDATA[VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc]]></TemplateId>
<MsgID>1700827132819554304</MsgID>
<ErrorCode>0</ErrorCode>
<ErrorStatus><![CDATA[success]]></ErrorStatus>
</List>
</SubscribeMsgSentEvent>
</xml>"""
event = parse_message(xml)
self.assertIsInstance(event, SubscribeMsgSentEvent)
self.assertEqual(1, len(event.subscribes))
self.assertEqual("VRR0UEO9VJOLs0MHlU0OilqX6MVFDwH3_3gz3Oc0NIc", event.subscribes[0]["TemplateId"])
self.assertEqual("1700827132819554304", event.subscribes[0]["MsgID"])
def test_shakearound_user_shake_event(self):
from wechatpy.events import ShakearoundUserShakeEvent
xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1433332012</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[ShakearoundUserShake]]></Event>
<ChosenBeacon>
<Uuid><![CDATA[uuid]]></Uuid>
<Major>major</Major>
<Minor>minor</Minor>
<Distance>0.057</Distance>
</ChosenBeacon>
<AroundBeacons>
<AroundBeacon>
<Uuid><![CDATA[uuid]]></Uuid>
<Major>major</Major>
<Minor>minor</Minor>
<Distance>166.816</Distance>
</AroundBeacon>
<AroundBeacon>
<Uuid><![CDATA[uuid]]></Uuid>
<Major>major</Major>
<Minor>minor</Minor>
<Distance>15.013</Distance>
</AroundBeacon>
</AroundBeacons>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, ShakearoundUserShakeEvent))
chosen_beacon = {
"uuid": "uuid",
"major": "major",
"minor": "minor",
"distance": 0.057,
}
self.assertEqual(chosen_beacon, event.chosen_beacon)
self.assertEqual(2, len(event.around_beacons))
def test_wifi_connected_event(self):
from wechatpy.events import WiFiConnectedEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>123456789</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[WifiConnected]]></Event>
<ConnectTime>0</ConnectTime>
<ExpireTime>0</ExpireTime>
<VendorId><![CDATA[3001224419]]></VendorId>
<PlaceId><![CDATA[1234]]></PlaceId>
<DeviceNo><![CDATA[00:1f:7a:ad:5c:a8]]></DeviceNo>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, WiFiConnectedEvent))
self.assertEqual(0, event.connect_time)
self.assertEqual("1234", event.shop_id)
self.assertEqual("00:1f:7a:ad:5c:a8", event.bssid)
def test_qualification_verify_success_event(self):
from wechatpy.events import QualificationVerifySuccessEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442401156</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[qualification_verify_success]]></Event>
<ExpiredTime>1442401156</ExpiredTime>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, QualificationVerifySuccessEvent))
self.assertTrue(isinstance(event.expired_time, datetime))
def test_qualification_verify_fail_event(self):
from wechatpy.events import QualificationVerifyFailEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442401156</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[qualification_verify_fail]]></Event>
<FailTime>1442401122</FailTime>
<FailReason><![CDATA[by time]]></FailReason>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, QualificationVerifyFailEvent))
self.assertTrue(isinstance(event.fail_time, datetime))
self.assertEqual(event.fail_reason, "by time")
def test_naming_verify_success_event(self):
from wechatpy.events import NamingVerifySuccessEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442401093</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[naming_verify_success]]></Event>
<ExpiredTime>1442401093</ExpiredTime>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, NamingVerifySuccessEvent))
self.assertTrue(isinstance(event.expired_time, datetime))
def test_naming_verify_fail_event(self):
from wechatpy.events import NamingVerifyFailEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442401061</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[naming_verify_fail]]></Event>
<FailTime>1442401061</FailTime>
<FailReason><![CDATA[by time]]></FailReason>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, NamingVerifyFailEvent))
self.assertTrue(isinstance(event.fail_time, datetime))
self.assertEqual(event.fail_reason, "by time")
def test_annual_renew_event(self):
from wechatpy.events import AnnualRenewEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442401004</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[annual_renew]]></Event>
<ExpiredTime>1442401004</ExpiredTime>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, AnnualRenewEvent))
self.assertTrue(isinstance(event.expired_time, datetime))
def test_verify_expired_event(self):
from wechatpy.events import VerifyExpiredEvent
xml = """
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442400900</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[verify_expired]]></Event>
<ExpiredTime>1442400900</ExpiredTime>
</xml>"""
event = parse_message(xml)
self.assertTrue(isinstance(event, VerifyExpiredEvent))
self.assertTrue(isinstance(event.expired_time, datetime))
| jxtech/wechatpy | tests/test_events.py | Python | mit | 18,940 |
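A hedged sketch of the pattern the tests above exercise, not an additional test case: parse_message turns a WeChat event push (XML) into a typed event object whose attributes are then asserted on. The payload below mirrors the qualification_verify_success case from the tests.

from wechatpy import parse_message

xml = """<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[FromUser]]></FromUserName>
<CreateTime>1442401156</CreateTime>
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[qualification_verify_success]]></Event>
<ExpiredTime>1442401156</ExpiredTime>
</xml>"""

event = parse_message(xml)                        # -> QualificationVerifySuccessEvent
print(type(event).__name__, event.expired_time)   # expired_time is parsed to a datetime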
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
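# Hedged illustration, not part of the original source: the basic picture sketched
# above, using the classes defined later in this module. `my_module` stands for any
# module whose docstrings contain examples.
def _pipeline_sketch(my_module):
    finder = DocTestFinder()
    runner = DocTestRunner(verbose=False)
    for test in finder.find(my_module):   # finder yields DocTest objects
        runner.run(test)                  # each run returns TestResults(failed, attempted)
    return runner.summarize()             # aggregated (failed, attempted) over all tests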
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
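# Hedged illustration, not part of the original source: combining option flags when
# calling `testmod` (defined later in this module). `my_module` is a hypothetical
# module containing doctests. With ELLIPSIS enabled, '...' (ELLIPSIS_MARKER) in a
# `want` string matches any substring; '<BLANKLINE>' (BLANKLINE_MARKER) stands for an
# expected blank line unless DONT_ACCEPT_BLANKLINE is set.
def _optionflags_sketch(my_module):
    return testmod(my_module,
                   optionflags=ELLIPSIS | NORMALIZE_WHITESPACE | REPORT_NDIFF)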
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
return open(filename).read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
If the string `s` is Unicode, it is encoded using the stdout
encoding and the `backslashreplace` error handler.
"""
if isinstance(s, unicode):
s = s.encode(_encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
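# Hedged illustration, not part of the original source: extracting examples from a
# docstring with this parser.
#
#     parser = DocTestParser()
#     examples = parser.get_examples(">>> 1 + 1\n2\n")
#     # examples[0].source == '1 + 1\n' and examples[0].want == '2\n'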
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
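# Hedged illustration, not part of the original source: an option directive as
# described above, attached to a single interactive example inside a docstring.
#
#     >>> print range(20)    # doctest: +NORMALIZE_WHITESPACE
#     [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
#     10, 11, 12, 13, 14, 15, 16, 17, 18, 19]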
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
if module is not None:
# Supply the module globals in case the module was
# originally loaded via a PEP 302 loader and
# file is not a valid filesystem path
source_lines = linecache.getlines(file, module.__dict__)
else:
# No access to a loader, so assume it's a normal
# filesystem path
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
if m1 and m2 and check(m1.group(1), m2.group(1),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
source = example.source.encode('ascii', 'backslashreplace')
return source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print "*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
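# Illustrative sketch (not part of the original module): a minimal "__main__"
# hook built on testmod().  The zero/non-zero exit-code convention shown here is
# an assumption for illustration, not something testmod() itself mandates.
def _example_testmod_main():
    results = testmod(verbose=False, report=True)
    return 1 if results.failed else 0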
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
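# Illustrative sketch (not part of the original module): checking one object's
# docstring examples in isolation; `func` stands for any hypothetical callable
# whose docstring contains doctests.
def _example_run_docstring_examples(func):
    run_docstring_examples(func, globs={}, verbose=True, name=func.__name__)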
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return TestResults(f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return TestResults(f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    def __init__(self, module):
        self.module = module
        DocTestCase.__init__(self, None)
def setUp(self):
self.skipTest("DocTestSuite will not work with -O2 and above")
def test_skip(self):
pass
def shortDescription(self):
return "Skipping tests from %s" % module.__name__
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests and sys.flags.optimize >= 2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
        suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
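# Illustrative sketch (not part of the original module): wiring DocTestSuite into
# unittest's load_tests protocol.  "mypackage.mymodule" is a hypothetical module
# name used purely for illustration.
def _example_load_tests(loader, tests, ignore):
    tests.addTests(DocTestSuite("mypackage.mymodule"))
    return tests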
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
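# Illustrative sketch (not part of the original module): a suite built from
# text-file doctests.  The paths are hypothetical and module-relative, per the
# default module_relative=True behaviour documented above.
def _example_docfile_suite():
    return DocFileSuite("docs/usage.txt", "docs/advanced.txt",
                        optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)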
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if not testfiles:
name = os.path.basename(sys.argv[0])
if '__loader__' in globals(): # python -m
name, _ = os.path.splitext(name)
print("usage: {0} [-v] file ...".format(name))
return 2
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
return 0
if __name__ == "__main__":
sys.exit(_test())
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7/Lib/doctest.py | Python | mit | 101,750 |
'''
Created on Mar 19, 2014
@author: Simon
'''
from engine.engine_job import EngineJob
class ValidationJob(EngineJob):
'''
M/R job for validating a trained model.
'''
def mapper(self, key, values):
data_processor = self.get_data_processor()
data_processor.set_data(values)
data_processor.normalize_data(self.data_handler.get_statistics())
data_set = data_processor.get_data_set()
alg = self.get_trained_alg()
validator = self.get_validator()
yield 'validation', validator.validate(alg, data_set)
def reducer(self, key, values):
vals = list(values)
yield key, self.get_validator().aggregate(vals)
if __name__ == '__main__':
ValidationJob.run()
| xapharius/mrEnsemble | Engine/src/jobs/validation_job.py | Python | mit | 746 |
"""Functionality to query and extract information from aligned BAM files.
"""
import collections
import contextlib
import os
import itertools
import signal
import subprocess
import numpy
import pysam
import toolz as tz
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed import objectstore
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
def is_paired(bam_file):
"""Determine if a BAM file has paired reads.
    Works around issues with `head` closing the samtools pipe early, using the signal trick from:
http://stackoverflow.com/a/12451083/252589
"""
bam_file = objectstore.cl_input(bam_file)
cmd = ("set -o pipefail; "
"sambamba view -h {bam_file} | head -50000 | "
"sambamba view -S -F paired /dev/stdin | head -1 | wc -l")
p = subprocess.Popen(cmd.format(**locals()), shell=True,
executable=do.find_bash(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
stdout, stderr = p.communicate()
if p.returncode == 0 or p.returncode == 141 and stderr.strip() == "":
return int(stdout) > 0
else:
raise ValueError("Failed to check paired status of BAM file: %s" % str(stderr))
def index(in_bam, config, check_timestamp=True):
"""Index a BAM file, skipping if index present.
Centralizes BAM indexing providing ability to switch indexing approaches.
"""
assert is_bam(in_bam), "%s in not a BAM file" % in_bam
index_file = "%s.bai" % in_bam
alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0]
if check_timestamp:
bai_exists = utils.file_uptodate(index_file, in_bam) or utils.file_uptodate(alt_index_file, in_bam)
else:
bai_exists = utils.file_exists(index_file) or utils.file_exists(alt_index_file)
if not bai_exists:
# Remove old index files and re-run to prevent linking into tx directory
for fname in [index_file, alt_index_file]:
utils.remove_safe(fname)
sambamba = _get_sambamba(config)
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, index_file) as tx_index_file:
assert tx_index_file.find(".bam.bai") > 0
tx_bam_file = tx_index_file.replace(".bam.bai", ".bam")
utils.symlink_plus(in_bam, tx_bam_file)
if sambamba:
cmd = "{sambamba} index -t {num_cores} {tx_bam_file}"
else:
cmd = "{samtools} index {tx_bam_file}"
do.run(cmd.format(**locals()), "Index BAM file: %s" % os.path.basename(in_bam))
return index_file if utils.file_exists(index_file) else alt_index_file
def remove(in_bam):
"""
    Remove a BAM file and its index if they exist.
"""
if utils.file_exists(in_bam):
utils.remove_safe(in_bam)
if utils.file_exists(in_bam + ".bai"):
utils.remove_safe(in_bam + ".bai")
def idxstats(in_bam, data):
"""Return BAM index stats for the given file, using samtools idxstats.
"""
index(in_bam, data["config"])
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
samtools = config_utils.get_program("samtools", data["config"])
idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam])
out = []
for line in idxstats_out.split("\n"):
if line.strip():
contig, length, aligned, unaligned = line.split("\t")
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
return out
def get_downsample_pct(in_bam, target_counts, data):
"""Retrieve percentage of file to downsample to get to target counts.
"""
total = sum(x.aligned for x in idxstats(in_bam, data))
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as work_bam:
n_rgs = max(1, len(work_bam.header.get("RG", [])))
rg_target = n_rgs * target_counts
if total > rg_target:
return float(rg_target) / float(total)
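# Worked example of the calculation above (hypothetical numbers): with 2 read groups,
# target_counts=1e6 and 8e6 total aligned reads, rg_target is 2 * 1e6 = 2e6, so the
# function returns 2e6 / 8e6 = 0.25; when the total is at or below rg_target it
# returns None and no downsampling percentage is reported.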
def get_aligned_reads(in_bam, data):
index(in_bam, data["config"])
bam_stats = idxstats(in_bam, data)
align = sum(x.aligned for x in bam_stats)
unaligned = sum(x.unaligned for x in bam_stats)
total = float(align + unaligned)
return 1.0 * align / total
def downsample(in_bam, data, target_counts, read_filter="", always_run=False,
work_dir=None):
"""Downsample a BAM file to the specified number of target counts.
"""
index(in_bam, data["config"])
ds_pct = get_downsample_pct(in_bam, target_counts, data)
if always_run and not ds_pct:
ds_pct = 1.0
if ds_pct:
out_file = "%s-downsample%s" % os.path.splitext(in_bam)
if work_dir:
out_file = os.path.join(work_dir, os.path.basename(out_file))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
sambamba = config_utils.get_program("sambamba", data["config"])
num_cores = dd.get_num_cores(data)
cmd = ("{sambamba} view -t {num_cores} {read_filter} -f bam -o {tx_out_file} "
"--subsample={ds_pct:.3} --subsampling-seed=42 {in_bam}")
do.run(cmd.format(**locals()), "Downsample BAM file: %s" % os.path.basename(in_bam))
return out_file
def check_header(in_bam, rgnames, ref_file, config):
"""Ensure passed in BAM header matches reference file and read groups names.
"""
_check_bam_contigs(in_bam, ref_file, config)
_check_sample(in_bam, rgnames)
def _check_sample(in_bam, rgnames):
"""Ensure input sample name matches expected run group names.
"""
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
rg = bamfile.header.get("RG", [{}])
msgs = []
warnings = []
if len(rg) > 1:
warnings.append("Multiple read groups found in input BAM. Expect single RG per BAM.")
elif len(rg) == 0:
msgs.append("No read groups found in input BAM. Expect single RG per BAM.")
elif rg[0].get("SM") != rgnames["sample"]:
msgs.append("Read group sample name (SM) does not match configuration `description`: %s vs %s"
% (rg[0].get("SM"), rgnames["sample"]))
if len(msgs) > 0:
raise ValueError("Problems with pre-aligned input BAM file: %s\n" % (in_bam)
+ "\n".join(msgs) +
"\nSetting `bam_clean: picard` in the configuration can often fix this issue.")
if warnings:
print("*** Potential problems in input BAM compared to reference:\n%s\n" %
"\n".join(warnings))
def _check_bam_contigs(in_bam, ref_file, config):
"""Ensure a pre-aligned BAM file matches the expected reference genome.
"""
ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)]
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
problems = []
warnings = []
for bc, rc in itertools.izip_longest(bam_contigs, ref_contigs):
if bc != rc:
if bc and rc:
problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc))
elif bc:
warnings.append("Extra BAM chromosomes: %s" % bc)
elif rc:
warnings.append("Extra reference chromosomes: %s" % rc)
if problems:
raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n"
"Setting `bam_clean: picard` in the configuration can often fix this issue."
% "\n".join(problems))
if warnings:
print("*** Potential problems in input BAM compared to reference:\n%s\n" %
"\n".join(warnings))
def open_samfile(in_file):
if is_bam(in_file):
return pysam.Samfile(in_file, "rb")
elif is_sam(in_file):
return pysam.Samfile(in_file, "r")
else:
raise IOError("in_file must be either a BAM file or SAM file. Is the "
"extension .sam or .bam?")
def is_bam(in_file):
_, ext = os.path.splitext(in_file)
if ext == ".bam":
return True
else:
return False
def is_sam(in_file):
_, ext = os.path.splitext(in_file)
if ext == ".sam":
return True
else:
return False
def mapped(in_bam, config):
"""
    Return a BAM file containing only the mapped reads.
"""
out_file = os.path.splitext(in_bam)[0] + ".mapped.bam"
if utils.file_exists(out_file):
return out_file
sambamba = _get_sambamba(config)
with file_transaction(config, out_file) as tx_out_file:
if sambamba:
cmd = ("{sambamba} view --format=bam -F 'not (unmapped or mate_is_unmapped)' "
"{in_bam} -o {tx_out_file}")
else:
samtools = config_utils.get_program("samtools", config)
cmd = "{samtools} view -b -F 4 {in_bam} -o {tx_out_file}"
do.run(cmd.format(**locals()),
"Filtering mapped reads to %s." % (tx_out_file))
return out_file
def count(in_bam, config=None):
"""
    Return the number of alignments in a BAM file.
"""
if not config:
config = {}
sambamba = _get_sambamba(config)
if sambamba:
cmd = ("{sambamba} view -c {in_bam}").format(**locals())
else:
samtools = config_utils.get_program("samtools", config)
cmd = ("{samtools} view -c {in_bam}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return int(out)
def sam_to_bam(in_sam, config):
if is_bam(in_sam):
return in_sam
assert is_sam(in_sam), "%s is not a SAM file" % in_sam
out_file = os.path.splitext(in_sam)[0] + ".bam"
if utils.file_exists(out_file):
return out_file
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, out_file) as tx_out_file:
cmd = "{samtools} view -@ {num_cores} -h -S -b {in_sam} -o {tx_out_file}"
do.run(cmd.format(**locals()),
("Convert SAM to BAM (%s cores): %s to %s"
% (str(num_cores), in_sam, out_file)))
return out_file
def sam_to_bam_stream_cmd(config, named_pipe=None):
sambamba = config_utils.get_program("sambamba", config)
num_cores = config["algorithm"].get("num_cores", 1)
pipe = named_pipe if named_pipe else "/dev/stdin"
cmd = " {sambamba} view --format=bam -S -t {num_cores} {pipe} ".format(**locals())
return cmd
def bam_to_sam(in_file, config):
if is_sam(in_file):
return in_file
assert is_bam(in_file), "%s is not a BAM file" % in_file
out_file = os.path.splitext(in_file)[0] + ".sam"
if utils.file_exists(out_file):
return out_file
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, out_file) as tx_out_file:
cmd = "{samtools} view -@ {num_cores} -h {in_file} -o {tx_out_file}"
do.run(cmd.format(**locals()),
("Convert BAM to SAM (%s cores): %s to %s"
% (str(num_cores), in_file, out_file)))
return out_file
def reheader(header, bam_file, config):
samtools = config_utils.get_program("samtools", config)
base, ext = os.path.splitext(bam_file)
out_file = base + ".reheadered" + ext
cmd = "{samtools} reheader {header} {bam_file} > {out_file}"
do.run(cmd.format(**locals()), "Reheadering %s." % bam_file)
return out_file
def merge(bamfiles, out_bam, config):
assert all(map(is_bam, bamfiles)), ("Not all of the files to merge are not BAM "
"files: %s " % (bamfiles))
assert all(map(utils.file_exists, bamfiles)), ("Not all of the files to merge "
"exist: %s" % (bamfiles))
if len(bamfiles) == 1:
return bamfiles[0]
if os.path.exists(out_bam):
return out_bam
sambamba = _get_sambamba(config)
sambamba = None
samtools = config_utils.get_program("samtools", config)
bamtools = config_utils.get_program("bamtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, out_bam) as tx_out_bam:
try:
if sambamba:
cmd = "{sambamba} merge -t {num_cores} {tx_out_bam} " + " ".join(bamfiles)
else:
cmd = "{samtools} merge -@ {num_cores} {tx_out_bam} " + " ".join(bamfiles)
do.run(cmd.format(**locals()), "Merge %s into %s." % (bamfiles, out_bam))
except subprocess.CalledProcessError:
files = " -in ".join(bamfiles)
cmd = "{bamtools} merge -in {files} -out {tx_out_bam}"
do.run(cmd.format(**locals()), "Error with other tools. Merge %s into %s with bamtools" %
(bamfiles, out_bam))
index(out_bam, config)
return out_bam
def sort(in_bam, config, order="coordinate"):
"""Sort a BAM file, skipping if already present.
"""
assert is_bam(in_bam), "%s in not a BAM file" % in_bam
if bam_already_sorted(in_bam, config, order):
return in_bam
sort_stem = _get_sort_stem(in_bam, order)
sort_file = sort_stem + ".bam"
if not utils.file_exists(sort_file):
sambamba = _get_sambamba(config)
samtools = config_utils.get_program("samtools", config)
cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, sort_file) as tx_sort_file:
tx_sort_stem = os.path.splitext(tx_sort_file)[0]
tx_dir = utils.safe_makedir(os.path.dirname(tx_sort_file))
order_flag = "-n" if order == "queryname" else ""
resources = config_utils.get_resources("samtools", config)
mem = resources.get("memory", "2G")
samtools_cmd = ("{samtools} sort -@ {cores} -m {mem} {order_flag} "
"{in_bam} {tx_sort_stem}")
if sambamba:
if tz.get_in(["resources", "sambamba"], config):
sm_resources = config_utils.get_resources("sambamba", config)
mem = sm_resources.get("memory", "2G")
# sambamba uses total memory, not memory per core
mem = config_utils.adjust_memory(mem, cores, "increase").upper()
# Use samtools compatible natural sorting
# https://github.com/lomereiter/sambamba/issues/132
order_flag = "--natural-sort" if order == "queryname" else ""
cmd = ("{sambamba} sort -t {cores} -m {mem} {order_flag} "
"-o {tx_sort_file} --tmpdir={tx_dir} {in_bam}")
else:
cmd = samtools_cmd
# sambamba has intermittent multicore failures. Allow
# retries with single core
try:
do.run(cmd.format(**locals()),
"Sort BAM file (multi core, %s): %s to %s" %
(order, os.path.basename(in_bam),
os.path.basename(sort_file)))
except:
logger.exception("Multi-core sorting failed, reverting to single core")
resources = config_utils.get_resources("samtools", config)
mem = resources.get("memory", "2G")
cores = 1
order_flag = "-n" if order == "queryname" else ""
do.run(samtools_cmd.format(**locals()),
"Sort BAM file (single core, %s): %s to %s" %
(order, os.path.basename(in_bam),
os.path.basename(sort_file)))
return sort_file
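# Illustrative sketch (not used elsewhere in this module): requesting queryname order,
# e.g. ahead of tools that require name-sorted input; `config` follows the same
# structure used throughout this module.
def _example_name_sort(in_bam, config):
    return sort(in_bam, config, order="queryname")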
def sort_cmd(config, tmp_dir, named_pipe=None, order="coordinate"):
""" Get a sort command, suitable for piping
"""
sambamba = _get_sambamba(config)
pipe = named_pipe if named_pipe else "/dev/stdin"
order_flag = "-n" if order == "queryname" else ""
resources = config_utils.get_resources("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
mem = config_utils.adjust_memory(resources.get("memory", "2G"), 1, "decrease").upper()
cmd = ("{sambamba} sort -m {mem} --tmpdir {tmp_dir} -t {num_cores} {order_flag} -o /dev/stdout {pipe}")
return cmd.format(**locals())
def _get_sambamba(config):
try:
sambamba = config_utils.get_program("sambamba", config)
except config_utils.CmdNotFound:
sambamba = None
return sambamba
def bam_already_sorted(in_bam, config, order):
return order == _get_sort_order(in_bam, config)
def _get_sort_order(in_bam, config):
with open_samfile(in_bam) as bam_handle:
header = bam_handle.header
return utils.get_in(header, ("HD", "SO"), None)
def _get_sort_stem(in_bam, order):
SUFFIXES = {"coordinate": ".sorted", "queryname": ".nsorted"}
sort_base = os.path.splitext(in_bam)[0]
for suffix in SUFFIXES:
sort_base = sort_base.split(suffix)[0]
return sort_base + SUFFIXES[order]
def sample_name(in_bam):
"""Get sample name from BAM file.
"""
with contextlib.closing(pysam.AlignmentFile(in_bam, "rb", check_sq=False)) as in_pysam:
try:
if "RG" in in_pysam.header:
return in_pysam.header["RG"][0]["SM"]
except ValueError:
return None
def estimate_read_length(bam_file, nreads=1000):
"""
estimate median read length of a SAM/BAM file
"""
with open_samfile(bam_file) as bam_handle:
reads = tz.itertoolz.take(nreads, bam_handle)
lengths = [len(x.seq) for x in reads]
return int(numpy.median(lengths))
def estimate_fragment_size(bam_file, nreads=1000):
"""
estimate median fragment size of a SAM/BAM file
"""
with open_samfile(bam_file) as bam_handle:
reads = tz.itertoolz.take(nreads, bam_handle)
lengths = [x.tlen for x in reads]
return int(numpy.median(lengths))
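# Illustrative usage (hypothetical BAM path): both estimators only look at the
# first `nreads` records, so they are fast but approximate.
#   read_len  = estimate_read_length("sample.bam")
#   frag_size = estimate_fragment_size("sample.bam", nreads=5000)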
def filter_stream_cmd(bam_file, data, filter_flag):
"""
return a command to keep only alignments matching the filter flag
see https://github.com/lomereiter/sambamba/wiki/%5Bsambamba-view%5D-Filter-expression-syntax for examples
"""
sambamba = config_utils.get_program("sambamba", data["config"])
num_cores = dd.get_num_cores(data)
cmd = ('{sambamba} view -t {num_cores} -f bam -F "{filter_flag}" {bam_file}')
return cmd.format(**locals())
def filter_primary_stream_cmd(bam_file, data):
return filter_stream_cmd(bam_file, data, "not secondary_alignment")
def filter_primary(bam_file, data):
stem, ext = os.path.splitext(bam_file)
out_file = stem + ".primary" + ext
if utils.file_exists(out_file):
return out_file
with file_transaction(out_file) as tx_out_file:
cmd = filter_primary_stream_cmd(bam_file, data)
        cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), ("Filtering primary alignments in %s." %
os.path.basename(bam_file)))
return out_file
| Cyberbio-Lab/bcbio-nextgen | bcbio/bam/__init__.py | Python | mit | 19,381 |
import os
import pytest
from som.vm.current import current_universe
@pytest.mark.parametrize(
"test_name",
[
"Array",
"Block",
"ClassLoading",
"ClassStructure",
"Closure",
"Coercion",
"CompilerReturn",
"DoesNotUnderstand",
"Double",
"Empty",
"Global",
"Hash",
"Integer",
"Preliminary",
"Reflection",
"SelfBlock",
"SpecialSelectors",
"Super",
"Set",
"String",
"Symbol",
"System",
"Vector",
],
)
def test_som(test_name):
current_universe.reset(True)
core_lib_path = os.path.dirname(os.path.abspath(__file__)) + "/../core-lib/"
args = [
"-cp",
core_lib_path + "Smalltalk",
core_lib_path + "TestSuite/TestHarness.som",
test_name,
]
current_universe.interpret(args)
assert current_universe.last_exit_code() == 0
| SOM-st/PySOM | tests/test_som.py | Python | mit | 964 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from tensorboard.plugins.beholder import im_util
from tensorboard.plugins.beholder.file_system_tools import read_pickle,\
write_pickle, write_file
from tensorboard.plugins.beholder.shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME
from tensorboard.plugins.beholder import video_writing
from tensorboard.plugins.beholder.visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[
video_writing.FFmpegVideoOutput,
video_writing.PNGVideoOutput])
self.frame_placeholder = tf.placeholder(tf.uint8, [None, None, None])
self.summary_op = tf.summary.tensor_summary(TAG_NAME,
self.frame_placeholder)
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
if not tf.gfile.Exists(self.PLUGIN_LOGDIR + '/config.pkl'):
tf.gfile.MakeDirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG, '{}/{}'.format(self.PLUGIN_LOGDIR,
CONFIG_FILENAME))
self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, session, frame):
'''Writes the frame to disk as a tensor summary.'''
summary = session.run(self.summary_op, feed_dict={
self.frame_placeholder: frame
})
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
write_file(summary, path)
def _get_final_image(self, session, config, arrays=None, frame=None):
if config['values'] == 'frames':
if frame is None:
final_image = im_util.get_image_relative_to_script('frame-missing.png')
else:
frame = frame() if callable(frame) else frame
final_image = im_util.scale_image_for_display(frame)
elif config['values'] == 'arrays':
if arrays is None:
final_image = im_util.get_image_relative_to_script('arrays-missing.png')
# TODO: hack to clear the info. Should be cleaner.
self.visualizer._save_section_info([], [])
else:
final_image = self.visualizer.build_frame(arrays)
elif config['values'] == 'trainable_variables':
arrays = [session.run(x) for x in tf.trainable_variables()]
final_image = self.visualizer.build_frame(arrays)
if len(final_image.shape) == 2:
# Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, session, arrays, frame, config):
final_image = self._get_final_image(session, config, arrays, frame)
self._write_summary(session, final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video output.'''
# pylint: disable=redefined-variable-type
should_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
tf.logging.info(
'Starting recording using %s',
self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
tf.logging.info('Finished recording')
  # TODO: blanket try and except for production? I don't want someone's script to die
  # after weeks of running because of a visualization.
def update(self, session, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
arrays: a list of np arrays. Use the "custom" option in the client.
frame: a 2D np array. This way the plugin can be used for video of any
kind, not just the visualization that comes with the plugin.
frame can also be a function, which only is evaluated when the
"frame" option is selected by the client.
'''
new_config = self._get_config()
if self._enough_time_has_passed(self.previous_config['FPS']):
self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(session, arrays, frame, new_config)
self._update_recording(final_image, new_config)
##############################################################################
@staticmethod
def gradient_helper(optimizer, loss, var_list=None):
'''A helper to get the gradients out at each step.
Args:
optimizer: the optimizer op.
loss: the op that computes your loss value.
Returns: the gradient tensors and the train_step op.
'''
if var_list is None:
var_list = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
grads = [pair[0] for pair in grads_and_vars]
return grads, optimizer.apply_gradients(grads_and_vars)
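# Illustrative sketch (not part of the plugin): one way gradient_helper() and
# update() could be wired into a plain training loop. The optimizer, loss and
# LOG_DIRECTORY names are placeholders.
#
#   grads, train_step = Beholder.gradient_helper(optimizer, loss)
#   beholder = Beholder(LOG_DIRECTORY)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       for _ in range(num_steps):
#           grad_values, _ = sess.run([grads, train_step])
#           beholder.update(sess, arrays=grad_values)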
class BeholderHook(tf.train.SessionRunHook):
"""SessionRunHook implementation that runs Beholder every step.
Convenient when using tf.train.MonitoredSession:
```python
beholder_hook = BeholderHook(LOG_DIRECTORY)
with MonitoredSession(..., hooks=[beholder_hook]) as sess:
sess.run(train_op)
```
"""
def __init__(self, logdir):
"""Creates new Hook instance
Args:
logdir: Directory where Beholder should write data.
"""
self._logdir = logdir
self.beholder = None
def begin(self):
self.beholder = Beholder(self._logdir)
def after_run(self, run_context, unused_run_values):
self.beholder.update(run_context.session)
| ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorboard/plugins/beholder/beholder.py | Python | mit | 7,267 |
import csv
import re
from io import TextIOWrapper
from django.conf import settings
from django.core.cache import cache
from django.utils.termcolors import colorize
# Import clog if we're in debug otherwise make it a noop
if settings.DEBUG:
from clog.clog import clog
else:
def clog(*args, **kwargs):
pass
def pop_first(data, key):
"""Pop the given key from the given `data` dict, and if the popped item
is a list, return the first element. This is handy for those cases where,
in the api, `request.data.pop(whatever)` sometimes gives a list and other
times is an object.
"""
result = data.pop(key)
if isinstance(result, list):
result = result[0]
return result
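# Illustrative behavior (hypothetical payloads):
#   pop_first({"category": ["1", "2"]}, "category") -> "1"
#   pop_first({"category": "1"}, "category")        -> "1"
# In both cases the key is removed from the dict, mirroring request.data.pop().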
def num_user_selections(obj):
"""Return a count of the given object's UserXXXX instances (where XXXX is
the name of one of our content models). This will tell how many users
have selected this item.
Valid for Category, Goal, Action instances.
"""
model = obj._meta.model_name
if model not in ['category', 'goal', 'action']:
raise ValueError("{0} is not a supported object type".format(model))
method = "user{0}_set".format(model)
return getattr(obj, method).count()
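# Illustrative usage (the goal instance is hypothetical):
#   num_user_selections(goal)  # -> number of UserGoal rows pointing at `goal`
# Passing any model other than Category/Goal/Action raises ValueError.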
# ------------------------------------------
#
# Helper functions for cleaning text content
#
# ------------------------------------------
def clean_title(text):
"""Titles: collapse all whitespace, remove ending periods, strip."""
if text:
text = re.sub(r'\s+', ' ', text).strip() # collapse whitespace
if text.endswith("."):
text = text[:-1]
return text
def clean_notification(text):
"""Notification text: collapse all whitespace, strip, include an ending
period (if not a ? or a !).
"""
if text:
text = re.sub(r'\s+', ' ', text).strip() # collapse whitespace
if text[-1] not in ['.', '?', '!']:
text += "."
return text
def strip(text):
"""Conditially call text.strip() if the input text is truthy."""
if text:
text = text.strip()
return text
def read_uploaded_csv(uploaded_file, encoding='utf-8', errors='ignore'):
"""This is a generator that takes an uploaded file (such as an instance of
InMemoryUploadedFile.file), converts it to a string (instead of bytes)
representation, then parses it as a CSV.
Returns a list of lists containing strings, and removes any empty rows.
NOTES:
1. This makes a big assumption about utf-8 encodings, and the errors
param means we potentially lose data!
    2. For InMemoryUploadedFile, see: http://stackoverflow.com/a/16243182/182778
"""
file = TextIOWrapper(
uploaded_file.file,
encoding=encoding,
newline='',
errors=errors
)
for row in csv.reader(file):
if any(row):
yield row
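# Illustrative usage (assumes a view handling a file upload; the "csv_file"
# field name is hypothetical):
#
#   for row in read_uploaded_csv(request.FILES["csv_file"]):
#       title = row[0]
#       ...
#
# Rows that are entirely empty are skipped by the `any(row)` guard above.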
def delete_content(prefix):
"""Delete content whose title/name starts with the given prefix."""
from goals.models import Action, Category, Goal, Trigger
print("Deleting content that startswith='{}'".format(prefix))
actions = Action.objects.filter(title__startswith=prefix)
print("Deleting {} Actions...".format(actions.count()))
actions.delete()
triggers = Trigger.objects.filter(name__startswith=prefix)
print("Deleting {} Triggers...".format(triggers.count()))
triggers.delete()
goals = Goal.objects.filter(title__startswith=prefix)
print("Deleting {} Goals...".format(goals.count()))
goals.delete()
cats = Category.objects.filter(title__startswith=prefix)
print("Deleting {} Categories...".format(cats.count()))
cats.delete()
print("...done.")
| tndatacommons/tndata_backend | tndata_backend/goals/utils.py | Python | mit | 3,686 |
from __future__ import absolute_import
from .nihts_xcam import XenicsCamera
| henryroe/xenics_pluto | nihts_xcam/__init__.py | Python | mit | 77 |
import re
from django.db import migrations
def add_project_member(apps, schema_editor):
# Using historical versions as recommended for RunPython
PublicDataAccess = apps.get_model("public_data", "PublicDataAccess")
DataRequestProjectMember = apps.get_model(
"private_sharing", "DataRequestProjectMember"
)
DataRequestProject = apps.get_model("private_sharing", "DataRequestProject")
db_alias = schema_editor.connection.alias
def id_label_to_project(id_label):
match = re.match(r"direct-sharing-(?P<id>\d+)", id_label)
if match:
project = DataRequestProject.objects.using(db_alias).get(
id=int(match.group("id"))
)
return project
for pda in PublicDataAccess.objects.using(db_alias).filter(project_membership=None):
project = id_label_to_project(pda.data_source)
drpm = DataRequestProjectMember.objects.using(db_alias).get(
project=project, member=pda.participant.member
)
pda.project_membership = drpm
pda.save()
def set_data_source(apps, schema_editor):
# Using historical versions as recommended for RunPython
PublicDataAccess = apps.get_model("public_data", "PublicDataAccess")
db_alias = schema_editor.connection.alias
for pda in PublicDataAccess.objects.using(db_alias).filter(data_source=None):
pda.data_source = "direct-sharing-{}".format(pda.project_membership.project.id)
pda.save()
class Migration(migrations.Migration):
dependencies = [("public_data", "0003_auto_20190508_2341")]
operations = [migrations.RunPython(add_project_member, set_data_source)]
| OpenHumans/open-humans | public_data/migrations/0004_migrate_data_20190508.py | Python | mit | 1,674 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
# all country info
import os, json, frappe
from frappe.utils.momentjs import get_all_timezones
def get_country_info(country=None):
data = get_all()
data = frappe._dict(data.get(country, {}))
if 'date_format' not in data:
data.date_format = "dd-mm-yyyy"
if 'time_format' not in data:
data.time_format = "HH:mm:ss"
return data
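# Illustrative usage (the country name is an example value):
#   info = get_country_info("India")
#   info.date_format  # falls back to "dd-mm-yyyy" when the JSON entry omits it
#   info.time_format  # falls back to "HH:mm:ss"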
def get_all():
with open(os.path.join(os.path.dirname(__file__), "country_info.json"), "r") as local_info:
all_data = json.loads(local_info.read())
return all_data
@frappe.whitelist()
def get_country_timezone_info():
return {
"country_info": get_all(),
"all_timezones": get_all_timezones()
}
def get_translated_dict():
from babel.dates import get_timezone, get_timezone_name, Locale
translated_dict = {}
locale = Locale.parse(frappe.local.lang, sep="-")
# timezones
for tz in get_all_timezones():
timezone_name = get_timezone_name(get_timezone(tz), locale=locale, width='short')
if timezone_name:
translated_dict[tz] = timezone_name + ' - ' + tz
# country names && currencies
for country, info in get_all().items():
country_name = locale.territories.get((info.get("code") or "").upper())
if country_name:
translated_dict[country] = country_name
currency = info.get("currency")
currency_name = locale.currencies.get(currency)
if currency_name:
translated_dict[currency] = currency_name
return translated_dict
def update():
with open(os.path.join(os.path.dirname(__file__), "currency_info.json"), "r") as nformats:
nformats = json.loads(nformats.read())
all_data = get_all()
for country in all_data:
data = all_data[country]
data["number_format"] = nformats.get(data.get("currency", "default"),
nformats.get("default"))["display"]
with open(os.path.join(os.path.dirname(__file__), "country_info.json"), "w") as local_info:
local_info.write(json.dumps(all_data, indent=1))
| frappe/frappe | frappe/geo/country_info.py | Python | mit | 1,972 |
import csv
import math
import sys
data = []
with open(sys.argv[1], 'rb') as f:
reader = csv.DictReader(f)
for row in reader:
if row['year'] != '2015':
continue
if row['birthdate'] and row['gender']:
data.append({
'age': 2016 - int(math.floor(float(row['birthdate']))),
'gender': row['gender'],
})
with open(sys.argv[2], 'wb') as f:
writer = csv.DictWriter(f, ['age', 'gender'])
writer.writeheader()
for row in data:
writer.writerow(row)
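# Illustrative invocation (file names are placeholders): the script expects the
# raw survey CSV as the first argument and writes an age/gender CSV as the second.
#   python age-gender.py furrypoll-2015.csv age-gender-2015.csv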
| adjspecies/furrypoll-munger | bin/other/age-gender.py | Python | mit | 553 |
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .punctuation import TOKENIZER_SUFFIXES
from ...language import Language, BaseDefaults
class UrduDefaults(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
class Urdu(Language):
lang = "ur"
Defaults = UrduDefaults
__all__ = ["Urdu"]
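# Illustrative usage (a blank Urdu pipeline; no trained components are assumed):
#   from spacy.lang.ur import Urdu
#   nlp = Urdu()
#   doc = nlp("...")  # tokenization uses the suffixes and lexical attributes above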
| honnibal/spaCy | spacy/lang/ur/__init__.py | Python | mit | 461 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-11 21:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Control',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| dcalacci/Interactive_estimation | game/control/migrations/0001_initial.py | Python | mit | 492 |
# Copyright (C) 2008 Guild of Writers PyPRP Project Team
# See the file AUTHORS for more info about the team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Please see the file LICENSE for the full license.
try:
import Blender
try:
from Blender import Mesh
from Blender import Lamp
except Exception, detail:
print detail
except ImportError:
pass
import md5, random, binascii, cStringIO, copy, Image, math, struct, StringIO, os, os.path, pickle
from prp_Types import *
from prp_DXTConv import *
from prp_HexDump import *
from prp_GeomClasses import *
#from prp_LogicClasses import *
from prp_Functions import *
from prp_ConvexHull import *
from prp_VolumeIsect import *
from prp_AlcScript import *
from prp_RefParser import *
from prp_Messages import *
import prp_Config, prp_HexDump
################# Rework of the camera classes ###################
class CamTrans:
def __init__(self,parent):
self.parent = parent
self.fTransTo = UruObjectRef()
self.fCutPos = False # boolean
self.fCutPOA = False # boolean
self.fIgnore = False # boolean
self.fAccel = 60.0
self.fDecel = 60.0
self.fVelocity = 60.0
self.fPOAAccel = 60.0
self.fPOADecel = 60.0
self.fPOAVelocity = 60.0
def read(self,stream):
print "w"
self.fTransTo.read(stream)
print "v"
self.fCutPos = stream.ReadBool()
self.fCutPOA = stream.ReadBool()
self.fIgnore = stream.ReadBool()
self.fVelocity = stream.ReadFloat()
self.fAccel = stream.ReadFloat()
self.fDecel = stream.ReadFloat()
self.fPOAVelocity = stream.ReadFloat()
self.fPOAAccel = stream.ReadFloat()
self.fPOADecel = stream.ReadFloat()
def write(self,stream):
self.fTransTo.write(stream)
stream.WriteBool(self.fCutPos)
stream.WriteBool(self.fCutPOA)
stream.WriteBool(self.fIgnore)
stream.WriteFloat(self.fVelocity)
stream.WriteFloat(self.fAccel)
stream.WriteFloat(self.fDecel)
stream.WriteFloat(self.fPOAVelocity)
stream.WriteFloat(self.fPOAAccel)
stream.WriteFloat(self.fPOADecel)
def import_obj(self,obj,count):
pass
def export_script(self,script):
self.fAccel = float(FindInDict(script,"accel",self.fAccel))
self.fDecel = float(FindInDict(script,"decel",self.fDecel))
self.fVelocity = float(FindInDict(script,"velocity",self.fVelocity))
self.fPOAAccel = float(FindInDict(script,"poaaccel",self.fPOAAccel))
self.fPOADecel = float(FindInDict(script,"poadecel",self.fPOADecel))
        self.fPOAVelocity = float(FindInDict(script,"poavelocity",self.fPOAVelocity))
self.fCutPos = bool(str(FindInDict(script,"cutpos",str(self.fCutPos))).lower() == "true")
self.fCutPOA = bool(str(FindInDict(script,"cutpoa",str(self.fCutPOA))).lower() == "true")
self.fIgnore = bool(str(FindInDict(script,"ignore",str(self.fIgnore))).lower() == "true")
transto = FindInDict(script,"to",None)
# do something with that...
refparser = ScriptRefParser(self.parent.getRoot(),False,"scnobj",[0x0001])
self.fSubjectKey = refparser.MixedRef_FindCreateRef(transto)
pass
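# Illustrative AlcScript fragment for a camera transition (hypothetical values;
# the keys mirror the FindInDict() lookups in CamTrans.export_script above, and
# the "to" reference format is only a sketch):
#
#   camera:
#       transitions:
#         - to: "0x0001:SomeCameraSceneObject"
#           velocity: 60
#           accel: 60
#           decel: 60
#           cutpos: true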
class plCameraModifier1(plSingleModifier): # actually descends from plSingleModifer, but doesn't use those read/write functions
def __init__(self,parent,name="unnamed",type=0x009B):
plSingleModifier.__init__(self,parent,name,type)
#format
self.fBrain=UruObjectRef()
self.fTrans = [] #CamTrans Fields
#Ending is almost always like this:
self.fFOVw = 45.0 # Field of View
self.fFOVh = 33.75 #
self.fMessageQueue = []
self.fFOVInstructions = []
self.fAnimated = False
self.fStartAnimOnPush = False
self.fStopAnimOnPop = False
self.fResetAnimOnPop = False
def _Find(page,name):
return page.find(0x009B,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x009B,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
hsKeyedObject.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream,size):
start = stream.tell()
hsKeyedObject.read(self,stream)
self.fBrain.read(stream)
count = stream.Read32()
for i in range(count):
cam = CamTrans(self)
cam.read(stream) # not like this in Plasma, but makes it easier here :)
self.fTrans.append(cam)
self.fFOVw = stream.ReadFloat()
self.fFOVh = stream.ReadFloat()
count = stream.Read32()
# we now should read in a message queue, which is hard because of incomplete message implementations
try:
print "Y"
for i in range(count):
Msg = PrpMessage.FromStream(stream)
                self.fMessageQueue.append(Msg.data)
print "Z"
for msg in self.fMessageQueue:
msg.fSender.read(stream)
            # read in queue of plCameraMsgs
            self.fFOVInstructions = []
            count = stream.Read32()
            for i in range(count):
                Msg = PrpMessage.FromStream(stream)
                self.fFOVInstructions.append(Msg.data)
except ValueError, detail:
print "/---------------------------------------------------------"
print "| WARNING:"
print "| Got Value Error:" , detail, ":"
print "| Skipping message arrays of plCameraModifier1..."
print "| -> Skipping %i bytes ahead " % ( (start + size -4) - stream.tell())
print "| -> Total object size: %i bytes"% (size)
print "\---------------------------------------------------------\n"
stream.seek(start + size - 4) #reposition the stream to read in the last 4 bytes
self.fAnimated = stream.ReadBool()
self.fStartAnimOnPush = stream.ReadBool()
self.fStopAnimOnPop = stream.ReadBool()
self.fResetAnimOnPop = stream.ReadBool()
def write(self,stream):
hsKeyedObject.write(self,stream)
self.fBrain.write(stream)
stream.Write32(len(self.fTrans))
for cam in self.fTrans:
cam.write(stream)
stream.WriteFloat(self.fFOVw)
stream.WriteFloat(self.fFOVh)
stream.Write32(len(self.fMessageQueue))
for msg in self.fMessageQueue:
PrpMessage.ToStream(stream,msg)
for msg in self.fMessageQueue:
msg.fSender.write(stream)
stream.Write32(len(self.fFOVInstructions))
for msg in self.fFOVInstructions:
PrpMessage.ToStream(stream,msg)
stream.WriteBool(self.fAnimated)
stream.WriteBool(self.fStartAnimOnPush)
stream.WriteBool(self.fStopAnimOnPop)
stream.WriteBool(self.fResetAnimOnPop)
def import_obj(self,obj):
root = self.getRoot()
# calculate the camera lens (blender has 32 mm camera)
radian_fFOVh = self.fFOVh / (360/(2*math.pi))
lens = 32/(2*math.tan(radian_fFOVh/2))
# now set the blender lens property
obj.data.setLens(lens)
c = 0
for cam in self.fTrans:
cam.import_obj(obj,c)
c = c+1
cbrain = root.findref(self.fBrain)
cbrain.data.import_obj(obj)
pass
def export_obj(self,obj):
root = self.getRoot()
print "Exporting Camera Modifier Object"
# --- Find the camera's script object ----
objscript = AlcScript.objects.Find(obj.name)
# check if it's animated
self.fAnimated = FindInDict(objscript,"camera.animated",False)
# --------- FOV --------------
if(obj.data.getType() == 0): # check for perspective camera
lens = obj.data.getLens()
print "Calculating FOV for lens is %i mm" % lens
self.fFOVh = 2 * math.atan(32/(2*lens)) * (360/(2*math.pi))
            self.fFOVw = self.fFOVh / 0.750 # I divide by 0.750 because I hope it's more accurate than multiplying by 1.33
else:
#default them to default values (45:33.75):
print "Camera is not perpective - please changeit to perspective"
pass
# -------- Camera Brains --------
# get brain type from logic property first
cambraintype = getTextPropertyOrDefault(obj,"cambrain","fixed")
# load it in from AlcScript (overrides logic properties)
scriptbrain = FindInDict(objscript,"camera.brain.type","fixed")
scriptbrain = str(scriptbrain).lower()
if scriptbrain in ["fixed","circle","avatar","firstperson","simple"]:
cambraintype = scriptbrain
print " Camera Brain: %s" % cambraintype
# determine the camera brain
if(cambraintype == "fixed"):
# fixed camera brain
cambrain = plCameraBrain1_Fixed.FindCreate(root,str(self.Key.name))
elif(cambraintype == "circle"):
# Circle camera brain
cambrain = plCameraBrain1_Circle.FindCreate(root,str(self.Key.name))
elif(cambraintype == "avatar"):
# Avatar camera brain
cambrain = plCameraBrain1_Avatar.FindCreate(root,str(self.Key.name))
elif(cambraintype == "firstperson"):
# First Person Camera Brain
cambrain = plCameraBrain1_FirstPerson.FindCreate(root,str(self.Key.name))
else:
# simple and default camera brain
cambrain = plCameraBrain1.FindCreate(root,str(self.Key.name))
cambrain.data.export_obj(obj)
self.fBrain = cambrain.data.getRef()
# -------- Camera Transitions ---------
transitions = list(FindInDict(objscript,"camera.transitions",[]))
for transitionscript in transitions:
cam = CamTrans(self)
cam.export_script(transitionscript)
self.fTrans.append(cam)
if len(self.fTrans) == 0:
cam = CamTrans(self)
self.fTrans.append(cam)
def _Export(page,obj,scnobj,name):
# -------- Camera Modifier 1 -------------
cameramod = plCameraModifier1.FindCreate(page,name)
cameramod.data.export_obj(obj)
        # now link the camera modifier to the object (twice, since that appears to be what Cyan does)
scnobj.data.addModifier(cameramod)
scnobj.data.addModifier(cameramod)
Export = staticmethod(_Export)
## Needs to be moved to plCameraModifier1.Export(self,obj,scnobj,name)
def _Import(scnobj,prp,obj):
        # Camera modifiers
for c_ref in scnobj.data1.vector:
if c_ref.Key.object_type in [0x009B,]:
cam=prp.findref(c_ref)
cam.data.import_obj(obj)
obj.layers=[5,]
break
Import = staticmethod(_Import)
class plCameraBrain1(hsKeyedObject):
Flags = {
"kCutPos" : 0,
"kCutPosOnce" : 1,
"kCutPOA" : 2,
"kCutPOAOnce" : 3,
"kAnimateFOV" : 4,
"kFollowLocalAvatar" : 5,
"kPanicVelocity" : 6,
"kRailComponent" : 7,
"kSubject" : 8,
"kCircleTarget" : 9,
"kMaintainLOS" : 10,
"kZoomEnabled" : 11,
"kIsTransitionCamera" : 12,
"kWorldspacePOA" : 13,
"kWorldspacePos" : 14,
"kCutPosWhilePan" : 15,
"kCutPOAWhilePan" : 16,
"kNonPhys" : 17,
"kNeverAnimateFOV" : 18,
"kIgnoreSubworldMovement" : 19,
"kFalling" : 20,
"kRunning" : 21,
"kVerticalWhenFalling" : 22,
"kSpeedUpWhenRunning" : 23,
"kFallingStopped" : 24,
"kBeginFalling" : 25
}
ScriptFlags = {
"cutpos" : 0,
"cutposonce" : 1,
"cutpoa" : 2,
"cutpoaonce" : 3,
"animatefov" : 4,
"followlocalavatar" : 5,
"panicvelocity" : 6,
"railcomponent" : 7,
"subject" : 8,
"circletarget" : 9,
"maintainlos" : 10,
"zoomenabled" : 11,
"istransitioncamera" : 12,
"worldspacepoa" : 13,
"worldspacepos" : 14,
"cutposwhilepan" : 15,
"cutpoawhilepan" : 16,
"nonphys" : 17,
"neveranimatefov" : 18,
"ignoresubworldmovement" : 19,
"falling" : 20,
"running" : 21,
"verticalwhenfalling" : 22,
"speedupwhenrunning" : 23,
"fallingstopped" : 24,
"beginfalling" : 25
}
def __init__(self,parent,name="unnamed",type=0x0099):
hsKeyedObject.__init__(self,parent,name,type)
#format
self.fFlags=hsBitVector()
# -------- Initialize default settings
# only set in export_obj if there is no flag block in script
#self.fFlags.SetBit(plCameraBrain1.Flags["kFollowLocalAvatar"])
self.fPOAOffset = Vertex(0.0,0.0,6.0)
self.fSubjectKey=UruObjectRef()
self.fRail=UruObjectRef()
self.fAccel = 30
self.fDecel = 30
self.fVelocity = 30
self.fPOAAccel = 30
self.fPOADecel = 30
self.fPOAVelocity = 30
self.fXPanLimit = 0
self.fZPanLimit = 0
self.fZoomRate = 0
self.fZoomMin = 0
self.fZoomMax = 0
def _Find(page,name):
return page.find(0x0099,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x0099,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
hsKeyedObject.changePageRaw(self,sid,did,stype,dtype)
self.fSubjectKey.changePageRaw(sid,did,stype,dtype)
self.fRail.changePageRaw(sid,did,stype,dtype)
def read(self,stream):
hsKeyedObject.read(self,stream)
self.fPOAOffset.read(stream)
self.fSubjectKey.read(stream)
self.fRail.read(stream)
self.fFlags.read(stream)
self.fAccel = stream.ReadFloat()
self.fDecel = stream.ReadFloat()
self.fVelocity = stream.ReadFloat()
self.fPOAAccel = stream.ReadFloat()
self.fPOADecel = stream.ReadFloat()
self.fPOAVelocity = stream.ReadFloat()
self.fXPanLimit = stream.ReadFloat()
self.fZPanLimit = stream.ReadFloat()
self.fZoomRate = stream.ReadFloat()
self.fZoomMin = stream.ReadFloat()
self.fZoomMax = stream.ReadFloat()
def write(self,stream):
hsKeyedObject.write(self,stream)
self.fPOAOffset.write(stream)
self.fSubjectKey.write(stream)
self.fRail.write(stream)
self.fFlags.write(stream)
stream.WriteFloat(self.fAccel)
stream.WriteFloat(self.fDecel)
stream.WriteFloat(self.fVelocity)
stream.WriteFloat(self.fPOAAccel)
stream.WriteFloat(self.fPOADecel)
stream.WriteFloat(self.fPOAVelocity)
stream.WriteFloat(self.fXPanLimit)
stream.WriteFloat(self.fZPanLimit)
stream.WriteFloat(self.fZoomRate)
stream.WriteFloat(self.fZoomMin)
stream.WriteFloat(self.fZoomMax)
def import_obj(self,obj):
objscript = AlcScript.objects.FindCreate(obj.name)
StoreInDict(objscript,"camera.brain.type","simple")
StoreInDict(objscript,"camera.brain.poa","%f,%f,%f"%(float(self.fPOAOffset.x),float(self.fOAOffset.y),float(self.fPOAOffset.z)))
StoreInDict(objscript,"camera.brain.accel",self.fAccel)
StoreInDict(objscript,"camera.brain.decel",self.fDecel)
StoreInDict(objscript,"camera.brain.velocity",self.fVelocity)
StoreInDict(objscript,"camera.brain.poaaccel",self.fPOAAccel)
StoreInDict(objscript,"camera.brain.poadecel",self.fPOADecel)
StoreInDict(objscript,"camera.brain.poavelocity",self.fPOAVelocity)
StoreInDict(objscript,"camera.brain.xpanlimit",self.fXPanLimit)
StoreInDict(objscript,"camera.brain.zpanlimit",self.fZPanLimit)
StoreInDict(objscript,"camera.brain.zoomrate",self.fZoomRate)
StoreInDict(objscript,"camera.brain.zoommin",self.fZoomMin)
StoreInDict(objscript,"camera.brain.zoommax",self.fZoomMax)
if not self.fRail.isNull():
StoreInDict(objscript,"camera.brain.rail","%0x%X:%s"%(int(self.fSubjectKey.object_type),str(self.fSubjectKey.Key.Name)))
if not self.fSubjectKey.isNull():
StoreInDict(objscript,"camera.brain.subjectkey","%0x%X:%s"%(int(self.fSubjectKey.object_type),str(self.fSubjectKey.Key.Name)))
def export_obj(self,obj):
print "Exporting CameraBrain1"
# ------ Obtain the AlcScript Object ------
objscript = AlcScript.objects.Find(obj.name)
self.fAccel = float(FindInDict(objscript,"camera.brain.accel",self.fAccel))
self.fDecel = float(FindInDict(objscript,"camera.brain.decel",self.fDecel))
self.fVelocity = float(FindInDict(objscript,"camera.brain.velocity",self.fVelocity))
self.fPOAAccel = float(FindInDict(objscript,"camera.brain.poaaccel",self.fPOAAccel))
self.fPOADecel = float(FindInDict(objscript,"camera.brain.poadecel",self.fPOADecel))
        self.fPOAVelocity = float(FindInDict(objscript,"camera.brain.poavelocity",self.fPOAVelocity))
self.fXPanLimit= float(FindInDict(objscript,"camera.brain.xpanlimit",self.fXPanLimit))
self.fZPanLimit= float(FindInDict(objscript,"camera.brain.zpanlimit",self.fZPanLimit))
self.fZoomRate= float(FindInDict(objscript,"camera.brain.zoomrate",self.fZoomRate))
self.fZoomMin= float(FindInDict(objscript,"camera.brain.zoommin",self.fZoomMin))
self.fZoomMax= float(FindInDict(objscript,"camera.brain.zoommax",self.fZoomMax))
# AlcScript: camera.brain.subjectkey
subject = FindInDict(objscript,"camera.brain.subjectkey",None)
# do something with that...
refparser = ScriptRefParser(self.getRoot(),"",False,[])
self.fSubjectKey = refparser.MixedRef_FindCreateRef(subject)
# AlcScript: camera.brain.subjectkey
rail = FindInDict(objscript,"camera.brain.rail",None)
# do something with that...
refparser = ScriptRefParser(self.getRoot(),"",False,[])
self.fRail = refparser.MixedRef_FindCreateRef(rail)
# ------ Process ------
# AlcScript: camera.brain.poa = "<float X>,<float Y>,<float Z>"
poa = str(FindInDict(objscript,"camera.brain.poa","0,0,0"))
try:
X,Y,Z, = poa.split(',')
self.fPOAOffset = Vertex(float(X),float(Y),float(Z))
except ValueError, detail:
print " Error parsing camera.brain.poa (Value:",poa,") : ",detail
flags = FindInDict(objscript,"camera.brain.flags",None)
if type(flags) == list:
self.fFlags = hsBitVector() # reset
for flag in flags:
if flag.lower() in plCameraBrain1.ScriptFlags:
idx = plCameraBrain1.ScriptFlags[flag.lower()]
self.fFlags.SetBit(idx)
else:
print " No camera flags list, setting default"
self.fFlags.SetBit(plCameraBrain1.Flags["kFollowLocalAvatar"])
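# Illustrative AlcScript fragment for a camera brain (hypothetical values; the
# keys follow the FindInDict() lookups in plCameraBrain1.export_obj above):
#
#   camera:
#       brain:
#           type: fixed
#           poa: "0,0,6"
#           velocity: 30
#           flags:
#             - followlocalavatar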
class plCameraBrain1_Fixed(plCameraBrain1):
def __init__(self,parent,name="unnamed",type=0x009F):
plCameraBrain1.__init__(self,parent,name,type)
# set the Camerabrain1 floats to match defaults for this brain type
self.fAccel = 30
self.fDecel = 30
self.fVelocity = 30
self.fPOAAccel = 30
self.fPOADecel = 30
self.fPOAVelocity = 30
self.fXPanLimit = 0
self.fZPanLimit = 0
self.fZoomRate = 0
self.fZoomMin = 0
self.fZoomMax = 0
#format
self.fTargetPoint=UruObjectRef()
def _Find(page,name):
return page.find(0x009F,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x009F,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plCameraBrain1.changePageRaw(self,sid,did,stype,dtype)
self.fTargetPoint.changePageRaw(sid,did,stype,dtype)
def read(self,stream):
plCameraBrain1.read(self,stream)
self.fTargetPoint.read(stream)
def write(self,stream):
plCameraBrain1.write(self,stream)
self.fTargetPoint.write(stream)
def import_obj(self,obj):
plCameraBrain1.import_obj(self,obj)
objscript = AlcScript.objects.FindCreate(obj.name)
StoreInDict(objscript,"camera.brain.type","fixed")
if not self.fTargetPoint.isNull():
StoreInDict(objscript,"camera.brain.target","%0x%X:%s"%(int(self.fSubjectKey.object_type),str(self.fSubjectKey.Key.Name)))
def export_obj(self,obj):
plCameraBrain1.export_obj(self,obj)
print "Exporting CameraBrain1_Fixed"
# ------ Obtain the AlcScript Object ------
objscript = AlcScript.objects.Find(obj.name)
        # ------ Continue if it's set ------
# AlcScript: camera.brain.target = string
target = FindInDict(objscript,"camera.brain.target",None)
# do something with that...
refparser = ScriptRefParser(self.getRoot(),"","scnobj",[0x0001])
self.fTargetPoint = refparser.MixedRef_FindCreateRef(target)
class plCameraBrain1_Circle(plCameraBrain1_Fixed):
CircleFlags = {
"kLagged" : 0x1,
"kAbsoluteLag" : 0x3,
"kFarthest" : 0x4,
"kTargetted" : 0x8,
"kHasCenterObject" : 0x10,
"kPOAObject" : 0x20,
"kCircleLocalAvatar" : 0x40
}
ScriptCircleFlags = {
"lagged" : 0x1,
"absolutelag" : 0x3,
"farthest" : 0x4,
"targetted" : 0x8,
"hascenterobject" : 0x10,
"poaobject" : 0x20,
"circlelocalavatar" : 0x40
}
def __init__(self,parent,name="unnamed",type=0x00C2):
plCameraBrain1_Fixed.__init__(self,parent,name,type)
# set the Camerabrain1 floats to match defaults for this brain type
self.fAccel = 10
self.fDecel = 10
self.fVelocity = 15
self.fPOAAccel = 10
self.fPOADecel = 10
self.fPOAVelocity = 15
self.fXPanLimit = 0
self.fZPanLimit = 0
self.fZoomRate = 0
self.fZoomMin = 0
self.fZoomMax = 0
#format
self.fCircleFlags = 0
self.fCircleFlags |= plCameraBrain1_Circle.CircleFlags['kCircleLocalAvatar'] | \
plCameraBrain1_Circle.CircleFlags['kFarthest']
self.fCenter = Vertex(0.0,0.0,0.0)
self.fRadius = 0
self.fCenterObject = UruObjectRef()
self.fPOAObj = UruObjectRef()
self.fCirPerSec = 0.10 # virtually always 0.10
def _Find(page,name):
return page.find(0x00C2,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x00C2,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plCameraBrain1.changePageRaw(self,sid,did,stype,dtype)
self.fCenterObject.changePageRaw(sid,did,stype,dtype)
self.fPOAObj.changePageRaw(sid,did,stype,dtype)
def read(self,stream):
plCameraBrain1.read(self,stream) # yes, this is correct, it uses the plCameraBrain1 read/write functions
self.fCircleFlags = stream.Read32()
self.fCenter.read(stream)
self.fRadius = stream.ReadFloat()
self.fCenterObject.read(stream)
self.fPOAObj.read(stream)
self.fCirPerSec = stream.ReadFloat()
def write(self,stream):
plCameraBrain1.write(self,stream) # yes, this is correct, it uses the plCameraBrain1 read/write functions
stream.Write32(self.fCircleFlags)
self.fCenter.write(stream)
stream.WriteFloat(self.fRadius)
self.fCenterObject.write(stream)
self.fPOAObj.write(stream)
stream.WriteFloat(self.fCirPerSec)
def import_obj(self,obj):
plCameraBrain1.import_obj(self,obj)
objscript = AlcScript.objects.FindCreate(obj.name)
StoreInDict(objscript,"camera.brain.type","circle")
obj.data.setClipEnd(self.fRadius)
obj.data.setMode("showLimits")
flaglist = []
for flag in plCameraBrain1_Circle.ScriptCircleFlags.keys():
            if self.fCircleFlags & plCameraBrain1_Circle.ScriptCircleFlags[flag] > 0:
flaglist.append(flag)
StoreInDict(objscript,"camera.brain.cicleflags",flaglist)
def export_obj(self,obj):
plCameraBrain1_Fixed.export_obj(self,obj)
# -------- Export based on blender object -------
# get the matrices
LocalToWorld=hsMatrix44()
m=getMatrix(obj)
m.transpose()
LocalToWorld.set(m)
# convert the clip-end to the Center point of the camera
if obj.getType() == 'Camera':
clip_end = obj.data.getClipEnd()
self.fCenter = Vertex(0,0,0 - clip_end) # camera points to local -Z
self.fCenter.transform(LocalToWorld)
self.fRadius = clip_end # always seems to define distance from camera to rotation point
# -------Continue based on AlcScript object ------
objscript = AlcScript.objects.Find(obj.name)
        # ------ Continue if it's set ------
flags = FindInDict(objscript,"camera.brain.circleflags",None)
if type(flags) == list:
self.fCircleFlags = 0
for flag in flags:
if flag.lower() in plCameraBrain1_Circle.ScriptCircleFlags:
self.fCircleFlags |= plCameraBrain1_Circle.ScriptCircleFlags[flag.lower()]
class plCameraBrain1_Avatar(plCameraBrain1):
def __init__(self,parent,name="unnamed",type=0x009E):
plCameraBrain1.__init__(self,parent,name,type)
# Set default flag...
self.fFlags.SetBit(plCameraBrain1.Flags["kMaintainLOS"])
# set the Camerabrain1 floats to match defaults for this brain type
self.fAccel = 10
self.fDecel = 10
self.fVelocity = 50
self.fPOAAccel = 10
self.fPOADecel = 10
self.fPOAVelocity = 50
self.fXPanLimit = 0
self.fZPanLimit = 0
self.fZoomRate = 0
self.fZoomMin = 0
self.fZoomMax = 0
#format
self.fOffset = Vertex(0,0,0)
def _Find(page,name):
return page.find(0x009E,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x009E,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plCameraBrain1.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plCameraBrain1.read(self,stream)
self.fOffset.read(stream)
def write(self,stream):
plCameraBrain1.write(self,stream)
self.fOffset.write(stream)
def import_obj(self,obj):
plCameraBrain1.import_obj(self,obj)
objscript = AlcScript.objects.FindCreate(obj.name)
StoreInDict(objscript,"camera.brain.type","avatar")
StoreInDict(objscript,"camera.brain.fpoffset","%f,%f,%f"%(float(self.fOffset.x),float(self.fOffset.y),float(self.fOffset.z)))
def export_obj(self,obj):
plCameraBrain1.export_obj(self,obj)
# ------ Obtain the AlcScript Object ------
objscript = AlcScript.objects.Find(obj.name)
        # ------ Continue if it's set ------
# AlcScript: camera.brain.offset = "<float X>,<float Y>,<float Z>"
offset = str(FindInDict(objscript,"camera.brain.offset","0,0,0"))
try:
X,Y,Z, = offset.split(',')
self.fOffset = Vertex(float(X),float(Y),float(Z))
except ValueError, detail:
print " Error parsing camera.brain.offset (Value:",offset,") : ",detail
class plCameraBrain1_FirstPerson(plCameraBrain1_Avatar):
def __init__(self,parent,name="unnamed",type=0x00B3):
plCameraBrain1_Avatar.__init__(self,parent,name,type)
def _Find(page,name):
return page.find(0x00B3,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x00B3,name,1)
FindCreate = staticmethod(_FindCreate)
def changePageRaw(self,sid,did,stype,dtype):
plCameraBrain1_Avatar.changePageRaw(self,sid,did,stype,dtype)
def read(self,stream):
plCameraBrain1_Avatar.read(self,stream)
def write(self,stream):
plCameraBrain1_Avatar.write(self,stream)
    def import_obj(self,obj):
        plCameraBrain1_Avatar.import_obj(self,obj)
        objscript = AlcScript.objects.FindCreate(obj.name)
        StoreInDict(objscript,"camera.brain.type","firstperson")
def export_obj(self,obj):
plCameraBrain1_Avatar.export_obj(self,obj)
class plPostEffectMod(plSingleModifier):
def __init__(self,parent,name="unnamed",type=0x007A):
plSingleModifier.__init__(self,parent,name,type)
self.fState = hsBitVector()
self.fHither = 1.0
self.fYon = 100.0
self.fFOVX = 45.00
self.fFOVY = 33.75
self.fNodeKey = UruObjectRef(self.getVersion())
self.fC2W = hsMatrix44()
self.fW2C = hsMatrix44()
def _Find(page,name):
return page.find(0x007A,name,0)
Find = staticmethod(_Find)
def _FindCreate(page,name):
return page.find(0x007A,name,1)
FindCreate = staticmethod(_FindCreate)
def read(self,stream):
plSingleModifier.read(self,stream)
self.fState.read(stream)
self.fHither = stream.ReadFloat()
        self.fYon = stream.ReadFloat()
self.fFOVX = stream.ReadFloat()
self.fFOVY = stream.ReadFloat()
self.fNodeKey.read(stream)
self.fW2C.read(stream)
self.fC2W.read(stream)
def write(self,stream):
plSingleModifier.write(self,stream)
self.fState.write(stream)
stream.WriteFloat(self.fHither)
stream.WriteFloat(self.fYon)
stream.WriteFloat(self.fFOVX)
stream.WriteFloat(self.fFOVY)
self.fNodeKey.write(stream)
self.fW2C.write(stream)
self.fC2W.write(stream)
def export_obj(self, obj, sceneNode):
script = AlcScript.objects.Find(obj.name)
m = getMatrix(obj)
m.transpose()
self.fC2W.set(m)
m.invert()
self.fW2C.set(m)
self.fNodeKey = sceneNode
self.fHither = float(FindInDict(script, "camera.hither", 1.0))
self.fYon = float(FindInDict(script, "camera.yon", 100.0))
| Jrius/PyPRP | PyPRP/prp_CamClasses.py | Python | gpl-2.0 | 32,164 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleExternalInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
(C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
otb at c-s dot fr (CS SI)
Contributors : Victor Olaya - basis from MultipleInputDialog
Alexia Mondot (CS SI) - new parameter
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import QgsSettings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import QByteArray
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox, QFileDialog
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleFileInputDialog(BASE, WIDGET):
def __init__(self, options):
super(MultipleFileInputDialog, self).__init__(None)
self.setupUi(self)
self.lstLayers.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.selectedoptions = options
# Additional buttons
self.btnAdd = QPushButton(self.tr('Add file'))
self.buttonBox.addButton(self.btnAdd,
QDialogButtonBox.ActionRole)
self.btnRemove = QPushButton(self.tr('Remove file(s)'))
self.buttonBox.addButton(self.btnRemove,
QDialogButtonBox.ActionRole)
self.btnRemoveAll = QPushButton(self.tr('Remove all'))
self.buttonBox.addButton(self.btnRemoveAll,
QDialogButtonBox.ActionRole)
self.btnAdd.clicked.connect(self.addFile)
self.btnRemove.clicked.connect(lambda: self.removeRows())
self.btnRemoveAll.clicked.connect(lambda: self.removeRows(True))
self.settings = QgsSettings()
self.restoreGeometry(self.settings.value("/Processing/multipleFileInputDialogGeometry", QByteArray()))
self.populateList()
self.finished.connect(self.saveWindowGeometry)
def saveWindowGeometry(self):
self.settings.setValue("/Processing/multipleInputDialogGeometry", self.saveGeometry())
def populateList(self):
model = QStandardItemModel()
for option in self.selectedoptions:
item = QStandardItem(option)
model.appendRow(item)
self.lstLayers.setModel(model)
def accept(self):
self.selectedoptions = []
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
self.selectedoptions.append(item.text())
QDialog.accept(self)
def reject(self):
QDialog.reject(self)
def addFile(self):
settings = QgsSettings()
if settings.contains('/Processing/LastInputPath'):
path = settings.value('/Processing/LastInputPath')
else:
path = ''
files, selected_filter = QFileDialog.getOpenFileNames(self,
self.tr('Select File(s)'), path, self.tr('All files (*.*)'))
if len(files) == 0:
return
model = self.lstLayers.model()
for filePath in files:
item = QStandardItem(filePath)
model.appendRow(item)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(files[0]))
def removeRows(self, removeAll=False):
if removeAll:
self.lstLayers.model().clear()
else:
self.lstLayers.setUpdatesEnabled(False)
indexes = sorted(self.lstLayers.selectionModel().selectedIndexes())
for i in reversed(indexes):
self.lstLayers.model().removeRow(i.row())
self.lstLayers.setUpdatesEnabled(True)
| CS-SI/QGIS | python/plugins/processing/gui/MultipleFileInputDialog.py | Python | gpl-2.0 | 4,837 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutView.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '05/07/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsProject,
QgsLayout,
QgsUnitTypes,
QgsLayoutItemPicture,
QgsLayoutPoint,
QgsLayoutSize,
QgsLayoutAligner)
from qgis.gui import QgsLayoutView
from qgis.PyQt.QtCore import QRectF
from qgis.PyQt.QtGui import QTransform
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
start_app()
class TestQgsLayoutView(unittest.TestCase):
def testScaleSafe(self):
""" test scaleSafe method """
view = QgsLayoutView()
view.fitInView(QRectF(0, 0, 10, 10))
scale = view.transform().m11()
view.scaleSafe(2)
self.assertAlmostEqual(view.transform().m11(), 2)
view.scaleSafe(4)
self.assertAlmostEqual(view.transform().m11(), 8)
# try to zoom in heaps
view.scaleSafe(99999999)
# assume we have hit the limit
scale = view.transform().m11()
view.scaleSafe(2)
self.assertAlmostEqual(view.transform().m11(), scale)
view.setTransform(QTransform.fromScale(1, 1))
self.assertAlmostEqual(view.transform().m11(), 1)
# test zooming out
view.scaleSafe(0.5)
self.assertAlmostEqual(view.transform().m11(), 0.5)
view.scaleSafe(0.1)
self.assertAlmostEqual(view.transform().m11(), 0.05)
# try zooming out heaps
view.scaleSafe(0.000000001)
# assume we have hit the limit
scale = view.transform().m11()
view.scaleSafe(0.5)
self.assertAlmostEqual(view.transform().m11(), scale)
def testLayoutScalePixels(self):
p = QgsProject()
l = QgsLayout(p)
l.setUnits(QgsUnitTypes.LayoutPixels)
view = QgsLayoutView()
view.setCurrentLayout(l)
view.setZoomLevel(1)
# should be no transform, since 100% with pixel units should be pixel-pixel
self.assertEqual(view.transform().m11(), 1)
view.setZoomLevel(0.5)
self.assertEqual(view.transform().m11(), 0.5)
def testSelectAll(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.setLocked(True)
l.addItem(item3)
view = QgsLayoutView()
# no layout, no crash
view.selectAll()
view.setCurrentLayout(l)
focused_item_spy = QSignalSpy(view.itemFocused)
view.selectAll()
self.assertTrue(item1.isSelected())
self.assertTrue(item2.isSelected())
self.assertFalse(item3.isSelected()) # locked
self.assertEqual(len(focused_item_spy), 1)
item3.setSelected(True) # locked item selection should be cleared
view.selectAll()
self.assertTrue(item1.isSelected())
self.assertTrue(item2.isSelected())
self.assertFalse(item3.isSelected()) # locked
def testDeselectAll(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.setLocked(True)
l.addItem(item3)
view = QgsLayoutView()
# no layout, no crash
view.deselectAll()
view.setCurrentLayout(l)
focused_item_spy = QSignalSpy(view.itemFocused)
view.deselectAll()
self.assertFalse(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected())
self.assertEqual(len(focused_item_spy), 1)
item1.setSelected(True)
item2.setSelected(True)
item3.setSelected(True)
view.deselectAll()
self.assertFalse(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected())
def testInvertSelection(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.setLocked(True)
l.addItem(item3)
view = QgsLayoutView()
# no layout, no crash
view.invertSelection()
view.setCurrentLayout(l)
focused_item_spy = QSignalSpy(view.itemFocused)
view.invertSelection()
self.assertTrue(item1.isSelected())
self.assertTrue(item2.isSelected())
self.assertFalse(item3.isSelected()) # locked
self.assertEqual(len(focused_item_spy), 1)
item3.setSelected(True) # locked item selection should be cleared
view.invertSelection()
self.assertFalse(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected()) # locked
def testSelectNextByZOrder(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.setLocked(True)
l.addItem(item3)
view = QgsLayoutView()
# no layout, no crash
view.selectNextItemAbove()
view.selectNextItemBelow()
view.setCurrentLayout(l)
focused_item_spy = QSignalSpy(view.itemFocused)
# no selection
view.selectNextItemAbove()
view.selectNextItemBelow()
self.assertEqual(len(focused_item_spy), 0)
l.setSelectedItem(item1)
self.assertEqual(len(focused_item_spy), 1)
# already bottom most
view.selectNextItemBelow()
self.assertTrue(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected())
self.assertEqual(len(focused_item_spy), 1)
view.selectNextItemAbove()
self.assertFalse(item1.isSelected())
self.assertTrue(item2.isSelected())
self.assertFalse(item3.isSelected())
self.assertEqual(len(focused_item_spy), 2)
view.selectNextItemAbove()
self.assertFalse(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertTrue(item3.isSelected())
self.assertEqual(len(focused_item_spy), 3)
view.selectNextItemAbove() # already top most
self.assertFalse(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertTrue(item3.isSelected())
self.assertEqual(len(focused_item_spy), 3)
view.selectNextItemBelow()
self.assertFalse(item1.isSelected())
self.assertTrue(item2.isSelected())
self.assertFalse(item3.isSelected())
self.assertEqual(len(focused_item_spy), 4)
view.selectNextItemBelow()
self.assertTrue(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected())
self.assertEqual(len(focused_item_spy), 5)
view.selectNextItemBelow() # back to bottom most
self.assertTrue(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected())
self.assertEqual(len(focused_item_spy), 5)
def testLockActions(self):
p = QgsProject()
l = QgsLayout(p)
view = QgsLayoutView()
view.setCurrentLayout(l)
# add some items
item1 = QgsLayoutItemPicture(l)
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
l.addItem(item3)
item1.setLocked(True)
item3.setLocked(True)
self.assertTrue(item1.isLocked())
self.assertFalse(item2.isLocked())
self.assertTrue(item3.isLocked())
view.unlockAllItems()
self.assertFalse(item1.isLocked())
self.assertFalse(item2.isLocked())
self.assertFalse(item3.isLocked())
self.assertTrue(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertTrue(item3.isSelected())
view.lockSelectedItems()
self.assertTrue(item1.isLocked())
self.assertFalse(item2.isLocked())
self.assertTrue(item3.isLocked())
self.assertFalse(item1.isSelected())
self.assertFalse(item2.isSelected())
self.assertFalse(item3.isSelected())
def testStacking(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
l.addLayoutItem(item1)
item2 = QgsLayoutItemPicture(l)
l.addLayoutItem(item2)
item3 = QgsLayoutItemPicture(l)
l.addLayoutItem(item3)
view = QgsLayoutView()
view.setCurrentLayout(l)
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 2)
self.assertEqual(item3.zValue(), 3)
# no effect interactions
view.raiseSelectedItems()
view.lowerSelectedItems()
view.moveSelectedItemsToTop()
view.moveSelectedItemsToBottom()
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 2)
self.assertEqual(item3.zValue(), 3)
# raising
item3.setSelected(True)
view.raiseSelectedItems()
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 2)
self.assertEqual(item3.zValue(), 3)
item3.setSelected(False)
item2.setSelected(True)
view.raiseSelectedItems()
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 2)
view.raiseSelectedItems()
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 2)
item2.setSelected(False)
item1.setSelected(True)
view.raiseSelectedItems()
self.assertEqual(item1.zValue(), 2)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 1)
# lower
item1.setSelected(False)
item3.setSelected(True)
view.lowerSelectedItems()
self.assertEqual(item1.zValue(), 2)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 1)
item3.setSelected(False)
item2.setSelected(True)
view.lowerSelectedItems()
self.assertEqual(item1.zValue(), 3)
self.assertEqual(item2.zValue(), 2)
self.assertEqual(item3.zValue(), 1)
view.lowerSelectedItems()
self.assertEqual(item1.zValue(), 3)
self.assertEqual(item2.zValue(), 1)
self.assertEqual(item3.zValue(), 2)
# raise to top
item2.setSelected(False)
item1.setSelected(True)
view.moveSelectedItemsToTop()
self.assertEqual(item1.zValue(), 3)
self.assertEqual(item2.zValue(), 1)
self.assertEqual(item3.zValue(), 2)
item1.setSelected(False)
item3.setSelected(True)
view.moveSelectedItemsToTop()
self.assertEqual(item1.zValue(), 2)
self.assertEqual(item2.zValue(), 1)
self.assertEqual(item3.zValue(), 3)
item3.setSelected(False)
item2.setSelected(True)
view.moveSelectedItemsToTop()
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 2)
# move to bottom
item2.setSelected(False)
item1.setSelected(True)
view.moveSelectedItemsToBottom()
self.assertEqual(item1.zValue(), 1)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 2)
item1.setSelected(False)
item3.setSelected(True)
view.moveSelectedItemsToBottom()
self.assertEqual(item1.zValue(), 2)
self.assertEqual(item2.zValue(), 3)
self.assertEqual(item3.zValue(), 1)
item3.setSelected(False)
item2.setSelected(True)
view.moveSelectedItemsToBottom()
self.assertEqual(item1.zValue(), 3)
self.assertEqual(item2.zValue(), 1)
self.assertEqual(item3.zValue(), 2)
def testAlign(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
item2.attemptMove(QgsLayoutPoint(6, 10, QgsUnitTypes.LayoutMillimeters))
item2.attemptResize(QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.attemptMove(QgsLayoutPoint(0.8, 1.2, QgsUnitTypes.LayoutCentimeters))
item3.attemptResize(QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.addItem(item3)
view = QgsLayoutView()
view.setCurrentLayout(l)
view.alignSelectedItems(QgsLayoutAligner.AlignLeft)
item1.setSelected(True)
item2.setSelected(True)
item3.setSelected(True)
view.alignSelectedItems(QgsLayoutAligner.AlignLeft)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(4, 10, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(0.4, 1.2, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.alignSelectedItems(QgsLayoutAligner.AlignHCenter)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(8, 10, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(0.4, 1.2, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.alignSelectedItems(QgsLayoutAligner.AlignRight)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(12, 10, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(0.4, 1.2, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.alignSelectedItems(QgsLayoutAligner.AlignTop)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(12, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(0.4, 0.8, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.alignSelectedItems(QgsLayoutAligner.AlignVCenter)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 10, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(12, 11.5, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(0.4, 0.8, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.alignSelectedItems(QgsLayoutAligner.AlignBottom)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.positionWithUnits(), QgsLayoutPoint(12, 15, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.positionWithUnits(), QgsLayoutPoint(0.4, 0.8, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
def testDistribute(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
item2.attemptMove(QgsLayoutPoint(7, 10, QgsUnitTypes.LayoutMillimeters))
item2.attemptResize(QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.attemptMove(QgsLayoutPoint(0.8, 1.2, QgsUnitTypes.LayoutCentimeters))
item3.attemptResize(QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.addItem(item3)
view = QgsLayoutView()
view.setCurrentLayout(l)
view.distributeSelectedItems(QgsLayoutAligner.DistributeLeft)
item1.setSelected(True)
item2.setSelected(True)
item3.setSelected(True)
view.distributeSelectedItems(QgsLayoutAligner.DistributeLeft)
self.assertEqual(item1.positionWithUnits(), QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 6.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 0.8, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.distributeSelectedItems(QgsLayoutAligner.DistributeHCenter)
self.assertAlmostEqual(item1.positionWithUnits().x(), 5.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 6.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 0.8, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.distributeSelectedItems(QgsLayoutAligner.DistributeRight)
self.assertAlmostEqual(item1.positionWithUnits().x(), 3.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().x(), 6.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().x(), 0.8, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.distributeSelectedItems(QgsLayoutAligner.DistributeTop)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 10.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.2, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.distributeSelectedItems(QgsLayoutAligner.DistributeVCenter)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 12.5, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.2, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
view.distributeSelectedItems(QgsLayoutAligner.DistributeBottom)
self.assertAlmostEqual(item1.positionWithUnits().y(), 8.0, 3)
self.assertEqual(item1.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item2.positionWithUnits().y(), 15.0, 3)
self.assertEqual(item2.positionWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertAlmostEqual(item3.positionWithUnits().y(), 1.2, 3)
self.assertEqual(item3.positionWithUnits().units(), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
def testResize(self):
p = QgsProject()
l = QgsLayout(p)
# add some items
item1 = QgsLayoutItemPicture(l)
item1.attemptMove(QgsLayoutPoint(4, 8, QgsUnitTypes.LayoutMillimeters))
item1.attemptResize(QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
l.addItem(item1)
item2 = QgsLayoutItemPicture(l)
item2.attemptMove(QgsLayoutPoint(7, 10, QgsUnitTypes.LayoutMillimeters))
item2.attemptResize(QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
l.addItem(item2)
item3 = QgsLayoutItemPicture(l)
item3.attemptMove(QgsLayoutPoint(0.8, 1.2, QgsUnitTypes.LayoutCentimeters))
item3.attemptResize(QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.addItem(item3)
view = QgsLayoutView()
view.setCurrentLayout(l)
view.resizeSelectedItems(QgsLayoutAligner.ResizeNarrowest)
item1.setSelected(True)
item2.setSelected(True)
item3.setSelected(True)
view.resizeSelectedItems(QgsLayoutAligner.ResizeNarrowest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(10, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.0, 1.6, QgsUnitTypes.LayoutCentimeters))
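        # undo the resize so the next resize operation starts from the original item sizes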
l.undoStack().stack().undo()
view.resizeSelectedItems(QgsLayoutAligner.ResizeWidest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 12, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(18, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
view.resizeSelectedItems(QgsLayoutAligner.ResizeShortest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 9, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 0.9, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
view.resizeSelectedItems(QgsLayoutAligner.ResizeTallest)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 16, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(10, 16, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.6, QgsUnitTypes.LayoutCentimeters))
l.undoStack().stack().undo()
item2.attemptResize(QgsLayoutSize(10, 19, QgsUnitTypes.LayoutMillimeters))
view.resizeSelectedItems(QgsLayoutAligner.ResizeToSquare)
self.assertEqual(item1.sizeWithUnits(), QgsLayoutSize(18, 18, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item2.sizeWithUnits(), QgsLayoutSize(19, 19, QgsUnitTypes.LayoutMillimeters))
self.assertEqual(item3.sizeWithUnits(), QgsLayoutSize(1.8, 1.8, QgsUnitTypes.LayoutCentimeters))
if __name__ == '__main__':
unittest.main()
| nirvn/QGIS | tests/src/python/test_qgslayoutview.py | Python | gpl-2.0 | 27,525 |
# -*- coding: utf-8 -*-
'''
Created on 25 April 2014
@author: Kimon Tsitsikas
Copyright © 2013-2014 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
from concurrent import futures
import logging
import math
from odemis import model
import odemis
from odemis import acq
from odemis.acq import align, stream
from odemis.dataio import hdf5
from odemis.driver.actuator import ConvertStage
from odemis.util import test
import os
import threading
import time
import unittest
from unittest.case import skip
import weakref
logging.basicConfig(format="%(asctime)s %(levelname)-7s %(module)-15s: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/"
SECOM_LENS_CONFIG = CONFIG_PATH + "sim/secom-sim-lens-align.odm.yaml" # 4x4
class TestAlignment(unittest.TestCase):
"""
Test Spot Alignment functions
"""
backend_was_running = False
@classmethod
def setUpClass(cls):
try:
test.start_backend(SECOM_LENS_CONFIG)
except LookupError:
logging.info("A running backend is already found, skipping tests")
cls.backend_was_running = True
return
except IOError as exp:
logging.error(str(exp))
raise
# find components by their role
cls.ebeam = model.getComponent(role="e-beam")
cls.sed = model.getComponent(role="se-detector")
cls.ccd = model.getComponent(role="ccd")
cls.focus = model.getComponent(role="focus")
cls.align = model.getComponent(role="align")
cls.light = model.getComponent(role="light")
cls.light_filter = model.getComponent(role="filter")
cls.stage = model.getComponent(role="stage")
# Used for OBJECTIVE_MOVE type
cls.aligner_xy = ConvertStage("converter-ab", "stage",
children={"orig": cls.align},
axes=["b", "a"],
rotation=math.radians(45))
@classmethod
def tearDownClass(cls):
if cls.backend_was_running:
return
test.stop_backend()
def setUp(self):
if self.backend_was_running:
self.skipTest("Running backend found")
# image for FakeCCD
self.data = hdf5.read_data("../align/test/one_spot.h5")
C, T, Z, Y, X = self.data[0].shape
self.data[0].shape = Y, X
self.fake_img = self.data[0]
# @skip("skip")
def test_spot_alignment(self):
"""
Test AlignSpot
"""
escan = self.ebeam
ccd = self.ccd
focus = self.focus
f = align.AlignSpot(ccd, self.aligner_xy, escan, focus)
dist, vector = f.result()
self.assertAlmostEqual(dist, 2.41e-05)
# @skip("faster")
def test_spot_alignment_cancelled(self):
"""
Test AlignSpot cancellation
"""
escan = self.ebeam
ccd = self.ccd
focus = self.focus
f = align.AlignSpot(ccd, self.aligner_xy, escan, focus)
        time.sleep(0.01)  # Cancel after roughly half of the grid has been scanned
f.cancel()
self.assertTrue(f.cancelled())
self.assertTrue(f.done())
with self.assertRaises(futures.CancelledError):
f.result()
def on_done(self, future):
self.done += 1
def on_progress_update(self, future, past, left):
self.past = past
self.left = left
self.updates += 1
def test_aligned_stream(self):
"""
Test the AlignedSEMStream
"""
# Use fake ccd in order to have just one spot
ccd = FakeCCD(self, self.align)
# first try using the metadata correction
st = stream.AlignedSEMStream("sem-md", self.sed, self.sed.data, self.ebeam,
ccd, self.stage, self.focus, shiftebeam=stream.MTD_MD_UPD)
# we don't really care about the SEM image, so the faster the better
self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]
# start one image acquisition (so it should do the calibration)
f = acq.acquire([st])
received, _ = f.result()
self.assertTrue(received, "No image received after 30 s")
# Check the correction metadata is there
md = self.sed.getMetadata()
self.assertIn(model.MD_POS_COR, md)
# Check the position of the image is correct
pos_cor = md[model.MD_POS_COR]
pos_dict = self.stage.position.value
pos = (pos_dict["x"], pos_dict["y"])
exp_pos = tuple(p - c for p, c in zip(pos, pos_cor))
imd = received[0].metadata
self.assertEqual(exp_pos, imd[model.MD_POS])
# Check the calibration doesn't happen again on a second acquisition
bad_cor = (-1, -1) # stupid impossible value
self.sed.updateMetadata({model.MD_POS_COR: bad_cor})
f = acq.acquire([st])
received, _ = f.result()
self.assertTrue(received, "No image received after 10 s")
# if calibration has happened (=bad), it has changed the metadata
md = self.sed.getMetadata()
self.assertEqual(bad_cor, md[model.MD_POS_COR],
"metadata has been updated while it shouldn't have")
# Check calibration happens again after a stage move
f = self.stage.moveRel({"x": 100e-6})
f.result() # make sure the move is over
time.sleep(0.1) # make sure the stream had time to detect position has changed
received = st.image.value
f = acq.acquire([st])
received, _ = f.result()
self.assertTrue(received, "No image received after 30 s")
# if calibration has happened (=good), it has changed the metadata
md = self.sed.getMetadata()
self.assertNotEqual(bad_cor, md[model.MD_POS_COR],
"metadata hasn't been updated while it should have")
class FakeCCD(model.HwComponent):
"""
Fake CCD component that returns a spot image
"""
def __init__(self, testCase, align):
super(FakeCCD, self).__init__("testccd", "ccd")
self.testCase = testCase
self.align = align
self.exposureTime = model.FloatContinuous(1, (1e-6, 1000), unit="s")
self.binning = model.TupleContinuous((1, 1), [(1, 1), (8, 8)],
cls=(int, long, float), unit="")
self.resolution = model.ResolutionVA((2160, 2560), [(1, 1), (2160, 2560)])
self.data = CCDDataFlow(self)
self._acquisition_thread = None
self._acquisition_lock = threading.Lock()
self._acquisition_init_lock = threading.Lock()
self._acquisition_must_stop = threading.Event()
self.fake_img = self.testCase.fake_img
def start_acquire(self, callback):
with self._acquisition_lock:
self._wait_acquisition_stopped()
target = self._acquire_thread
self._acquisition_thread = threading.Thread(target=target,
name="FakeCCD acquire flow thread",
args=(callback,))
self._acquisition_thread.start()
def stop_acquire(self):
with self._acquisition_lock:
with self._acquisition_init_lock:
self._acquisition_must_stop.set()
def _wait_acquisition_stopped(self):
"""
Waits until the acquisition thread is fully finished _iff_ it was requested
to stop.
"""
# "if" is to not wait if it's already finished
if self._acquisition_must_stop.is_set():
logging.debug("Waiting for thread to stop.")
self._acquisition_thread.join(10) # 10s timeout for safety
if self._acquisition_thread.isAlive():
logging.exception("Failed to stop the acquisition thread")
# Now let's hope everything is back to normal...
# ensure it's not set, even if the thread died prematurely
self._acquisition_must_stop.clear()
def _simulate_image(self):
"""
Generates the fake output.
"""
with self._acquisition_lock:
self.fake_img.metadata[model.MD_ACQ_DATE] = time.time()
output = model.DataArray(self.fake_img, self.fake_img.metadata)
            return output
def _acquire_thread(self, callback):
"""
Thread that simulates the CCD acquisition.
"""
try:
while not self._acquisition_must_stop.is_set():
# dummy
duration = 1
if self._acquisition_must_stop.wait(duration):
break
callback(self._simulate_image())
except:
logging.exception("Unexpected failure during image acquisition")
finally:
logging.debug("Acquisition thread closed")
self._acquisition_must_stop.clear()
class CCDDataFlow(model.DataFlow):
"""
This is an extension of model.DataFlow. It receives notifications from the
FakeCCD component once the fake output is generated. This is the dataflow to
which the CCD acquisition streams subscribe.
"""
def __init__(self, ccd):
model.DataFlow.__init__(self)
self.component = weakref.ref(ccd)
def start_generate(self):
try:
self.component().start_acquire(self.notify)
except ReferenceError:
pass
def stop_generate(self):
try:
self.component().stop_acquire()
except ReferenceError:
pass
if __name__ == '__main__':
# suite = unittest.TestLoader().loadTestsFromTestCase(TestAlignment)
# unittest.TextTestRunner(verbosity=2).run(suite)
unittest.main()
| gstiebler/odemis | src/odemis/acq/test/spot_alignment_test.py | Python | gpl-2.0 | 10,396 |
#python
import k3d
import testing
setup = testing.setup_mesh_modifier_test("PolyGrid", "ExtrudeFaces")
setup.source.rows = 3
setup.source.columns = 3
selection = k3d.geometry.selection.create(0)
face_selection = k3d.geometry.primitive_selection.create(selection, k3d.selection.type.FACE)
k3d.geometry.primitive_selection.append(face_selection, 4, 5, 1)
setup.modifier.mesh_selection = selection
setup.modifier.distance = 5
testing.require_valid_mesh(setup.document, setup.modifier.get_property("output_mesh"))
testing.require_similar_mesh(setup.document, setup.modifier.get_property("output_mesh"), "mesh.modifier.ExtrudeFaces", 2)
| barche/k3d | tests/mesh/mesh.modifier.ExtrudeFaces.py | Python | gpl-2.0 | 639 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Support for buildsets in the database
"""
import sqlalchemy as sa
from buildbot.db import base
from buildbot.util import datetime2epoch
from buildbot.util import epoch2datetime
from buildbot.util import json
from twisted.internet import reactor
class BsDict(dict):
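    # Plain dict subclass used for the buildset rows returned by this connector.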
pass
class BuildsetsConnectorComponent(base.DBConnectorComponent):
# Documentation is in developer/database.rst
def addBuildset(self, sourcestampsetid, reason, properties, builderNames,
external_idstring=None, _reactor=reactor):
def thd(conn):
buildsets_tbl = self.db.model.buildsets
submitted_at = _reactor.seconds()
self.check_length(buildsets_tbl.c.reason, reason)
self.check_length(buildsets_tbl.c.external_idstring,
external_idstring)
transaction = conn.begin()
# insert the buildset itself
r = conn.execute(buildsets_tbl.insert(), dict(
sourcestampsetid=sourcestampsetid, submitted_at=submitted_at,
reason=reason, complete=0, complete_at=None, results=-1,
external_idstring=external_idstring))
bsid = r.inserted_primary_key[0]
# add any properties
if properties:
bs_props_tbl = self.db.model.buildset_properties
inserts = [
dict(buildsetid=bsid, property_name=k,
property_value=json.dumps([v, s]))
for k, (v, s) in properties.iteritems()]
for i in inserts:
self.check_length(bs_props_tbl.c.property_name,
i['property_name'])
conn.execute(bs_props_tbl.insert(), inserts)
# and finish with a build request for each builder. Note that
# sqlalchemy and the Python DBAPI do not provide a way to recover
# inserted IDs from a multi-row insert, so this is done one row at
# a time.
brids = {}
br_tbl = self.db.model.buildrequests
ins = br_tbl.insert()
for buildername in builderNames:
self.check_length(br_tbl.c.buildername, buildername)
r = conn.execute(ins,
dict(buildsetid=bsid, buildername=buildername, priority=0,
claimed_at=0, claimed_by_name=None,
claimed_by_incarnation=None, complete=0, results=-1,
submitted_at=submitted_at, complete_at=None))
brids[buildername] = r.inserted_primary_key[0]
transaction.commit()
return (bsid, brids)
return self.db.pool.do(thd)
def completeBuildset(self, bsid, results, complete_at=None,
_reactor=reactor):
if complete_at is not None:
complete_at = datetime2epoch(complete_at)
else:
complete_at = _reactor.seconds()
def thd(conn):
tbl = self.db.model.buildsets
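            # only update the buildset if it is not already complete; if no row
            # matches, rowcount is 0 and KeyError is raised below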
q = tbl.update(whereclause=(
(tbl.c.id == bsid) &
((tbl.c.complete == None) | (tbl.c.complete != 1))))
res = conn.execute(q,
complete=1,
results=results,
complete_at=complete_at)
if res.rowcount != 1:
raise KeyError
return self.db.pool.do(thd)
def getBuildset(self, bsid):
def thd(conn):
bs_tbl = self.db.model.buildsets
q = bs_tbl.select(whereclause=(bs_tbl.c.id == bsid))
res = conn.execute(q)
row = res.fetchone()
if not row:
return None
return self._row2dict(row)
return self.db.pool.do(thd)
def getBuildsets(self, complete=None):
def thd(conn):
bs_tbl = self.db.model.buildsets
q = bs_tbl.select()
if complete is not None:
if complete:
q = q.where(bs_tbl.c.complete != 0)
else:
q = q.where((bs_tbl.c.complete == 0) |
(bs_tbl.c.complete == None))
res = conn.execute(q)
return [self._row2dict(row) for row in res.fetchall()]
return self.db.pool.do(thd)
def getRecentBuildsets(self, count, branch=None, repository=None,
complete=None):
def thd(conn):
bs_tbl = self.db.model.buildsets
ss_tbl = self.db.model.sourcestamps
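            # join buildsets -> sourcestampsets -> sourcestamps so the optional
            # branch/repository filters below can be applied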
j = sa.join(self.db.model.buildsets,
self.db.model.sourcestampsets)
j = j.join(self.db.model.sourcestamps)
q = sa.select(columns=[bs_tbl], from_obj=[j],
distinct=True)
q = q.order_by(sa.desc(bs_tbl.c.submitted_at))
q = q.limit(count)
if complete is not None:
if complete:
q = q.where(bs_tbl.c.complete != 0)
else:
q = q.where((bs_tbl.c.complete == 0) |
(bs_tbl.c.complete == None))
if branch:
q = q.where(ss_tbl.c.branch == branch)
if repository:
q = q.where(ss_tbl.c.repository == repository)
res = conn.execute(q)
return list(reversed([self._row2dict(row)
for row in res.fetchall()]))
return self.db.pool.do(thd)
def getBuildsetProperties(self, buildsetid):
"""
Return the properties for a buildset, in the same format they were
given to L{addBuildset}.
Note that this method does not distinguish a nonexistent buildset from
a buildset with no properties, and returns C{{}} in either case.
@param buildsetid: buildset ID
@returns: dictionary mapping property name to (value, source), via
Deferred
"""
def thd(conn):
bsp_tbl = self.db.model.buildset_properties
q = sa.select(
[bsp_tbl.c.property_name, bsp_tbl.c.property_value],
whereclause=(bsp_tbl.c.buildsetid == buildsetid))
l = []
for row in conn.execute(q):
try:
properties = json.loads(row.property_value)
l.append((row.property_name,
tuple(properties)))
except ValueError:
pass
return dict(l)
return self.db.pool.do(thd)
def _row2dict(self, row):
def mkdt(epoch):
if epoch:
return epoch2datetime(epoch)
return BsDict(external_idstring=row.external_idstring,
reason=row.reason, sourcestampsetid=row.sourcestampsetid,
submitted_at=mkdt(row.submitted_at),
complete=bool(row.complete),
complete_at=mkdt(row.complete_at), results=row.results,
bsid=row.id)
| mitya57/debian-buildbot | buildbot/db/buildsets.py | Python | gpl-2.0 | 7,930 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptDialog.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.modeler.ModelerUtils import ModelerUtils
__author__ = 'Alexander Bruy'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import codecs
import sys
import json
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qsci import *
from qgis.core import *
from qgis.utils import iface
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.HelpEditionDialog import HelpEditionDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.algs.r.RUtils import RUtils
from processing.script.ScriptAlgorithm import ScriptAlgorithm
from processing.script.ScriptUtils import ScriptUtils
from processing.ui.ui_DlgScriptEditor import Ui_DlgScriptEditor
import processing.resources_rc
class ScriptEditorDialog(QDialog, Ui_DlgScriptEditor):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
hasChanged = False
def __init__(self, algType, alg):
QDialog.__init__(self)
self.setupUi(self)
self.setWindowFlags(Qt.WindowMinimizeButtonHint |
Qt.WindowMaximizeButtonHint |
Qt.WindowCloseButtonHint)
# Set icons
self.btnSave.setIcon(
QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnSaveAs.setIcon(
QgsApplication.getThemeIcon('/mActionFileSaveAs.svg'))
self.btnEditHelp.setIcon(QIcon(':/processing/images/edithelp.png'))
self.btnRun.setIcon(QIcon(':/processing/images/runalgorithm.png'))
self.btnCut.setIcon(QgsApplication.getThemeIcon('/mActionEditCut.png'))
self.btnCopy.setIcon(
QgsApplication.getThemeIcon('/mActionEditCopy.png'))
self.btnPaste.setIcon(
QgsApplication.getThemeIcon('/mActionEditPaste.png'))
self.btnUndo.setIcon(QgsApplication.getThemeIcon('/mActionUndo.png'))
self.btnRedo.setIcon(QgsApplication.getThemeIcon('/mActionRedo.png'))
# Connect signals and slots
self.btnSave.clicked.connect(self.save)
self.btnSaveAs.clicked.connect(self.saveAs)
self.btnEditHelp.clicked.connect(self.editHelp)
self.btnRun.clicked.connect(self.runAlgorithm)
self.btnCut.clicked.connect(self.editor.cut)
self.btnCopy.clicked.connect(self.editor.copy)
self.btnPaste.clicked.connect(self.editor.paste)
self.btnUndo.clicked.connect(self.editor.undo)
self.btnRedo.clicked.connect(self.editor.redo)
self.editor.textChanged.connect(lambda: self.setHasChanged(True))
self.alg = alg
self.algType = algType
if self.alg is not None:
self.filename = self.alg.descriptionFile
self.editor.setText(self.alg.script)
else:
self.filename = None
self.update = False
self.help = None
self.setHasChanged(False)
self.editor.setLexerType(self.algType)
def editHelp(self):
if self.alg is None:
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
elif self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
else:
alg = self.alg
dlg = HelpEditionDialog(alg)
dlg.exec_()
        # We store the description strings in case they were not saved
# because there was no filename defined yet
if self.alg is None and dlg.descriptions:
self.help = dlg.descriptions
def save(self):
self.saveScript(False)
def saveAs(self):
self.saveScript(True)
def saveScript(self, saveAs):
if self.filename is None or saveAs:
if self.algType == self.SCRIPT_PYTHON:
scriptDir = ScriptUtils.scriptsFolder()
filterName = self.tr('Python scripts (*.py)')
elif self.algType == self.SCRIPT_R:
scriptDir = RUtils.RScriptsFolder()
filterName = self.tr('Processing R script (*.rsx)')
self.filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save script'), scriptDir,
filterName))
if self.filename:
if self.algType == self.SCRIPT_PYTHON \
and not self.filename.lower().endswith('.py'):
self.filename += '.py'
if self.algType == self.SCRIPT_R \
and not self.filename.lower().endswith('.rsx'):
self.filename += '.rsx'
text = unicode(self.editor.text())
if self.alg is not None:
self.alg.script = text
try:
with codecs.open(self.filename, 'w', encoding='utf-8') as fout:
fout.write(text)
except IOError:
QMessageBox.warning(self, self.tr('I/O error'),
self.tr('Unable to save edits. Reason:\n %s')
% unicode(sys.exc_info()[1]))
return
self.update = True
# If help strings were defined before saving the script for
        # the first time, we save them here
if self.help:
with open(self.filename + '.help', 'w') as f:
json.dump(self.help, f)
self.help = None
self.setHasChanged(False)
else:
self.filename = None
def setHasChanged(self, hasChanged):
self.hasChanged = hasChanged
self.btnSave.setEnabled(hasChanged)
def runAlgorithm(self):
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['script']
if self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['r']
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
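        # restore the map tool that was active before the dialog, in case the algorithm changed it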
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
| yordan-desta/QgisIns | python/plugins/processing/gui/ScriptEditorDialog.py | Python | gpl-2.0 | 7,417 |
#!/usr/bin/env python
##########################################################################
## Sequence Concat
## Author: Tyghe Vallard
## Release Date: 5/30/2012
## Version: 1.1
## Description:
## This script will merge all sequences with identical names in a
## file.
##
## Example:
## SampleFile1.fasta contents
## >identifier1
## AAAAAAAAAAAAAAAAAAAAAAAAA
## >identifier1
## TTTTTTTTTTTTTTTTTTTTTTTTT
## >identifier1
## GGGGGGGGGGGGGGGGGGGGGGGGG
## >identifier2
## CCCCCCCCCCCCCCCCCCCCCCCCC
## >identifier2
## AAAAAAAAAAAAAAAAAAAAAAAAA
##
## OUTPUT
## >identifier1
## AAAAAAAAAAAAAAAAAAAAAAAAATTTTTTTTTTTTTTTTTTTTTTTTTGGGGGGGGGGGGGGGGGGGGGGGGG
## >identifier2
## CCCCCCCCCCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAAAAAAAAAA
##
## VERSION HISTORY
## -----------------
## v1.1 - 6/08/2012
## - Added options for doing gisaid formatted files
## - Added support for different formatted required_segments
##########################################################################
import os
from optparse import OptionParser,OptionGroup
import sys
import cStringIO
from bio_pieces import fasta
from bio_pieces.fasta import UnknownIdentifierLineException
class SequenceConcat:
_fasta_file_path = None
_strip_chars = ""
_parsed_fasta = None
def __init__( self, fasta_file, file_type = 'genbank', strip_chars = "-" ):
self._fasta_file_path = fasta_file
self._strip_chars = strip_chars
if file_type == 'genbank':
try:
self._read_genbank()
except UnknownIdentifierLineException, e:
print "An unknown identifier line was encountered in the fasta file. Is this a genbank file? If so use --type genbank"
print e
sys.exit( 1 )
elif file_type == 'gisaid':
try:
self._read_gisaid()
except UnknownIdentifierLineException, e:
print "An unknown identifier line was encountered in the fasta file. Is this a gisaid file? If so use --type gisaid"
print e
sys.exit( 1 )
def _read_genbank( self ):
""" Reads the genbank file into a easy to work with dictionary """
self._parsed_fasta = fasta.read_genbank_fasta( self._fasta_file_path, self._strip_chars )
def _read_gisaid( self ):
""" Reads the gisaid file into a easy to work with dictionary """
self._parsed_fasta = fasta.read_gisaid_fasta( self._fasta_file_path, self._strip_chars )
def prune_missing( self, fasta_dict, segments_expected = None ):
"""
Prunes the dictionary of sequences so that only the sequences that have segments_expected
amount of segments are included.
Parameters:
fasta_dict - Dictionary form of a fasta file from pyWRAIRLib.parser.fasta functions
segments_expected - List of segments expected in what order(I.E [1,2,3,4,5,6,7,8] or ['PB2', 'PB1', 'PA', 'HA', 'NP', 'NA', 'MP', 'NS'])
Note:
This is an inplace operation
+++ Unit Test +++
# Try an inhouse easy test
>>> path = os.path.dirname( __file__ )
>>> s = SequenceConcat( os.path.join( path, '../example_files/example1.txt' ), 'genbank' )
>>> fasta = s.get_sequences()
>>> pruned_fasta = s.prune_missing( fasta, range( 1, 4 ) )
>>> print pruned_fasta
{'ident4': {'1': 'AAAAAAAAAAAAAAAAA', '3': 'TTTTTTTTTTTTTTTTT'}, 'ident5': {'1': 'AAAAAAAAAAAAAAAAA'}, 'ident3': {'1': 'AAAAAAAAAAAAAAAAA', '2': 'CCCCCCCCCCCCCCCCC'}}
>>> fasta != pruned_fasta
True
>>> len( fasta ) == 2
True
>>> s = SequenceConcat( os.path.join( path, '../example_files/example2.txt' ), 'gisaid' )
>>> fasta = s.get_sequences()
>>> pruned_fasta = s.prune_missing( fasta, ['A','B','C'] )
>>> print pruned_fasta
{'ident4': {'A': 'AAAAAAAAAAAAAAAAA', 'B': 'TTTTTTTTTTTTTTTTT'}, 'ident5': {'A': 'AAAAAAAAAAAAAAAAA'}, 'ident3': {'C': 'CCCCCCCCCCCCCCCCC', 'B': 'AAAAAAAAAAAAAAAAA'}}
>>> fasta != pruned_fasta
True
>>> len( fasta ) == 2
True
"""
# We will return a dictionary that contains the sequences that have missing segments
segments_missing = {}
# Delete any of the sequences names from the dictionary that
# do not have the required amount of segments
for seq_name in fasta_dict.keys():
if len( fasta_dict[seq_name] ) != len( segments_expected ):
# Copy the sequence
segments_missing[seq_name] = fasta_dict[seq_name]
# Delete it
del fasta_dict[seq_name]
return segments_missing
def get_sequences( self ):
"""
Return unmodified fasta dictionary
+++ Unit Tests +++
>>> s = SequenceConcat( '../example_files/example1.txt', 'genbank' )
>>> len( s.get_sequences() ) > 1
True
"""
return self._parsed_fasta
def get_merged_sequences( self, prune = True, segments_required = [1,2,3,4,5,6,7,8] ):
"""
Returns a merged fasta formatted file
Set prune to false if you don't want to prune out the sequences that don't have
the correct amount of segments
+++ Unit Tests +++
>>> path = os.path.dirname( __file__ )
>>> s = SequenceConcat( os.path.join( path, '../example_files/example1.txt' ), 'genbank' )
>>> print s.get_merged_sequences( True, range( 1, 4 ) )
>ident1
AAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCTTTTTTTTTTTTTTTTT
>ident2
AAAAAAAAAAAAAAAAATTTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCC
<BLANKLINE>
>>> s = SequenceConcat( '../example_files/WR2848N.fasta', 'genbank' )
>>> fh = open( '../example_files/WR2848N_merged.fasta' )
>>> WR2848N_manually_merged = fh.read()
>>> fh.close()
>>> s.get_merged_sequences( ) == WR2848N_manually_merged[:-1]
True
>>> path = os.path.dirname( __file__ )
>>> s = SequenceConcat( os.path.join( path, '../example_files/example2.txt' ), 'gisaid' )
>>> print s.get_merged_sequences( True, ['A','B','C'] )
>ident1
AAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCTTTTTTTTTTTTTTTTT
>ident2
AAAAAAAAAAAAAAAAATTTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCC
<BLANKLINE>
"""
# The fasta sequences
fasta_dict = self.get_sequences()
# Fast String writer
cs = cStringIO.StringIO()
# Return variable
output = None
# Will hold missing sequences
missing = None
# If the prune option is set then prune the sequences
if prune:
# Make sure that the segments_required is in the right format
# If string then split on ,
if type( segments_required ) == str:
                segments_required = segments_required.split( ',' )
# If already a list then it is ok
elif type( segments_required ) == list:
pass
# Bail out if it gets here
else:
print( "Invalid value for required segments" )
sys.exit( -1 )
# Prune the dictionary
missing = self.prune_missing( fasta_dict, segments_required )
# Write a header to stderr
if len( missing ):
sys.stderr.write( "==================== Sequences Missing Segments ====================\n" )
#segments_required
# Loop through the missing segment sequences
for name, segments in missing.items():
missing_segs = [str(i) for i in segments_required if str(i) not in segments]
sys.stderr.write( ">%s is missing segment[s] %s\n" % (name, ",".join( missing_segs ) ) )
# Loop through each sequence and merge the segments
for name, segments in fasta_dict.items():
cs.write( fasta.merge_segments( name, segments, segments_required ) )
output = cs.getvalue()
cs.close()
return output
################ Script Functions #######################
def set_opts( parser ):
""" Set script options """
parser.add_option( "-f", "--fasta", dest="fasta_file", help="The fasta file of the sequences to merge" )
parser.add_option( "-t", "--type", dest="db_type", default="genbank", help="What database type is this? gisaid and genbank are the only two options right now. Default: genbank" )
parser.add_option( "--strip", dest="strip_chars", default="-", help="List of characters to strip from the sequences. Default is none" )
parser.add_option( "--test", dest="test", action="store_true", help="Run the tests for this script" )
prune_group = OptionGroup( parser, "Pruning Options" )
prune_group.add_option( "--noprune", dest="prune", action="store_false", default=True, help="Don't prune out sequences that don't have the required amount of segments" )
prune_group.add_option( "--segments_required", dest="segments_required", default="1,2,3,4,5,6,7,8", help="Required segments per sequence. See README for examples. Default: 1,2,3,4,5,6,7,8" )
parser.add_option_group( prune_group )
options,args = parser.parse_args()
if not options.fasta_file and not options.test:
parser.print_help()
parser.error( "Need to specify the fasta file" )
return options
# Run The script if this script is executed
def main():
parser = OptionParser()
options = set_opts( parser )
if options.test:
import doctest
doctest.testmod()
else:
sc = SequenceConcat( options.fasta_file, options.db_type, options.strip_chars )
print sc.get_merged_sequences( options.prune, options.segments_required.split( ',' ) )
| demis001/bio_pieces | bio_pieces_old/sequence_concat.py | Python | gpl-2.0 | 10,368 |
#!/usr/bin/env python
import os, re, sys
if len(sys.argv) < 2:
print "usage: %s <recordfile>" % sys.argv[0]
sys.exit(1)
# Read record
filename = sys.argv[1]
fd = file(filename)
record = fd.read()
fd.close()
# Update revision
newrecord = []
lbreak = "\r\n"
for line in record.splitlines():
if line.startswith('# Revision:'):
rev = int(line.split(':')[1]) + 1
line = '# Revision: %u' % rev
newrecord.append(line)
newrecord = lbreak.join(newrecord)
# Setup mail values
address = '[email protected]'
ident = os.path.splitext(filename)[0]
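# the filename (minus extension) must be a freedb "<genre> <discid>" pair, e.g. "rock 12345678"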
if not re.search('^[a-z]+ [a-z0-9]{8}$', ident):
sys.exit(ident + " is not a valid freedb `discid genre' pair")
subject = "cddb %s" % ident
# Save updated record
fd = file(filename, "w")
fd.write(newrecord)
fd.close()
# Send mail
print "Subject:", subject
cmd = 'cat "%s" | mutt -s "%s" %s' % (filename, subject, address)
print "%", cmd
os.system(cmd)
| BackupTheBerlios/namingmuse | tools/freedb-submit.py | Python | gpl-2.0 | 936 |
# coding: utf-8
"""
HDL Testing Platform
REST API for HDL TP # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.task import Task # noqa: E501
from swagger_client.rest import ApiException
class TestTask(unittest.TestCase):
"""Task unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTask(self):
"""Test Task"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.task.Task() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| autosub-team/autosub | src/plugins/vels_ob/test/test_task.py | Python | gpl-2.0 | 776 |
from __future__ import print_function
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
import sqlalchemy
import sys
# This value must be incremented after schema changes on replicated tables!
SCHEMA_VERSION = 1
engine = None
def init_db_engine(connect_str):
global engine
engine = create_engine(connect_str, poolclass=NullPool)
def run_sql_script(sql_file_path):
with open(sql_file_path) as sql:
connection = engine.connect()
connection.execute(sql.read())
connection.close()
def run_sql_script_without_transaction(sql_file_path):
with open(sql_file_path) as sql:
connection = engine.connect()
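        # set_isolation_level(0) puts the underlying DBAPI (psycopg2) connection into
        # autocommit mode, needed for statements that cannot run inside a transaction block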
connection.connection.set_isolation_level(0)
lines = sql.read().splitlines()
try:
for line in lines:
# TODO: Not a great way of removing comments. The alternative is to catch
# the exception sqlalchemy.exc.ProgrammingError "can't execute an empty query"
if line and not line.startswith("--"):
connection.execute(line)
except sqlalchemy.exc.ProgrammingError as e:
print("Error: {}".format(e))
return False
finally:
connection.connection.set_isolation_level(1)
connection.close()
return True
| Freso/listenbrainz-server | messybrainz/db/__init__.py | Python | gpl-2.0 | 1,338 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.configuration import Configuration
class JobsEventModelConfigurationCreator(object):
_model_name = 'agent_event_model'
def __init__(self,
location_set = 'gridcell',
agent_set = 'job',
agent_event_set = 'jobs_event'):
self.location_set = location_set
self.agent_event_set = agent_event_set
self.agent_set = agent_set
def execute(self):
return Configuration({
'import': {
'washtenaw.models.%s' % self._model_name: 'AgentEventModel'
},
'init': {'name': 'AgentEventModel'},
'run': {
'arguments': {
'location_set': self.location_set,
'agent_event_set': self.agent_event_set,
'agent_set':self.agent_set,
'current_year': 'year',
'dataset_pool': 'dataset_pool'
}
}
})
from opus_core.tests import opus_unittest
class TestDeletionEventModelConfigurationCreator(opus_unittest.OpusTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_defaults(self):
creator = JobsEventModelConfigurationCreator()
expected = Configuration({
'import': {
'washtenaw.models.agent_event_model': 'AgentEventModel'
},
'init': {'name': 'AgentEventModel'},
'run': {
'arguments': {
'location_set': 'gridcell',
'agent_event_set': 'jobs_event',
'agent_set':'job',
'current_year': 'year',
'dataset_pool': 'dataset_pool'
}
}
})
result = creator.execute()
self.assertDictsEqual(result, expected)
if __name__ == '__main__':
opus_unittest.main()
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/washtenaw/configurations/jobs_event_model_configuration_creator.py | Python | gpl-2.0 | 2,232 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os.path
import gtk
import logging
import pango
from gtk import gdk
from locale import strcoll
from translate.lang import factory as lang_factory
from translate.storage import factory as store_factory
from virtaal.common.pan_app import ui_language
from virtaal.views.baseview import BaseView
from virtaal.views import rendering
from virtaal.views.theme import current_theme
class LocalFileView:
"""
    Class that manages the localfile terminology plug-in's GUI presence and interaction.
"""
# INITIALIZERS #
def __init__(self, model):
self.term_model = model
self.controller = model.controller
self.mainview = model.controller.main_controller.view
self._signal_ids = []
self._setup_menus()
self.addterm = TermAddDialog(model=model)
self.fileselect = FileSelectDialog(model=model)
# METHODS #
def _setup_menus(self):
mnu_transfer = self.mainview.gui.get_widget('mnu_placnext')
self.mnui_edit = self.mainview.gui.get_widget('menuitem_edit')
self.menu = self.mnui_edit.get_submenu()
self.mnu_select_files, _menu = self.mainview.find_menu_item(_('Terminology _Files...'), self.mnui_edit)
if not self.mnu_select_files:
self.mnu_select_files = self.mainview.append_menu_item(_('Terminology _Files...'), self.mnui_edit, after=mnu_transfer)
self._signal_ids.append((
self.mnu_select_files,
self.mnu_select_files.connect('activate', self._on_select_term_files)
))
self.mnu_add_term, _menu = self.mainview.find_menu_item(_('Add _Term...'), self.mnui_edit)
if not self.mnu_add_term:
self.mnu_add_term = self.mainview.append_menu_item(_('Add _Term...'), self.mnui_edit, after=mnu_transfer)
self._signal_ids.append((
self.mnu_add_term,
self.mnu_add_term.connect('activate', self._on_add_term)
))
gtk.accel_map_add_entry("<Virtaal>/Terminology/Add Term", gtk.keysyms.t, gdk.CONTROL_MASK)
accel_group = self.menu.get_accel_group()
if accel_group is None:
accel_group = gtk.AccelGroup()
self.menu.set_accel_group(accel_group)
self.mnu_add_term.set_accel_path("<Virtaal>/Terminology/Add Term")
self.menu.set_accel_group(accel_group)
def destroy(self):
for gobj, signal_id in self._signal_ids:
gobj.disconnect(signal_id)
self.menu.remove(self.mnu_select_files)
self.menu.remove(self.mnu_add_term)
# EVENT HANDLERS #
def _on_add_term(self, menuitem):
self.addterm.run(parent=self.mainview.main_window)
def _on_select_term_files(self, menuitem):
self.fileselect.run(parent=self.mainview.main_window)
class FileSelectDialog:
"""
Wrapper for the selection dialog, created in Glade, to manage the list of
files used by this plug-in.
"""
COL_FILE, COL_EXTEND = range(2)
# INITIALIZERS #
def __init__(self, model):
self.controller = model.controller
self.term_model = model
self.gladefilename, self.gui = BaseView.load_glade_file(
["virtaal", "virtaal.glade"],
root='TermFilesDlg',
domain='virtaal'
)
self._get_widgets()
self._init_treeview()
self._init_add_chooser()
def _get_widgets(self):
widget_names = ('btn_add_file', 'btn_remove_file', 'btn_open_termfile', 'tvw_termfiles')
for name in widget_names:
setattr(self, name, self.gui.get_widget(name))
self.dialog = self.gui.get_widget('TermFilesDlg')
self.btn_add_file.connect('clicked', self._on_add_file_clicked)
self.btn_remove_file.connect('clicked', self._on_remove_file_clicked)
self.btn_open_termfile.connect('clicked', self._on_open_termfile_clicked)
self.tvw_termfiles.get_selection().connect('changed', self._on_selection_changed)
def _init_treeview(self):
self.lst_files = gtk.ListStore(str, bool)
self.tvw_termfiles.set_model(self.lst_files)
cell = gtk.CellRendererText()
cell.props.ellipsize = pango.ELLIPSIZE_MIDDLE
col = gtk.TreeViewColumn(_('File'))
col.pack_start(cell)
col.add_attribute(cell, 'text', self.COL_FILE)
col.set_expand(True)
col.set_sort_column_id(0)
self.tvw_termfiles.append_column(col)
cell = gtk.CellRendererToggle()
cell.set_radio(True)
cell.connect('toggled', self._on_toggle)
col = gtk.TreeViewColumn(_('Extendable'))
col.pack_start(cell)
col.add_attribute(cell, 'active', self.COL_EXTEND)
col.set_expand(False)
self.tvw_termfiles.append_column(col)
extend_file = self.term_model.config.get('extendfile', '')
files = self.term_model.config['files']
for f in files:
self.lst_files.append([f, f == extend_file])
# If there was no extend file, select the first one
for row in self.lst_files:
if row[self.COL_EXTEND]:
break
else:
itr = self.lst_files.get_iter_first()
if itr and self.lst_files.iter_is_valid(itr):
self.lst_files.set_value(itr, self.COL_EXTEND, True)
self.term_model.config['extendfile'] = self.lst_files.get_value(itr, self.COL_FILE)
self.term_model.save_config()
def _init_add_chooser(self):
# The following code was mostly copied from virtaal.views.MainView._create_dialogs()
dlg = gtk.FileChooserDialog(
_('Add Files'),
self.controller.main_controller.view.main_window,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK)
)
dlg.set_default_response(gtk.RESPONSE_OK)
all_supported_filter = gtk.FileFilter()
all_supported_filter.set_name(_("All Supported Files"))
dlg.add_filter(all_supported_filter)
supported_files_dict = dict([ (_(name), (extension, mimetype)) for name, extension, mimetype in store_factory.supported_files() ])
supported_file_names = supported_files_dict.keys()
supported_file_names.sort(cmp=strcoll)
for name in supported_file_names:
extensions, mimetypes = supported_files_dict[name]
            #XXX: we can't open generic .csv formats, so listing it probably
            # does more harm than good.
if "csv" in extensions:
continue
new_filter = gtk.FileFilter()
new_filter.set_name(name)
if extensions:
for extension in extensions:
new_filter.add_pattern("*." + extension)
all_supported_filter.add_pattern("*." + extension)
for compress_extension in store_factory.decompressclass.keys():
new_filter.add_pattern("*.%s.%s" % (extension, compress_extension))
all_supported_filter.add_pattern("*.%s.%s" % (extension, compress_extension))
if mimetypes:
for mimetype in mimetypes:
new_filter.add_mime_type(mimetype)
all_supported_filter.add_mime_type(mimetype)
dlg.add_filter(new_filter)
all_filter = gtk.FileFilter()
all_filter.set_name(_("All Files"))
all_filter.add_pattern("*")
dlg.add_filter(all_filter)
dlg.set_select_multiple(True)
self.add_chooser = dlg
# METHODS #
def clear_selection(self):
self.tvw_termfiles.get_selection().unselect_all()
def run(self, parent=None):
if isinstance(parent, gtk.Widget):
self.dialog.set_transient_for(parent)
self.clear_selection()
self.dialog.show_all()
self.dialog.run()
self.dialog.hide()
# EVENT HANDLERS #
def _on_add_file_clicked(self, button):
self.add_chooser.show_all()
response = self.add_chooser.run()
self.add_chooser.hide()
if response != gtk.RESPONSE_OK:
return
mainview = self.term_model.controller.main_controller.view
currfiles = [row[self.COL_FILE] for row in self.lst_files]
for filename in self.add_chooser.get_filenames():
if filename in currfiles:
continue
# Try and open filename as a translation store
try:
if not os.path.isfile(filename):
raise IOError(_('"%s" is not a usable file.') % filename)
store = store_factory.getobject(filename)
currfiles.append(filename)
self.lst_files.append([filename, False])
except Exception, exc:
message = _('Unable to load %(filename)s:\n\n%(errormsg)s') % {'filename': filename, 'errormsg': str(exc)}
mainview.show_error_dialog(title=_('Error opening file'), message=message)
self.term_model.config['files'] = currfiles
self.term_model.save_config()
self.term_model.load_files() # FIXME: This could be optimized to only load and add the new selected files.
def _on_remove_file_clicked(self, button):
model, selected = self.tvw_termfiles.get_selection().get_selected()
if not selected:
return
remfile = model.get_value(selected, self.COL_FILE)
extend = model.get_value(selected, self.COL_EXTEND)
self.term_model.config['files'].remove(remfile)
if extend:
self.term_model.config['extendfile'] = ''
itr = model.get_iter_first()
if itr and model.iter_is_valid(itr):
model.set_value(itr, self.COL_EXTEND, True)
self.term_model.config['extendfile'] = model.get_value(itr, self.COL_FILE)
self.term_model.save_config()
self.term_model.load_files() # FIXME: This could be optimized to only remove the selected file from the terminology matcher.
model.remove(selected)
def _on_open_termfile_clicked(self, button):
selection = self.tvw_termfiles.get_selection()
model, itr = selection.get_selected()
if itr is None:
return
selected_file = model.get_value(itr, self.COL_FILE)
self.term_model.controller.main_controller.open_file(selected_file)
def _on_selection_changed(self, treesel):
model, itr = treesel.get_selected()
enabled = itr is not None
self.btn_open_termfile.set_sensitive(enabled)
self.btn_remove_file.set_sensitive(enabled)
def _on_toggle(self, renderer, path):
toggled_file = self.lst_files.get_value(self.lst_files.get_iter(path), self.COL_FILE)
itr = self.lst_files.get_iter_first()
while itr is not None and self.lst_files.iter_is_valid(itr):
self.lst_files.set_value(itr, self.COL_EXTEND, self.lst_files.get_value(itr, self.COL_FILE) == toggled_file)
itr = self.lst_files.iter_next(itr)
self.term_model.config['extendfile'] = toggled_file
self.term_model.save_config()
class TermAddDialog:
"""
Wrapper for the dialog used to add a new term to the terminology file.
"""
# INITIALIZERS #
def __init__(self, model):
self.term_model = model
self.lang_controller = model.controller.main_controller.lang_controller
self.unit_controller = model.controller.main_controller.unit_controller
self.gladefilename, self.gui = BaseView.load_glade_file(
["virtaal", "virtaal.glade"],
root='TermAddDlg',
domain='virtaal'
)
self._get_widgets()
def _get_widgets(self):
widget_names = (
'btn_add_term', 'cmb_termfile', 'eb_add_term_errors', 'ent_source',
'ent_target', 'lbl_add_term_errors', 'lbl_srclang', 'lbl_tgtlang',
'txt_comment'
)
for name in widget_names:
setattr(self, name, self.gui.get_widget(name))
self.dialog = self.gui.get_widget('TermAddDlg')
cellr = gtk.CellRendererText()
cellr.props.ellipsize = pango.ELLIPSIZE_MIDDLE
self.lst_termfiles = gtk.ListStore(str)
self.cmb_termfile.set_model(self.lst_termfiles)
self.cmb_termfile.pack_start(cellr)
self.cmb_termfile.add_attribute(cellr, 'text', 0)
self.ent_source.connect('changed', self._on_entry_changed)
self.ent_target.connect('changed', self._on_entry_changed)
# METHODS #
def add_term_unit(self, source, target):
filename = self.cmb_termfile.get_active_text()
store = self.term_model.get_store_for_filename(filename)
if store is None:
logging.debug('No terminology store to extend :(')
return
unit = store.addsourceunit(source)
unit.target = target
buff = self.txt_comment.get_buffer()
comments = buff.get_text(buff.get_start_iter(), buff.get_end_iter())
if comments:
unit.addnote(comments)
store.save()
self.term_model.matcher.extendtm(unit)
#logging.debug('Added new term: [%s] => [%s], file=%s' % (source, target, store.filename))
def reset(self):
unitview = self.unit_controller.view
source_text = u''
for src in unitview.sources:
selection = src.buffer.get_selection_bounds()
if selection:
source_text = src.get_text(*selection)
break
self.ent_source.modify_font(rendering.get_source_font_description())
self.ent_source.set_text(source_text.strip())
target_text = u''
for tgt in unitview.targets:
selection = tgt.buffer.get_selection_bounds()
if selection:
target_text = tgt.get_text(*selection)
break
self.ent_target.modify_font(rendering.get_target_font_description())
self.ent_target.set_text(target_text.strip())
self.txt_comment.get_buffer().set_text('')
self.eb_add_term_errors.hide()
self.btn_add_term.props.sensitive = True
self.lbl_srclang.set_text_with_mnemonic(_(u'_Source term — %(langname)s') % {'langname': self.lang_controller.source_lang.name})
self.lbl_tgtlang.set_text_with_mnemonic(_(u'_Target term — %(langname)s') % {'langname': self.lang_controller.target_lang.name})
self.lst_termfiles.clear()
extendfile = self.term_model.config.get('extendfile', None)
select_index = -1
i = 0
for f in self.term_model.config['files']:
if f == extendfile:
select_index = i
self.lst_termfiles.append([f])
i += 1
if select_index >= 0:
self.cmb_termfile.set_active(select_index)
def run(self, parent=None):
self.reset()
if isinstance(parent, gtk.Widget):
self.dialog.set_transient_for(parent)
self.dialog.show()
self._on_entry_changed(None)
self.ent_source.grab_focus()
response = self.dialog.run()
self.dialog.hide()
if response != gtk.RESPONSE_OK:
return
self.add_term_unit(self.ent_source.get_text(), self.ent_target.get_text())
# EVENT HANDLERS #
def _on_entry_changed(self, entry):
self.btn_add_term.props.sensitive = True
self.eb_add_term_errors.hide()
src_text = self.ent_source.get_text()
tgt_text = self.ent_target.get_text()
dup = self.term_model.get_duplicates(src_text, tgt_text)
if dup:
self.lbl_add_term_errors.set_text(_('Identical entry already exists.'))
self.eb_add_term_errors.modify_bg(gtk.STATE_NORMAL, gdk.color_parse(current_theme['warning_bg']))
self.eb_add_term_errors.show_all()
self.btn_add_term.props.sensitive = False
return
same_src_units = self.term_model.get_units_with_source(src_text)
if src_text and same_src_units:
# We want to separate multiple terms with the correct list
# separator for the UI language:
separator = lang_factory.getlanguage(ui_language).listseperator
#l10n: The variable is an existing term formatted for emphasis. The default is bold formatting, but you can remove/change the markup if needed. Leave it unchanged if you are unsure.
translations = separator.join([_('<b>%s</b>') % (u.target) for u in same_src_units])
errormsg = _('Existing translations: %(translations)s') % {
'translations': translations
}
self.lbl_add_term_errors.set_markup(errormsg)
self.eb_add_term_errors.modify_bg(gtk.STATE_NORMAL, gdk.color_parse(current_theme['warning_bg']))
self.eb_add_term_errors.show_all()
return
| elric/virtaal-debian | virtaal/plugins/terminology/models/localfile/localfileview.py | Python | gpl-2.0 | 17,737 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Campaign.photo'
db.delete_column(u'campaign_campaign', 'photo')
# Adding field 'Campaign.image'
db.add_column(u'campaign_campaign', 'image',
self.gf('cloudinary.models.CloudinaryField')(default=0, max_length=100),
keep_default=False)
def backwards(self, orm):
# Adding field 'Campaign.photo'
db.add_column(u'campaign_campaign', 'photo',
self.gf('cloudinary.models.CloudinaryField')(default=0, max_length=100),
keep_default=False)
# Deleting field 'Campaign.image'
db.delete_column(u'campaign_campaign', 'image')
models = {
u'campaign.campaign': {
'Meta': {'object_name': 'Campaign'},
'goal': ('django.db.models.fields.DecimalField', [], {'max_digits': '15', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('cloudinary.models.CloudinaryField', [], {'max_length': '100'}),
'message': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['campaign'] | fandrefh/AnjoMeu | anjo/campaign/migrations/0003_auto__del_field_campaign_photo__add_field_campaign_image.py | Python | gpl-2.0 | 1,508 |
"""
Motion correction of image sequences by 'efficient subpixel image registration
by cross correlation'. A reference image is iteratively computed by aligning
and averaging a subset of images/frames.
2015 Lloyd Russell, Christoph Schmidt-Hieber
*******************************************************************************
Credit to Marius Pachitariu for concept of registering to aligned mean image.
Credit to Olivier Dupont-Therrien, Doric Lenses Inc., for concept of applying
Gaussian blur & Laplacian to eliminate static inhomogeneities.
Parts of the code are based on:
skimage.feature.register_translation, which is a port of MATLAB code by Manuel
Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup, "Efficient subpixel
image registration algorithms," Optics Letters 33, 156-158 (2008).
Relating to implementation of skimage.feature.register_translation:
Copyright (C) 2011, the scikit-image team
All rights reserved.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*******************************************************************************
@author: llerussell
"""
from __future__ import absolute_import, division
from builtins import map, range
from functools import partial
import multiprocessing
import numpy as np
from scipy.ndimage.interpolation import shift
from scipy.ndimage import laplace
from scipy.ndimage import gaussian_filter
import time
from . import motion
try:
from pyfftw.interfaces.numpy_fft import fftn, ifftn
except ImportError:
from numpy.fft import fftn, ifftn
class DiscreteFourier2D(motion.MotionEstimationStrategy):
"""
Motion correction of image sequences by 'efficient subpixel image
registration by cross correlation'. A reference image is iteratively
computed by aligning and averaging a subset of images/frames.
Parameters
----------
upsample_factor : int, optional
upsample factor. final pixel alignment has resolution of
1/upsample_factor. if 1 only pixel level shifts are made - faster -
and no interpolation. Default: 1.
max_displacement : array of int, optional
The maximum allowed displacement magnitudes in [y,x]. Default: None.
num_images_for_mean : int, optional
number of images to use to make the aligned mean image. Default: 100.
randomise_frames : bool, optional
randomise the images selected to make the mean image? if false the
first 'num_frames_for_mean' frames will be used. Default: True.
err_thresh : float, optional
the threshold of mean pixel offset at which to stop aligning the mean
image. Default: 0.01.
max_iterations : int, optional
the maximum number of iterations to compute the aligned mean image.
Default: 5.
rotation_scaling : bool, optional
not yet implemented. Default: False.
save_name : string, optional
the file name for saving the final registered array of images to disk
from within method. If None or 'none', the array will not be saved.
Default: None.
save_fmt : string, optional
the tiff format to save as. options include 'mptiff', 'bigtiff',
'singles'. Default: 'mptiff'.
n_processes : int, optional
number of workers to use (multiprocessing). Default: 1.
verbose : bool, optional
enable verbose mode. Default: False.
return_registered : bool, optional
return registered frames? Default: False.
laplace : float, optional
Sigma of Gaussian. If positive, apply Gaussian blur & laplacian to all
images before computing the cross correlation. This step is useful to
eliminate static inhomogeneities (such as vignetting) from images.
Typical use case includes single-photon widefield microendoscope imaging
through a GRIN lens. Default: 0.0
References
----------
Parts of the code are based on:
skimage.feature.register_translation, which is a port of MATLAB code
by Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms," Optics Letters 33,
156-158 (2008).
"""
def __init__(self, upsample_factor=1, max_displacement=None,
num_images_for_mean=100,
randomise_frames=True, err_thresh=0.01, max_iterations=5,
rotation_scaling=False, save_fmt='mptiff', save_name=None,
n_processes=1, verbose=False, return_registered=False,
laplace=0.0):
self._params = dict(locals())
del self._params['self']
def _estimate(self, dataset):
"""
        Parameters
        ----------
        dataset : sima.ImagingDataset
            the dataset to be motion corrected; each sequence is indexed
            as (frame, plane, row, column, channel)
        Returns
        -------
        displacements : list of arrays
            one (num_frames, num_planes, 2) array per sequence giving the
            estimated [y, x] displacement of each frame
"""
params = self._params
verbose = params['verbose']
n_processes = params['n_processes']
if verbose:
print('Using ' + str(n_processes) + ' worker(s)')
displacements = []
for sequence in dataset:
num_planes = sequence.shape[1]
num_channels = sequence.shape[4]
if num_channels > 1:
                raise NotImplementedError(
                    "Error: only one colour channel can be used for "
                    "DFT motion correction.")
for plane_idx in range(num_planes):
# load into memory... need to pass numpy array to dftreg.
# could(should?) rework it to instead accept tiff array
if verbose:
print('Loading plane ' + str(plane_idx + 1) + ' of ' +
str(num_planes) + ' into numpy array')
t0 = time.time()
# reshape, one plane at a time
frames = np.array(sequence[:, plane_idx, :, :, 0])
frames = np.squeeze(frames)
e1 = time.time() - t0
if verbose:
print(' Loaded in: ' + str(e1) + ' s')
# do the registering
# registered_frames return is useless, sima later uses the
# displacements to shift the image (apply_displacements in
# sima/sequence.py: _align method of _MotionCorrectedSequence
# class) but this shifting is only pixel-level, much better
# results if sub-pixel were possible - replace sima's way of
# shifting? this may run into problems when sima then crops the
# final image so no empty rows/columns at edge of any frame in
# the video (trim_criterion)
if params['laplace'] > 0:
framesl = np.array([
np.abs(laplace(gaussian_filter(frame, params['laplace'])))
for frame in frames])
else:
framesl = frames
output = _register(
framesl,
upsample_factor=params['upsample_factor'],
max_displacement=params['max_displacement'],
num_images_for_mean=params['num_images_for_mean'],
randomise_frames=params['randomise_frames'],
err_thresh=params['err_thresh'],
max_iterations=params['max_iterations'],
n_processes=params['n_processes'],
save_fmt=params['save_fmt'],
save_name=params['save_name'],
verbose=params['verbose'],
return_registered=params['return_registered'])
# sort results
if params['return_registered']:
dy, dx, registered_frames = output
else:
dy, dx = output
# get results into a shape sima likes
frame_shifts = np.zeros([len(frames), num_planes, 2])
for idx, frame in enumerate(sequence):
frame_shifts[idx, plane_idx] = [dy[idx], dx[idx]]
displacements.append(frame_shifts)
total_time = time.time() - t0
if verbose:
print(' Total time for plane ' + str(plane_idx + 1) + ': ' +
str(total_time) + ' s')
return displacements
def _register(frames, upsample_factor=1, max_displacement=None,
num_images_for_mean=100, randomise_frames=True, err_thresh=0.01,
max_iterations=5, rotation_scaling=False, save_fmt='mptiff',
save_name=None, n_processes=1, verbose=False,
return_registered=False):
"""
Master function. Make aligned mean image. Register each frame in input
array to aligned mean image.
Parameters
----------
frames : np.ndarray
the frames to align (shape: frames, 1, rows, columns)
    upsample_factor : int, optional
upsample factor. final pixel alignment has resolution of
1/upsample_factor. if 1 only pixel level shifts are made - faster -
and no interpolation. Default: 1.
num_images_for_mean : int, optional
number of images to use to make the aligned mean image. Default: 100.
randomise_frames : bool, optional
randomise the images selected to make the mean image? if false the
first 'num_frames_for_mean' frames will be used. Default: True.
err_thresh : float, optional
the threshold of mean pixel offset at which to stop aligning the mean
image. Default: 0.01.
max_iterations : int, optional
the maximum number of iterations to compute the aligned mean image.
Default: 5.
rotation_scaling : bool, optional
not yet implemented. Default: False.
save_name : string, optional
the file name for saving the final registered array of images to disk
from within method. If None or 'none', the array will not be saved.
Default: None.
save_fmt : string, optional
the tiff format to save as. options include 'mptiff', 'bigtiff',
'singles'. Default: 'mptiff'
n_processes : int, optional
number of workers to use (multiprocessing). Default: 1.
verbose : bool, optional
enable verbose mode. Default: False.
return_registered : bool, optional
return registered frames? Default: False.
Returns
-------
dx : float array
horizontal pixel offsets. shift the target image by this amount to
align with reference
dy : float array
vertical pixel offsets. shift the target image by this amount to align
with reference
registered_frames : np.ndarray
the aligned frames
"""
# start timer
t0 = time.time()
# make a mean image
mean_img = _make_mean_img(frames,
num_images_for_mean=num_images_for_mean,
randomise_frames=randomise_frames,
err_thresh=err_thresh,
max_iterations=max_iterations,
upsample_factor=upsample_factor,
n_processes=n_processes,
max_displacement=max_displacement,
verbose=verbose)
e1 = time.time() - t0
if verbose:
print(' Time taken: ' + str(e1) + ' s')
# register all frames
output = _register_all_frames(frames, mean_img,
upsample_factor=upsample_factor,
n_processes=n_processes,
max_displacement=max_displacement,
verbose=verbose,
return_registered=return_registered)
# sort results
if return_registered:
dy, dx, registered_frames = output
else:
dy, dx = output
e2 = time.time() - t0 - e1
if verbose:
print(' Time taken: ' + str(e2) + ' s')
# save?
if return_registered:
if save_name is not None and save_name != 'none':
_save_registered_frames(registered_frames, save_name, save_fmt,
verbose=verbose)
e3 = time.time() - t0 - e1 - e2
if verbose:
print(' Time taken: ' + str(e3) + ' s')
total_time = time.time() - t0
if verbose:
print(' Completed in: ' + str(total_time) + ' s')
if return_registered:
return dy, dx, registered_frames
else:
return dy, dx
def _make_mean_img(frames, num_images_for_mean=100, randomise_frames=True,
err_thresh=0.01, max_iterations=5, upsample_factor=1,
n_processes=1, max_displacement=None, verbose=False):
"""
Make an aligned mean image to use as reference to which all frames are
later aligned.
Parameters
----------
frames : np.ndarray
the frames to align (shape: frames, 1, rows, columns)
num_images_for_mean : int, optional
how many images are used to make the mean reference image.
Default: 100.
randomise_frames : bool, optional
randomise the frames used to make the mean image? If False the first
N images are used Default: True.
err_thresh : float, optional
the threshold of mean pixel offset at which to stop aligning the mean
image. Default: 0.01.
max_iterations : int, optional
number of maximum iterations, if error threshold is never met
Default: 5.
n_processes : int, optional
number of processes to work on the registration in parallel
Default: 1
Returns
-------
mean_img : np.ndarray (size of input images)
the final aligned mean image
"""
input_shape = frames.shape
input_dtype = np.array(frames[0]).dtype
if num_images_for_mean > input_shape[0]:
num_images_for_mean = input_shape[0]
frames_for_mean = np.zeros([num_images_for_mean, input_shape[1],
input_shape[2]], dtype=input_dtype)
if randomise_frames:
if verbose:
print(' Making aligned mean image from ' +
str(num_images_for_mean) + ' random frames...')
for idx, frame_num in enumerate(np.random.choice(input_shape[0],
size=num_images_for_mean,
replace=False)):
frames_for_mean[idx] = frames[frame_num]
else:
if verbose:
print(' Making aligned mean image from first ' +
str(num_images_for_mean) + ' frames...')
frames_for_mean = frames[0:num_images_for_mean]
mean_img = np.mean(frames_for_mean, 0)
iteration = 1
mean_img_err = 9999
while mean_img_err > err_thresh and iteration < max_iterations:
map_function = partial(_register_frame, mean_img=mean_img,
upsample_factor=upsample_factor,
max_displacement=max_displacement,
return_registered=True)
if n_processes > 1:
# configure pool of workers (multiprocessing)
pool = multiprocessing.Pool(n_processes)
results = pool.map(map_function, frames_for_mean)
pool.close()
else:
results = map(map_function, frames_for_mean)
# preallocate the results array
mean_img_dx = np.zeros(num_images_for_mean, dtype=np.float)
mean_img_dy = np.zeros(num_images_for_mean, dtype=np.float)
# get results (0: dy, 1: dx, 2: registered image)
for idx, result in enumerate(results):
mean_img_dy[idx] = result[0]
mean_img_dx[idx] = result[1]
frames_for_mean[idx] = result[2]
# make the new (improved) mean image
mean_img = np.mean(frames_for_mean, 0)
mean_img_err = np.mean(
np.absolute(mean_img_dx)) + np.mean(np.absolute(mean_img_dy))
if verbose:
print(' Iteration ' + str(iteration) +
', average error: ' + str(mean_img_err) + ' pixels')
iteration += 1
return mean_img
def _register_all_frames(frames, mean_img, upsample_factor=1,
n_processes=1, max_displacement=None,
return_registered=False,
verbose=False):
"""
Register all input frames to the computed aligned mean image.
    Parameters
    ----------
    frames : np.ndarray
        the frames to align (shape: frames, 1, rows, columns)
    mean_img : np.ndarray
        the aligned mean image used as the registration reference
    n_processes : int, optional
        number of processes to work on the registration in parallel.
        Default: 1.
    Returns
    -------
    dy : float array
        array of y pixel offsets for each frame
    dx : float array
        array of x pixel offsets for each frame
    registered_frames : np.ndarray (size of input images)
        array containing each aligned frame (only if return_registered)
"""
input_shape = frames.shape
input_dtype = np.array(frames[0]).dtype
if verbose:
print(' Registering all ' + str(frames.shape[0]) + ' frames...')
map_function = partial(_register_frame, mean_img=mean_img,
upsample_factor=upsample_factor,
max_displacement=max_displacement,
return_registered=return_registered)
if n_processes > 1:
# configure pool of workers (multiprocessing)
pool = multiprocessing.Pool(n_processes)
results = pool.map(map_function, frames)
pool.close()
else:
results = map(map_function, frames)
# preallocate arrays
dx = np.zeros(input_shape[0], dtype=np.float)
dy = np.zeros(input_shape[0], dtype=np.float)
if return_registered:
registered_frames = np.zeros([input_shape[0], input_shape[1],
input_shape[2]], dtype=input_dtype)
# get results (0: dy, 1: dx, 2: registered image)
for idx, result in enumerate(results):
dy[idx] = result[0]
dx[idx] = result[1]
registered_frames[idx] = result[2]
return dy, dx, registered_frames
else:
# get results (0: dy, 1: dx)
for idx, result in enumerate(results):
dy[idx] = result[0]
dx[idx] = result[1]
return dy, dx
def _register_frame(frame, mean_img, upsample_factor=1,
max_displacement=None,
return_registered=False):
"""
Called by _make_mean_img and _register_all_frames
"""
# compute the offsets
dy, dx = _register_translation(mean_img, frame,
upsample_factor=upsample_factor)
if max_displacement is not None:
if dy > max_displacement[0]:
dy = max_displacement[0]
# dy = 0
if dx > max_displacement[1]:
dx = max_displacement[1]
# dx = 0
if return_registered:
registered_frame = shift(frame,
[dy, dx],
order=3,
mode='constant',
cval=0,
output=frame.dtype)
return dy, dx, registered_frame
else:
return dy, dx
def _upsampled_dft(data, upsampled_region_size,
upsample_factor=1, axis_offsets=None):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
operations were performed:
- Embed the array "data" in an array that is ``upsample_factor`` times
larger in each dimension. ifftshift to bring the center of the
image to (1,1).
- Take the FFT of the larger array.
- Extract an ``[upsampled_region_size]`` region of the result, starting
with the ``[axis_offsets+1]`` element.
It achieves this result by computing the DFT in the output array without
the need to zeropad. Much faster and memory efficient than the zero-padded
FFT approach if ``upsampled_region_size`` is much smaller than
``data.size * upsample_factor``.
Parameters
----------
data : 2D ndarray
The input data array (DFT of original data) to upsample.
upsampled_region_size : integer or tuple of integers
The size of the region to be sampled. If one integer is provided, it
is duplicated up to the dimensionality of ``data``.
upsample_factor : integer, optional
The upsampling factor. Default: 1.
axis_offsets : tuple of integers, optional
The offsets of the region to be sampled. Default: None (uses
image center)
Returns
-------
output : 2D ndarray
The upsampled DFT of the specified region.
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size, ] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal "
"to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0, ] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input "
"data's number of dimensions.")
col_kernel = np.exp(
(-1j * 2 * np.pi / (data.shape[1] * upsample_factor)) *
(np.fft.ifftshift(np.arange(data.shape[1]))[:, None] -
np.floor(data.shape[1] / 2)).dot(
np.arange(upsampled_region_size[1])[None, :] - axis_offsets[1])
)
row_kernel = np.exp(
(-1j * 2 * np.pi / (data.shape[0] * upsample_factor)) *
(np.arange(upsampled_region_size[0])[:, None] - axis_offsets[0]).dot(
np.fft.ifftshift(np.arange(data.shape[0]))[None, :] -
np.floor(data.shape[0] / 2))
)
row_kernel_dot = row_kernel.dot(data)
return row_kernel_dot.dot(col_kernel) # hangs here when multiprocessing
def _compute_phasediff(cross_correlation_max):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Compute global phase difference between the two images (should be
zero if images are non-negative).
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
"""
return np.arctan2(cross_correlation_max.imag, cross_correlation_max.real)
def _compute_error(cross_correlation_max, src_amp, target_amp):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Compute RMS error metric between ``src_image`` and ``target_image``.
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
src_amp : float
The normalized average image intensity of the source image
target_amp : float
The normalized average image intensity of the target image
"""
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\
(src_amp * target_amp)
return np.sqrt(np.abs(error))
def _register_translation(src_image, target_image, upsample_factor=1,
space="real"):
"""
*****************************************
From skimage.feature.register_translation
*****************************************
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
Parameters
----------
src_image : ndarray
Reference image.
target_image : ndarray
Image to register. Must be same dimensionality as ``src_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default: 1 (no upsampling).
space : string, one of "real" or "fourier"
Defines how the algorithm interprets input data. "real" means data
will be FFT'd to compute the correlation, while "fourier" data will
bypass FFT of input data. Case insensitive. Default: "real".
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``target_image`` with
``src_image``. Axis ordering is consistent with numpy (e.g. Z, Y, X)
error : float
Translation invariant normalized RMS error between ``src_image`` and
``target_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008).
"""
# images must be the same shape
if src_image.shape != target_image.shape:
raise ValueError("Error: images must be same size for "
"register_translation")
# only 2D data makes sense right now
if src_image.ndim != 2 and upsample_factor > 1:
raise NotImplementedError("Error: register_translation only supports "
"subpixel registration for 2D images")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = src_image
target_freq = target_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_image = np.array(src_image, dtype=np.complex128, copy=False)
target_image = np.array(target_image, dtype=np.complex128, copy=False)
src_freq = fftn(src_image)
target_freq = fftn(target_image)
else:
raise ValueError("Error: register_translation only knows the \"real\" "
"and \"fourier\" values for the ``space`` argument.")
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
cross_correlation = ifftn(image_product)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
if upsample_factor == 1:
src_amp = np.sum(np.abs(src_freq) ** 2) / src_freq.size
target_amp = np.sum(np.abs(target_freq) ** 2) / target_freq.size
# CCmax = cross_correlation.max()
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
cross_correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.array(np.unravel_index(
np.argmax(np.abs(cross_correlation)),
cross_correlation.shape),
dtype=np.float64)
maxima -= dftshift
shifts = shifts + maxima / upsample_factor
# CCmax = cross_correlation.max()
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
return shifts
# _compute_error(CCmax, src_amp, target_amp)
# _compute_phasediff(CCmax)
def _save_registered_frames(frames, save_name, save_fmt, verbose=False):
"""
Save. Only use for debugging.
    Parameters
    ----------
    frames : np.ndarray
        the registered frames to save
    save_name : string
        base name for the output TIFF file(s)
    save_fmt : string
        one of 'singles', 'mptiff' or 'bigtiff'
"""
if verbose:
print(' Saving...')
try: # this is ugly
import tifffile
except ImportError:
try:
from sima.misc import tifffile
except ImportError:
if verbose:
print(' Cannot find tifffile')
if save_fmt == 'singles':
for idx in range(frames.shape[0]):
tifffile.imsave(
save_name + '_' + '{number:05d}'.format(number=idx) +
'_DFTreg.tif', frames[idx].astype(np.uint16))
if save_fmt == 'mptiff':
tifffile.imsave(save_name + '_DFTreg.tif',
frames.astype(np.uint16))
elif save_fmt == 'bigtiff':
tifffile.imsave(save_name + '_DFTreg.tif',
frames.astype(np.uint16), bigtiff=True)
| jzaremba/sima | sima/motion/dftreg.py | Python | gpl-2.0 | 31,079 |
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# MessageWindow.py - scripts and GUI for main (walk) window
###################################################
import GemRB
import GUIClasses
import GUICommon
import GUICommonWindows
import CommonWindow
import GUIWORLD
from GameCheck import MAX_PARTY_SIZE
from GUIDefines import *
MessageWindow = 0
ActionsWindow = 0
PortraitWindow = 0
OptionsWindow = 0
MessageTA = 0
def OnLoad():
global MessageWindow, ActionsWindow, PortraitWindow, OptionsWindow
GemRB.GameSetPartySize(MAX_PARTY_SIZE)
GemRB.GameSetProtagonistMode(0)
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
GemRB.SetInfoTextColor(0,255,0,255)
ActionsWindow = GemRB.LoadWindow(0)
OptionsWindow = GemRB.LoadWindow(2)
MessageWindow = GemRB.LoadWindow(7)
PortraitWindow = GUICommonWindows.OpenPortraitWindow (1)
MessageTA = MessageWindow.GetControl (1)
MessageTA.SetFlags (IE_GUI_TEXTAREA_AUTOSCROLL|IE_GUI_TEXTAREA_HISTORY)
GemRB.SetVar ("MessageTextArea", MessageTA.ID)
GemRB.SetVar ("ActionsWindow", ActionsWindow.ID)
GemRB.SetVar ("OptionsWindow", OptionsWindow.ID)
GemRB.SetVar ("MessageWindow", -1)
GemRB.SetVar ("OtherWindow", -1)
GemRB.SetVar ("ActionsPosition", 1) #Bottom
GemRB.SetVar ("OptionsPosition", 1) #Bottom
GemRB.SetVar ("MessagePosition", 1) #Bottom
GemRB.SetVar ("OtherPosition", 0) #Left
GemRB.GameSetScreenFlags (0, OP_SET)
CloseButton= MessageWindow.GetControl (0)
CloseButton.SetText(28082)
CloseButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CommonWindow.OnDecreaseSize)
CloseButton.SetFlags (IE_GUI_BUTTON_DEFAULT | IE_GUI_BUTTON_MULTILINE, OP_OR)
OpenButton = OptionsWindow.GetControl (10)
OpenButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CommonWindow.OnIncreaseSize)
# Select all
Button = ActionsWindow.GetControl (1)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommon.SelectAllOnPress)
	# Stop all actions
Button = ActionsWindow.GetControl (3)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommonWindows.ActionStopPressed)
FormationButton = ActionsWindow.GetControl (4)
FormationButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUIWORLD.OpenFormationWindow)
GUICommonWindows.SetupClockWindowControls (ActionsWindow)
GUICommonWindows.SetupMenuWindowControls (OptionsWindow)
UpdateControlStatus ()
def UpdateControlStatus ():
global MessageWindow, PortraitWindow, ActionsWindow, OptionsWindow, MessageTA
Expand = GemRB.GetMessageWindowSize() & (GS_DIALOGMASK|GS_DIALOG)
hideflags = GemRB.HideGUI ()
if Expand:
GemRB.SetVar ("MessageWindow", MessageWindow.ID)
GemRB.SetVar ("PortraitWindow", -1)
GemRB.SetVar ("ActionsWindow", -1)
GemRB.SetVar ("OptionsWindow", -1)
MessageTA = GUIClasses.GTextArea(MessageWindow.ID, GemRB.GetVar ("MessageTextArea"))
MessageTA.SetStatus (IE_GUI_CONTROL_FOCUSED)
Label = MessageWindow.GetControl (0x10000003)
Label.SetText (str (GemRB.GameGetPartyGold ()))
else:
GemRB.SetVar ("MessageWindow", -1)
GemRB.SetVar ("PortraitWindow", PortraitWindow.ID)
GemRB.SetVar ("ActionsWindow", ActionsWindow.ID)
GemRB.SetVar ("OptionsWindow", OptionsWindow.ID)
GUICommon.GameControl.SetStatus(IE_GUI_CONTROL_FOCUSED)
if hideflags:
GemRB.UnhideGUI ()
| Tomsod/gemrb | gemrb/GUIScripts/pst/MessageWindow.py | Python | gpl-2.0 | 3,926 |
""" PyroScope - Controller "torrent".
Copyright (c) 2009 The PyroScope Project <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from pyroscope.lib.base import render, PageController
from pyroscope.engines import rtorrent
log = logging.getLogger(__name__)
class TorrentController(PageController):
def __init__(self):
self.proxy = rtorrent.Proxy()
def index(self):
# Redirect to view page
return redirect_to(action="view") #, id="HelpIndex")
def view(self, id):
c.hash = id
c.name = id
c.torrents = list(rtorrent.View(self.proxy, "incomplete").items())
# Return a rendered template
return render("pages/torrent.mako")
| pyroscope/pyroscope | pyroscope/pyroscope/controllers/torrent.py | Python | gpl-2.0 | 1,556 |
from Queue import Empty
from multiprocessing import Process, Queue
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import widgets
from scipy import interpolate
from labrad.units import Unit
V, mV, us, GHz, rad = [Unit(s) for s in ('V', 'mV', 'us', 'GHz', 'rad')]
from pyle.dataking import utilMultilevels as ml
from pyle.fitting import fitting
def adjust_s_scanning(qubit, data, qnd=False):
f, phase = data.T
traces = [{'x':f, 'y': phase, 'args':('b.',)}]
if qnd:
params = [{'name': 'qnd_readout frequency', 'val': qubit['qnd_readout frequency'][GHz], 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'}]
else:
params = [{'name': 'readout frequency', 'val': qubit['readout frequency'][GHz], 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
if qnd:
qubit['qnd_readout frequency'] = result['qnd_readout frequency']*GHz
else:
qubit['readout frequency'] = result['readout frequency']*GHz
def adjust_phase(qubit, data):
fb, left, right = data.T
traces = [{'x': fb, 'y': left, 'args': ('b.',)},
{'x': fb, 'y': right, 'args': ('r.',)}]
params = [{'name':'adc adjusted phase', 'val': qubit['adc adjusted phase'][rad], 'range':(-np.pi, np.pi), 'axis':'y', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
qubit['adc adjusted phase'] = (-2 - result['adc adjusted phase'])*rad + qubit['adc adjusted phase']
if qubit['adc adjusted phase']>np.pi:
qubit['adc adjusted phase'] = qubit['adc adjusted phase']-2*np.pi
elif qubit['adc adjusted phase']<-np.pi:
qubit['adc adjusted phase'] = qubit['adc adjusted phase']+2*np.pi
def adjust_phase_arc(qubit, data):
fb, left, right = data.T
traces = [{'x': fb, 'y': left, 'args': ('b.',)},
{'x': fb, 'y': right, 'args': ('r.',)}]
params = [{'name': 'operate', 'val': qubit['biasOperate'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'b'},
{'name': 'readout', 'val': qubit['biasReadout'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'g'},
{'name': 'reset0', 'val': qubit['biasReset'][0][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'r'},
{'name': 'reset1', 'val': qubit['biasReset'][1][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'm'},
{'name': 'Phase', 'val': qubit['critical phase'][rad], 'range': (-np.pi,np.pi), 'axis': 'y', 'color': 'k'}]
result = adjust(params, traces)
if result is not None:
qubit['biasOperate'] = result['operate']*V
qubit['biasReadout'] = result['readout']*V
qubit['biasReset'] = [result['reset0']*V, result['reset1']*V] * 2
qubit['critical phase'] = result['Phase']*rad
def adjust_squid_steps(qubit, data):
fb, low, high = data.T
traces = [{'x': fb, 'y': low, 'args': ('b.',)},
{'x': fb, 'y': high, 'args': ('r.',)}]
params = [{'name': 'operate', 'val': qubit['biasOperate'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'b'},
{'name': 'readout', 'val': qubit['biasReadout'][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'g'},
{'name': 'reset0', 'val': qubit['biasReset'][0][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'r'},
{'name': 'reset1', 'val': qubit['biasReset'][1][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'm'},
{'name': 'timing0', 'val': qubit['squidSwitchIntervals'][0][0][us], 'range': (0,60), 'axis': 'y', 'color': 'k'},
{'name': 'timing1', 'val': qubit['squidSwitchIntervals'][0][1][us], 'range': (0,60), 'axis': 'y', 'color': 'gray'},
{'name': 'Edge_left', 'val': qubit['squidEdges'][0][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'g'},
{'name': 'Edge_right', 'val': qubit['squidEdges'][1][V], 'range': (-2.5,2.5), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
qubit['biasOperate'] = result['operate']*V
qubit['biasReadout'] = result['readout']*V
qubit['biasReset'] = [result['reset0']*V, result['reset1']*V] * 2
qubit['squidSwitchIntervals'] = [(result['timing0']*us, result['timing1']*us)]
qubit['squidEdges'] = [result['Edge_left']*V,result['Edge_right']*V] #mark the edge of two branches of the same color. Converts voltage-to-Phi_not
def adjust_time(data):
t, probs = data[:,0], data[:,1:].T
traces = [{'x': t, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 't', 'val': (min(t)+max(t))/2, 'range': (min(t), max(t)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
return result['t']
def adjust_operate_bias(qubit, data):
fb, prob = data.T
traces = [{'x': fb, 'y': prob, 'args': ('b.-',)}]
params = [{'name': 'fb', 'val': qubit['biasOperate'][mV], 'range': (min(fb),max(fb)), 'axis': 'x', 'color': 'b'},
{'name': 'step', 'val': qubit['biasStepEdge'][mV], 'range': (min(fb),max(fb)), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
qubit['biasOperate'] = result['fb']*mV
qubit['biasStepEdge'] = result['step']*mV
def adjust_scurve(qubit, data, states):
colors = ['b','g','r','c','m','y','k']
keynames=['measureAmp']+['measureAmp'+str(i) for i in list(np.arange(2,max(max(states)+2,2)))]
mpa, probs = data.T[0], data.T[1:]
traces = [{'x': mpa, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 'mpa'+str(state+1), 'val': float(qubit[keynames[state]]), 'range': (min(mpa),max(mpa)), 'axis': 'x', 'color': colors[state]} for state in states]
result = adjust(params, traces)
if result is not None:
for state in states:
qubit[keynames[state]] = result['mpa'+str(state+1)]
def adjust_visibility(qubit, data, states):
numstates=len(states)
mpas, probs, visis = data.T[0], data.T[1:numstates], data.T[numstates:]
colors = ['b','g','r','c','m','y','k']
keynames=['measureAmp']+['measureAmp'+str(i) for i in list(np.arange(2,max(max(states)+1,2)))]
#We have to make sure that the mpa axis is monotonically increasing for scipy.interpolation.interp1d to work properly
if mpas[0]>mpas[-1]: #If mpas runs negatively
mpas = mpas[::-1] #Reverse it's order
probs = probs[:,::-1] #and also reverse the order of the probabilities.
visis = visis[:,::-1] #and also reverse the order of the visibilities.
traces = [{'x':mpas, 'y':vis, 'args': ('.-',)} for vis in visis]+[{'x':mpas, 'y':prob, 'args': ('.-',)} for prob in probs]
params = [{'name':'mpa'+str(state), 'val': float(qubit[keynames[state-1]]), 'range': (min(mpas),max(mpas)), 'axis': 'x', 'color': colors[state-1]} for state in states[1:]]
result = adjust(params, traces)
if result is not None:
for state in states[1:]:
qubit[keynames[state-1]] = result['mpa'+str(state)]
def adjust_frequency(qubit, data, paramName=None):
if paramName is None:
paramName = 'f10'
f, prob = data.T
traces = [{'x': f, 'y': prob, 'args': ('b.-',)}]
params = [{'name': paramName, 'val': qubit[paramName][GHz], 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
qubit[paramName] = result[paramName]*GHz
def adjust_frequency_02(qubit, data):
f10 = qubit['f10'][GHz]
f21 = qubit['f21'][GHz]
f20_2ph = (f21 + f10) / 2
f, probs = data.T[0], data.T[1:]
traces = [{'x': f, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 'f10', 'val': f10, 'range': (min(f),max(f)), 'axis': 'x', 'color': 'b'},
{'name': 'f20_2ph', 'val': f20_2ph, 'range': (min(f),max(f)), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
f10 = result['f10']
f20_2ph = result['f20_2ph']
f21 = 2*f20_2ph - f10
qubit['f10'] = f10*GHz
qubit['f21'] = f21*GHz
def adjust_fc(qubit, data):
f10 = qubit['f10'][GHz]
fc = qubit['fc'][GHz]
f, probs = data.T[0], data.T[1:]
traces = [{'x': f, 'y': prob, 'args': ('.-',)} for prob in probs]
params = [{'name': 'f10', 'val': qubit['f10'][GHz], 'range': (min(f)-.2,max(f)+.2), 'axis': 'x', 'color': 'b'},
{'name': 'fc', 'val': qubit['fc'][GHz], 'range': (min(f)-.2,max(f)+.2), 'axis': 'x', 'color': 'g'}]
result = adjust(params, traces)
if result is not None:
qubit['f10'] = result['f10']*GHz
qubit['fc'] = result['fc']*GHz
def adjust_rabihigh(qubit, data, state=1):
rabiheight, probs = data.T
traces = [{'x': rabiheight, 'y': probs, 'args': ('.-',)}]
params = [{'name': 'maxRabi', 'val': float(qubit[ml.saveKeyNumber('piAmp',state)]), 'range': (min(rabiheight),max(rabiheight)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
return result['maxRabi']
else:
return None
def adjust_piZ(qubit, data, state=1):
zAmp, probs = data.T
key = ml.saveKeyNumber('piAmpZ',state)
traces = [{'x': zAmp, 'y': probs, 'args': ('.-',)}]
params = [{'name': 'bestAmp', 'val': float(qubit[key]), 'range': (min(zAmp),max(zAmp)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
print 'Old %s: %f' %(key, qubit[key])
selectedAmp = result['bestAmp']
qubit['piAmpZ'] = selectedAmp
print 'New %s: %f' %(key, selectedAmp)
def adjust_cZControlPhaseCorrAmp(qubit, data):
cZCorrAmpMax = qubit['cZControlPhaseCorrAmpMax']
cZCorrAmpMin = qubit['cZControlPhaseCorrAmpMin']
controlAmp, probs = data.T
traces = [{'x': controlAmp, 'y': probs, 'args': ('b.-',)}]
params = [{'name': 'MAX', 'val': float(qubit['cZControlPhaseCorrAmpMax']), 'range': (min(controlAmp),max(controlAmp)), 'axis': 'x', 'color': 'b'},
{'name': 'MIN', 'val': float(qubit['cZControlPhaseCorrAmpMin']), 'range': (min(controlAmp),max(controlAmp)), 'axis': 'x', 'color': 'b'}]
result = adjust(params, traces)
if result is not None:
cZCorrAmpMax = result['MAX']
cZCorrAmpMin = result['MIN']
qubit['cZControlPhaseCorrAmpMax'] = cZCorrAmpMax
qubit['cZControlPhaseCorrAmpMin'] = cZCorrAmpMin
def adjust_cZTargetPhaseCorrAmp(qubit, data):
cZCorrAmpMax = qubit['cZTargetPhaseCorrAmpMax']
cZCorrAmpMin = qubit['cZTargetPhaseCorrAmpMin']
targetAmp, probs = data.T
traces = [{'x': targetAmp, 'y': probs, 'args': ('b.-',)}]
params = [{'name': 'MAX', 'val': float(qubit['cZTargetPhaseCorrAmpMax']), 'range': (min(targetAmp),max(targetAmp)), 'axis': 'x', 'color': 'b'},
{'name': 'MIN', 'val': float(qubit['cZTargetPhaseCorrAmpMin']), 'range': (min(targetAmp),max(targetAmp)), 'axis': 'x', 'color': 'r'}]
result = adjust(params, traces)
if result is not None:
cZCorrAmpMax = result['MAX']
cZCorrAmpMin = result['MIN']
qubit['cZTargetPhaseCorrAmpMax'] = cZCorrAmpMax
qubit['cZTargetPhaseCorrAmpMin'] = cZCorrAmpMin
def adjust(*a, **kw):
return runInSubprocess(_adjustGeneric, *a, **kw)
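# Hedged usage sketch (illustrative only), mirroring the adjust_* helpers
# above: `traces` is a list of dicts with 'x', 'y' and optional plot 'args',
# and `params` is a list of adjustable markers, each with a name, initial
# value, allowed range, axis ('x' or 'y') and color. The call blocks until
# the user saves or cancels and returns a {name: value} dict or None.
#
#     traces = [{'x': xs, 'y': ys, 'args': ('b.-',)}]
#     params = [{'name': 'threshold', 'val': 0.5, 'range': (0.0, 1.0),
#                'axis': 'y', 'color': 'k'}]
#     result = adjust(params, traces)
#     if result is not None:
#         new_threshold = result['threshold']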
def _adjustGeneric(params, traces):
xlines = []
ylines = []
xsliders = []
ysliders = []
xparams = [p for p in params if p['axis'] == 'x']
yparams = [p for p in params if p['axis'] == 'y']
result = [None]
sh = 0.03
dy = 0.04
sw = 0.025
dx = 0.03
top = 0.95
left = 0.08
bottom = sh + dy*(len(xparams)+2)
right = 1 - sw - dx*(len(yparams)+1)
bgap = 0.1
bw = (right - left - bgap)/2.0
# plot the original data
fig = plt.figure()
ax = fig.add_subplot(111)
plt.subplots_adjust(top=top, left=left, bottom=bottom, right=right)
for trace in traces:
args = trace.get('args', ())
kw = trace.get('kw', {})
ax.plot(trace['x'], trace['y'], *args, **kw)
ax.grid()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# draw lines and create sliders for adjustable params
for i, p in enumerate(xparams):
val = p['val']
x = [val, val] if p['axis'] == 'x' else xlim
y = [val, val] if p['axis'] == 'y' else ylim
line, = ax.plot(x, y, c=p['color'])
xlines.append(line)
min, max = p['range']
slider_ax = fig.add_axes([left, sh+dy*(len(xparams)-i), right-left, sh])
s = widgets.Slider(slider_ax, p['name'], min, max, p['val'], valfmt='%0.3f', color=p['color'])
xsliders.append(s)
for i, p in enumerate(yparams):
val = p['val']
x = [val, val] if p['axis'] == 'x' else xlim
y = [val, val] if p['axis'] == 'y' else ylim
line, = ax.plot(x, y, c=p['color'])
ylines.append(line)
min, max = p['range']
slider_ax = fig.add_axes([1-sw-dx*(len(yparams)-i), bottom, sw, top-bottom])
s = VerticalSlider(slider_ax, p['name'], min, max, p['val'], valfmt='%0.3f', color=p['color'])
ysliders.append(s)
# create save and cancel buttons
btn_ax = fig.add_axes([left, sh, bw, sh])
save_btn = widgets.Button(btn_ax, 'Save')
btn_ax = fig.add_axes([right-bw, sh, bw, sh])
cancel_btn = widgets.Button(btn_ax, 'Cancel')
# event callbacks
def update(val):
for p, line, slider in zip(xparams, xlines, xsliders):
val = p['val'] = slider.val
x = [val, val] if p['axis'] == 'x' else xlim
y = [val, val] if p['axis'] == 'y' else ylim
line.set_xdata(x)
line.set_ydata(y)
for p, line, slider in zip(yparams, ylines, ysliders):
val = p['val'] = slider.val
x = [val, val] if p['axis'] == 'x' else xlim
y = [val, val] if p['axis'] == 'y' else ylim
line.set_xdata(x)
line.set_ydata(y)
plt.draw() # redraw the figure
def save(e):
result[0] = dict((p['name'], p['val']) for p in params)
plt.close(fig)
def cancel(e):
plt.close(fig)
# hook up events
for slider in xsliders + ysliders:
slider.on_changed(update)
cancel_btn.on_clicked(cancel)
save_btn.on_clicked(save)
# initial update
update(None)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.show()
return result[0]
class VerticalSlider(widgets.Widget):
"""
A vertical slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
        slidermin and slidermax - can be used to constrain the value of
        this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axhspan(valmin,valinit,0,1, **kwargs)
self.hline = ax.axhline(valinit,0,1, color='r', lw=1)
self.valfmt = valfmt
ax.set_yticks([])
ax.set_ylim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
# TODO fix text
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.ydata
if not self.closedmin and val <= self.valmin: return
if not self.closedmax and val >= self.valmax: return
if self.slidermin is not None:
if val <= self.slidermin.val: return
if self.slidermax is not None:
if val >= self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[1] = 0, val
xy[2] = 1, val
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for _cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
        When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
def runInSubprocess(f, *a, **kw):
q = Queue()
p = Process(target=_run, args=(q, f, a, kw))
p.start()
while True:
try:
result = q.get(timeout=0.3)
break
except Empty:
if not p.is_alive():
raise Exception('Child process died!')
p.join()
return result
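# Hedged usage sketch (illustrative only): any picklable, module-level
# callable can be run in a child process this way; the parent polls the
# queue until the result arrives or the child dies.
#
#     def _slow_square(x):
#         return x * x
#     answer = runInSubprocess(_slow_square, 7)  # -> 49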
def _run(q, f, a, kw):
q.put(f(*a, **kw))
| McDermott-Group/LabRAD | LabRAD/TestScripts/fpgaTest/pyle/pyle/dataking/squid.py | Python | gpl-2.0 | 19,393 |
__author__ = 'leviwright'
from mainbot.commands import Command
class NickServLogin(Command):
arguments = []
permissionLevel = 3
permitExtraArgs = False
manArgCheck = False
defaultArgs = []
callName = "login"
def on_call(self, event, *args):
self.bot.connection.privmsg("NickServ", "identify %s" % self.bot.nickPass)
for x in self.bot.manOplist:
self.privMsg(x, "Sent request")
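# Hedged sketch (not part of the original module): a further command would
# follow the same pattern of class-level metadata plus an on_call handler;
# the attribute meanings are assumed from NickServLogin above.
#
#     class Ping(Command):
#         arguments = []
#         permissionLevel = 0
#         permitExtraArgs = False
#         manArgCheck = False
#         defaultArgs = []
#         callName = "ping"
#
#         def on_call(self, event, *args):
#             for x in self.bot.manOplist:
#                 self.privMsg(x, "pong")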
| MrMindImplosion/Slack-Bots | mainbot/autoCommand.py | Python | gpl-2.0 | 437 |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Pavithra <[email protected]>
import os
import re
from avocado import Test
from avocado.utils import archive
from avocado.utils import build
from avocado.utils import distro
from avocado.utils import process
from avocado.utils.software_manager import SoftwareManager
class GDB(Test):
def setUp(self):
sm = SoftwareManager()
dist = distro.detect()
packages = ['gcc', 'dejagnu', 'flex',
'bison', 'texinfo', 'make', 'makeinfo']
if dist.name == 'Ubuntu':
packages.extend(['g++', 'binutils-dev'])
# FIXME: "redhat" as the distro name for RHEL is deprecated
# on Avocado versions >= 50.0. This is a temporary compatibility
# enabler for older runners, but should be removed soon
elif dist.name in ['rhel', 'fedora', 'redhat']:
packages.extend(['gcc-c++', 'binutils-devel', 'texi2html'])
elif dist.name == 'SuSE':
packages.extend(['gcc-c++', 'binutils-devel',
'glibc-devel', 'glibc-devel-static'])
else:
self.fail('no packages list for your distro.')
for package in packages:
if not sm.check_installed(package) and not sm.install(package):
self.cancel("Fail to install %s required for this test." %
package)
test_type = self.params.get('type', default='upstream')
if test_type == 'upstream':
gdb_version = self.params.get('gdb_version', default='10.2')
tarball = self.fetch_asset(
"http://ftp.gnu.org/gnu/gdb/gdb-%s.tar.gz" % gdb_version)
archive.extract(tarball, self.workdir)
sourcedir = os.path.join(
self.workdir, os.path.basename(tarball.split('.tar')[0]))
elif test_type == 'distro':
sourcedir = os.path.join(self.workdir, 'gdb-distro')
if not os.path.exists(sourcedir):
os.makedirs(sourcedir)
sourcedir = sm.get_source("gdb", sourcedir)
os.chdir(sourcedir)
process.run('./configure', ignore_status=True, sudo=True)
build.make(sourcedir)
def test(self):
process.run("make check-gdb", ignore_status=True, sudo=True)
logfile = os.path.join(self.logdir, "stdout")
with open(logfile, 'r') as f:
for line in f.readlines():
for match in re.finditer("of unexpected failures[1-9]", line):
self.log.info(line)
self.fail("Few gdb tests have failed")
| abdhaleegit/avocado-misc-tests | toolchain/gdb.py | Python | gpl-2.0 | 3,103 |
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2017 University of Tuebingen, CERN, CSC, KTH.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test permissions of user account REST API."""
import json
from invenio_db import db
from flask import url_for
from invenio_accounts.models import User
from invenio_oauth2server.models import Token
from invenio_oauth2server import current_oauth2server
from b2share_unit_tests.helpers import create_user
def test_accounts_search_permission(app, test_users, test_community,
login_user):
"""Test permission of listing user accounts."""
def account_search(user, expected_code):
headers = [('Content-Type', 'application/json'),
('Accept', 'application/json')]
with app.app_context():
url = url_for('invenio_accounts_rest.users_list')
if user:
scopes = current_oauth2server.scope_choices()
allowed_token = Token.create_personal(
'allowed_token', user.id,
scopes=[s[0] for s in scopes]
)
# application authentication token header
headers.append(('Authorization',
'Bearer {}'.format(allowed_token.access_token)))
with app.test_client() as client:
if user is not None:
login_user(user, client)
res = client.get(url, headers=headers)
assert res.status_code == expected_code
# anonymous users can't list accounts
account_search(None, 401)
# authenticated users can't list other users' account
account_search(test_users['normal'], 403)
# community members cannot list all users' accounts
account_search(test_community.member, 403)
# community admins can list all users
account_search(test_community.admin, 200)
# admin is allowed to list all accounts
account_search(test_users['admin'], 200)
def test_account_read_permission(app, test_users, test_community,
login_user):
"""Test permission of listing user accounts."""
with app.app_context():
read_user = create_user('read_user')
url = url_for('invenio_accounts_rest.user',
user_id=read_user.id)
db.session.commit()
headers = [('Content-Type', 'application/json'),
('Accept', 'application/json')]
def account_read(user, expected_code):
with app.test_client() as client:
if user is not None:
login_user(user, client)
res = client.get(url, headers=headers)
assert res.status_code == expected_code
# anonymous users can't read accounts
account_read(None, 401)
# authenticated users can't read other users' account
account_read(test_users['normal'], 403)
# community members cannot read other users' account
account_read(test_community.member, 403)
# users can read their own account
account_read(read_user, 200)
    # community admins can read any user's account
account_read(test_community.admin, 200)
# admin is allowed to read all accounts
account_read(test_users['admin'], 200)
def test_account_activation_permission(app, test_users, test_community,
login_user):
"""Test deactivating a user account."""
counter = [0]
def account_update(user, expected_code, modified_user=None):
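        # Create a fresh user (or reuse modified_user) and try to PATCH its
        # 'active' flag, first as plain JSON, then as a JSON Patch document.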
def account_update_sub(patch_content, content_type):
with app.app_context():
if modified_user is None:
test_user = create_user(
'test_user{}'.format(counter[0]))
else:
test_user = modified_user
counter[0] += 1
url = url_for(
'invenio_accounts_rest.user',
user_id=test_user.id,
)
db.session.commit()
headers = [('Content-Type', content_type),
('Accept', 'application/json')]
with app.test_client() as client:
if user is not None:
login_user(user, client)
res = client.patch(url, headers=headers,
data=json.dumps(patch_content))
assert res.status_code == expected_code
# test with a simple JSON
account_update_sub({'active': False}, 'application/json')
# test with a JSON patch
account_update_sub([{
            'op': 'replace', 'path': '/active', 'value': False
}], 'application/json-patch+json')
# anonymous users can't activate/deactivate accounts
account_update(None, 401)
# authenticated users can't activate/deactivate other users' account
account_update(test_users['normal'], 403)
# users can't deactivate their own accounts
account_update(test_users['normal'], 403, test_users['normal'])
# admin is allowed to activate/deactivate accounts
account_update(test_users['admin'], 200)
def test_account_roles_search_permission(app, test_users, test_community,
login_user):
"""Test permission of listing user accounts."""
with app.app_context():
read_user = create_user('read_user')
url = url_for('invenio_accounts_rest.user_roles_list',
user_id=read_user.id)
db.session.commit()
headers = [('Content-Type', 'application/json'),
('Accept', 'application/json')]
def roles_read(user, expected_code):
with app.test_client() as client:
if user is not None:
login_user(user, client)
res = client.get(url, headers=headers)
assert res.status_code == expected_code
# anonymous users can't read other users' roles
roles_read(None, 401)
# any authenticated user can read other users' roles
roles_read(test_users['normal'], 200)
| EUDAT-B2SHARE/b2share | tests/b2share_unit_tests/users/test_account_rest_permissions.py | Python | gpl-2.0 | 6,918 |
#!/usr/bin/python
#
#
from distutils.core import setup
from spacewalk.common.rhnConfig import CFG, initCFG
initCFG('web')
setup(name = "rhnclient",
version = "5.5.9",
description = CFG.PRODUCT_NAME + " Client Utilities and Libraries",
long_description = CFG.PRODUCT_NAME + """\
Client Utilities
Includes: rhn_check, action handler, and modules to allow
client packages to communicate with RHN.""",
author = 'Joel Martin',
author_email = '[email protected]',
url = 'http://rhn.redhat.com',
packages = ["rhn.actions", "rhn.client"],
license = "GPL",
)
| PaulWay/spacewalk | client/solaris/rhnclient/setup.py | Python | gpl-2.0 | 610 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import hashlib
import itertools
import sqlalchemy as sa
from buildbot.util import unicode2bytes
class DBConnectorComponent:
# A fixed component of the DBConnector, handling one particular aspect of
# the database. Instances of subclasses are assigned to attributes of the
# DBConnector object, so that they are available at e.g.,
# C{master.db.model} or C{master.db.changes}. This parent class takes care
# of the necessary backlinks and other housekeeping.
connector = None
data2db = {}
def __init__(self, connector):
self.db = connector
# set up caches
for method in dir(self.__class__):
o = getattr(self, method)
if isinstance(o, CachedMethod):
setattr(self, method, o.get_cached_method(self))
@property
def master(self):
return self.db.master
_isCheckLengthNecessary = None
def checkLength(self, col, value):
if not self._isCheckLengthNecessary:
if self.db.pool.engine.dialect.name == 'mysql':
self._isCheckLengthNecessary = True
else:
# not necessary, so just stub out the method
self.checkLength = lambda col, value: None
return
assert col.type.length, f"column {col} does not have a length"
if value and len(value) > col.type.length:
raise RuntimeError(f"value for column {col} is greater than max of {col.type.length} "
f"characters: {value}")
def ensureLength(self, col, value):
assert col.type.length, f"column {col} does not have a length"
if value and len(value) > col.type.length:
value = value[:col.type.length // 2] + \
hashlib.sha1(unicode2bytes(value)).hexdigest()[:col.type.length // 2]
return value
# returns a Deferred that returns a value
def findSomethingId(self, tbl, whereclause, insert_values,
_race_hook=None, autoCreate=True):
d = self.findOrCreateSomethingId(tbl, whereclause, insert_values,
_race_hook, autoCreate)
d.addCallback(lambda pair: pair[0])
return d
def findOrCreateSomethingId(self, tbl, whereclause, insert_values,
_race_hook=None, autoCreate=True):
"""
        Find a matching row and, if one cannot be found, optionally create it.
        Returns a deferred which resolves to the pair (id, found), where id is
        the primary key of the matching row and `found` is True if a match was
        found; `found` is False if a new row was created.
"""
def thd(conn, no_recurse=False):
# try to find the master
q = sa.select([tbl.c.id],
whereclause=whereclause)
r = conn.execute(q)
row = r.fetchone()
r.close()
# found it!
if row:
return row.id, True
if not autoCreate:
return None, False
if _race_hook is not None:
_race_hook(conn)
try:
r = conn.execute(tbl.insert(), [insert_values])
return r.inserted_primary_key[0], False
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
# try it all over again, in case there was an overlapping,
# identical call, but only retry once.
if no_recurse:
raise
return thd(conn, no_recurse=True)
return self.db.pool.do(thd)
def hashColumns(self, *args):
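        # Return a stable hex SHA-1 digest of the given column values, joined
        # with NUL bytes; None is encoded as a single 0xf5 byte so that NULL
        # values hash consistently across calls.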
def encode(x):
if x is None:
return b'\xf5'
elif isinstance(x, str):
return x.encode('utf-8')
return str(x).encode('utf-8')
return hashlib.sha1(b'\0'.join(map(encode, args))).hexdigest()
def doBatch(self, batch, batch_n=500):
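        # Yield the given iterable in successive chunks of at most batch_n
        # items, so large sets of rows can be processed in batches.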
iterator = iter(batch)
while True:
batch = list(itertools.islice(iterator, batch_n))
if not batch:
break
yield batch
class CachedMethod:
def __init__(self, cache_name, method):
self.cache_name = cache_name
self.method = method
def get_cached_method(self, component):
meth = self.method
meth_name = meth.__name__
cache = component.db.master.caches.get_cache(self.cache_name,
lambda key: meth(component, key))
def wrap(key, no_cache=0):
if no_cache:
return meth(component, key)
return cache.get(key)
wrap.__name__ = meth_name + " (wrapped)"
wrap.__module__ = meth.__module__
wrap.__doc__ = meth.__doc__
wrap.cache = cache
return wrap
def cached(cache_name):
return lambda method: CachedMethod(cache_name, method)
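# Illustrative usage (not part of the original module): decorating a component
# method with a hypothetical cache name serves repeated lookups for the same
# key from the master's cache:
#
#   class ExampleComponent(DBConnectorComponent):
#       @cached("my_cache")
#       def getSomething(self, key):
#           ...  # body runs only on a cache miss; pass no_cache=1 to bypass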
| pmisik/buildbot | master/buildbot/db/base.py | Python | gpl-2.0 | 5,687 |
# Patchwork - automated patch tracking system
# Copyright (C) 2016 Linaro Corporation
#
# SPDX-License-Identifier: GPL-2.0-or-later
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
class IndexView(APIView):
def get(self, request, *args, **kwargs):
"""List API resources."""
return Response({
'projects': reverse('api-project-list', request=request),
'users': reverse('api-user-list', request=request),
'people': reverse('api-person-list', request=request),
'patches': reverse('api-patch-list', request=request),
'covers': reverse('api-cover-list', request=request),
'series': reverse('api-series-list', request=request),
'events': reverse('api-event-list', request=request),
'bundles': reverse('api-bundle-list', request=request),
})
| stephenfin/patchwork | patchwork/api/index.py | Python | gpl-2.0 | 942 |
# -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from osgeo import ogr
from ui_widgetGrid import Ui_GdalToolsWidget as Ui_Widget
from widgetPluginBase import GdalToolsBasePluginWidget as BasePluginWidget
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BasePluginWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.algorithm = ('invdist', 'average', 'nearest', 'datametrics')
self.datametrics = ('minimum', 'maximum', 'range')
self.setupUi(self)
BasePluginWidget.__init__(self, self.iface, "gdal_grid")
# set the default QSpinBoxes value
self.invdistPowerSpin.setValue(2.0)
self.outputFormat = Utils.fillRasterOutputFormat()
self.lastEncoding = Utils.getLastUsedEncoding()
self.setParamsStatus(
[
(self.inputLayerCombo, [SIGNAL("currentIndexChanged(int)"), SIGNAL("editTextChanged(const QString &)")] ),
(self.outputFileEdit, SIGNAL("textChanged(const QString &)")),
(self.zfieldCombo, SIGNAL("currentIndexChanged(int)"), self.zfieldCheck),
(self.algorithmCombo, SIGNAL("currentIndexChanged(int)"), self.algorithmCheck),
(self.stackedWidget, SIGNAL("currentChanged(int)"), self.algorithmCheck),
([self.invdistPowerSpin, self.invdistSmothingSpin, self.invdistRadius1Spin, self.invdistRadius2Spin, self.invdistAngleSpin, self.invdistNoDataSpin], SIGNAL("valueChanged(double)")),
([self.invdistMaxPointsSpin, self.invdistMinPointsSpin], SIGNAL("valueChanged(int)")),
([self.averageRadius1Spin, self.averageRadius2Spin, self.averageAngleSpin, self.averageNoDataSpin], SIGNAL("valueChanged(double)")),
(self.averageMinPointsSpin, SIGNAL("valueChanged(int)")),
([self.nearestRadius1Spin, self.nearestRadius2Spin, self.nearestAngleSpin, self.nearestNoDataSpin], SIGNAL("valueChanged(double)")),
(self.datametricsCombo, SIGNAL("currentIndexChanged(int)")),
([self.datametricsRadius1Spin, self.datametricsRadius2Spin, self.datametricsAngleSpin, self.datametricsNoDataSpin], SIGNAL("valueChanged(double)")),
(self.datametricsMinPointsSpin, SIGNAL("valueChanged(int)"))
]
)
self.connect(self.selectInputFileButton, SIGNAL("clicked()"), self.fillInputFileEdit)
self.connect(self.selectOutputFileButton, SIGNAL("clicked()"), self.fillOutputFileEdit)
self.connect(self.inputLayerCombo, SIGNAL("currentIndexChanged(int)"), self.fillFieldsCombo)
# fill layers combo
self.fillInputLayerCombo()
def fillInputLayerCombo(self):
self.inputLayerCombo.clear()
( self.layers, names ) = Utils.getVectorLayers()
self.inputLayerCombo.addItems( names )
def fillFieldsCombo(self):
index = self.inputLayerCombo.currentIndex()
if index < 0:
return
self.lastEncoding = self.layers[index].dataProvider().encoding()
self.loadFields( self.getInputFileName() )
def fillInputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedVectorFilter()
inputFile, encoding = Utils.FileDialog.getOpenFileName(self, self.tr( "Select the input file for Grid" ), Utils.FileFilter.allVectorsFilter(), lastUsedFilter, True)
if inputFile.isEmpty():
return
Utils.FileFilter.setLastUsedVectorFilter(lastUsedFilter)
self.inputLayerCombo.setCurrentIndex(-1)
self.inputLayerCombo.setEditText(inputFile)
self.lastEncoding = encoding
self.loadFields( inputFile )
def fillOutputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
outputFile = Utils.FileDialog.getSaveFileName(self, self.tr( "Select the raster file to save the results to" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
if outputFile.isEmpty():
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.outputFormat = Utils.fillRasterOutputFormat( lastUsedFilter, outputFile )
self.outputFileEdit.setText(outputFile)
def getArguments(self):
arguments = QStringList()
if self.zfieldCheck.isChecked() and self.zfieldCombo.currentIndex() >= 0:
arguments << "-zfield"
arguments << self.zfieldCombo.currentText()
if self.inputLayerCombo.currentIndex() >= 0:
arguments << "-l"
arguments << QFileInfo(self.layers[ self.inputLayerCombo.currentIndex() ].source()).baseName()
elif not self.inputLayerCombo.currentText().isEmpty():
arguments << "-l"
arguments << QFileInfo(self.inputLayerCombo.currentText()).baseName()
if self.algorithmCheck.isChecked() and self.algorithmCombo.currentIndex() >= 0:
arguments << "-a"
arguments << self.algorithmArguments(self.algorithmCombo.currentIndex())
if not self.outputFileEdit.text().isEmpty():
arguments << "-of"
arguments << self.outputFormat
arguments << self.getInputFileName()
arguments << self.outputFileEdit.text()
return arguments
def getInputFileName(self):
if self.inputLayerCombo.currentIndex() >= 0:
return self.layers[self.inputLayerCombo.currentIndex()].source()
return self.inputLayerCombo.currentText()
def getOutputFileName(self):
return self.outputFileEdit.text()
def addLayerIntoCanvas(self, fileInfo):
self.iface.addRasterLayer(fileInfo.filePath())
def algorithmArguments(self, index):
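        # Build the value passed to gdal_grid's "-a" switch: the algorithm
        # name followed by its parameters, joined with ":".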
algorithm = self.algorithm[index]
arguments = QStringList()
if algorithm == "invdist":
arguments.append(algorithm)
arguments.append("power=" + str(self.invdistPowerSpin.value()))
arguments.append("smothing=" + str(self.invdistSmothingSpin.value()))
arguments.append("radius1=" + str(self.invdistRadius1Spin.value()))
arguments.append("radius2=" + str(self.invdistRadius2Spin.value()))
arguments.append("angle=" + str(self.invdistAngleSpin.value()))
arguments.append("max_points=" + str(self.invdistMaxPointsSpin.value()))
arguments.append("min_points=" + str(self.invdistMinPointsSpin.value()))
arguments.append("nodata=" + str(self.invdistNoDataSpin.value()))
elif algorithm == "average":
arguments.append(algorithm)
arguments.append("radius1=" + str(self.averageRadius1Spin.value()))
arguments.append("radius2=" + str(self.averageRadius2Spin.value()))
arguments.append("angle=" + str(self.averageAngleSpin.value()))
arguments.append("min_points=" + str(self.averageMinPointsSpin.value()))
arguments.append("nodata=" + str(self.averageNoDataSpin.value()))
elif algorithm == "nearest":
arguments.append(algorithm)
arguments.append("radius1=" + str(self.nearestRadius1Spin.value()))
arguments.append("radius2=" + str(self.nearestRadius2Spin.value()))
arguments.append("angle=" + str(self.nearestAngleSpin.value()))
arguments.append("nodata=" + str(self.nearestNoDataSpin.value()))
else:
arguments.append(self.datametrics[self.datametricsCombo.currentIndex()])
arguments.append("radius1=" + str(self.datametricsRadius1Spin.value()))
arguments.append("radius2=" + str(self.datametricsRadius2Spin.value()))
arguments.append("angle=" + str(self.datametricsAngleSpin.value()))
arguments.append("min_points=" + str(self.datametricsMinPointsSpin.value()))
arguments.append("nodata=" + str(self.datametricsNoDataSpin.value()))
return arguments.join(":")
def loadFields(self, vectorFile = QString()):
self.zfieldCombo.clear()
if vectorFile.isEmpty():
return
try:
(fields, names) = Utils.getVectorFields(vectorFile)
except Exception, e:
QErrorMessage(self).showMessage( str(e) )
self.inputLayerCombo.clearEditText()
self.inputLayerCombo.setCurrentIndex(-1)
return
ncodec = QTextCodec.codecForName(self.lastEncoding)
for name in names:
self.zfieldCombo.addItem( ncodec.toUnicode(name) )
| sourcepole/qgis | qgis/python/plugins/GdalTools/tools/doGrid.py | Python | gpl-2.0 | 8,154 |
import sys, re
mnemonics="mov,and,or,xor,add,adc,sto,ld,ror,jsr,sub,sbc,inc,lsr,dec,asr,halt,bswp,putpsr,getpsr,rti,not,out,in,push,pop,cmp,cmpc".split(",")
op = dict([(opcode,mnemonics.index(opcode)) for opcode in mnemonics])
dis = dict([(mnemonics.index(opcode),opcode) for opcode in mnemonics])
pred_dict = {0:"",1:"0.",2:"z.",3:"nz.",4:"c.",5:"nc.",6:"mi.",7:"pl."}
if len(sys.argv) > 3:
with open(sys.argv[3],"r") as f:
input_text = iter(''.join(f.readlines()))
else:
input_text = iter([chr(0)]*100000)
def print_memory_access( type, address, data):
ch = '%s' % chr(data) if ( 0x1F < data < 0x7F) else '.'
print( "%5s: Address : 0x%04x (%5d) : Data : 0x%04x (%5d) %s" % (type,address,address,data,data,ch))
with open(sys.argv[1],"r") as f:
wordmem = [ (int(x,16) & 0xFFFF) for x in f.read().split() ]
(regfile, acc, c, z, pcreg, c_save, s, ei, swiid, interrupt, iomem) = ([0]*16,0,0,0,15,0,0,0,0,0, [0]*65536) # initialise machine state inc PC = reg[15]
print ("PC : Mem : Instruction : SWI I S C Z : %s\n%s" % (''.join([" r%2d " % d for d in range(0,16)]), '-'*130))
while True:
(pc_save,flag_save,regfile[0],preserve_flag) = (regfile[pcreg],(swiid,ei,s,c,z),0,False) # always overwrite regfile location 0 and then dont care about assignments
instr_word = wordmem[regfile[pcreg] & 0xFFFF ] & 0xFFFF
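    # Decode the instruction word: bits 15-13 are the predicate bits (p0,p1,p2),
    # bit 12 selects a two-word instruction with a trailing operand, bits 11-8
    # hold the opcode (extended when the predicate pattern is 0,0,1), bits 7-4
    # the source register and bits 3-0 the destination register.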
(p0, p1, p2) = ( (instr_word & 0x8000) >> 15, (instr_word & 0x4000) >> 14, (instr_word & 0x2000)>>13)
(opcode, source, dest) = (((instr_word & 0xF00) >> 8) | (0x10 if (p0,p1,p2)==(0,0,1) else 0x00), (instr_word & 0xF0) >>4, instr_word & 0xF)
(instr_len, rdmem, preserve_flag) = (2 if (instr_word & 0x1000) else 1, (opcode in(op["ld"],op["in"],op["pop"])), (dest==pcreg))
operand = wordmem[regfile[pcreg]+1] if (instr_len==2) else (source if opcode in [op["dec"],op["inc"]] else ((opcode==op["pop"])-(opcode==op["push"])))
instr_str = "%s%s r%d," % ((pred_dict[p0<<2 | p1<<1 | p2] if (p0,p1,p2)!=(0,0,1) else ""),dis[opcode],dest)
instr_str += ("%s%d%s" % (("r" if opcode not in (op["inc"],op["dec"]) else ""),source, (",0x%04x" % operand) if instr_len==2 else ''))
instr_str = re.sub("r0","psr",instr_str,1) if (opcode in (op["putpsr"],op["getpsr"])) else instr_str
(mem_str, source) = (" %04x %4s " % (instr_word, "%04x" % (operand) if instr_len==2 else ''), (0 if opcode in (op["dec"],op["inc"]) else source))
regfile[15] += instr_len
eff_addr = (regfile[source] + operand*(opcode!=op["pop"]))&0xFFFF # EA_ED must be computed after PC is brought up to date
ea_ed = wordmem[eff_addr] if (opcode in(op["ld"],op["pop"])) else iomem[eff_addr] if rdmem else eff_addr
if opcode == op["in"]:
try:
ea_ed = ord(input_text.__next__())
except:
ea_ed = 0
if interrupt : # software interrupts dont care about EI bit
(interrupt, regfile[pcreg], pc_int, psr_int , ei) = (0, 0x0002, pc_save, (swiid,ei,s,c,z), 0)
else:
print ("%04x :%s: %-22s : %1X %d %d %d %d : %s" % (pc_save, mem_str, instr_str, swiid ,ei, s, c, z, ' '.join(["%04x" % i for i in regfile])))
if ( ( (p0,p1,p2)==(0,0,1) ) or (bool(p2) ^ (bool(s if p0==1 else z) if p1==1 else bool(c if p0==1 else 1)))):
if opcode == (op["halt"]):
print("Stopped on halt instruction at %04x with halt number 0x%04x" % (regfile[15]-(instr_len), operand) )
break
elif opcode == (op["rti"]) and (dest==15):
(regfile[pcreg], flag_save, preserve_flag ) = (pc_int, (0,psr_int[1],psr_int[2],psr_int[3],psr_int[4]), True )
elif opcode in (op["and"], op["or"]):
regfile[dest] = ((regfile[dest] & ea_ed) if opcode==op["and"] else (regfile[dest] | ea_ed))& 0xFFFF
elif opcode == op["xor"]:
regfile[dest] = (regfile[dest] ^ ea_ed) & 0xFFFF
elif opcode in (op["ror"],op["asr"],op["lsr"]):
(c, regfile[dest]) = (ea_ed & 0x1, ( ((c<<15) if opcode==op["ror"] else (ea_ed&0x8000 if opcode==op["asr"] else 0)) | ((ea_ed&0xFFFF) >> 1)))
elif opcode in (op["add"], op["adc"], op["inc"]) :
res = (regfile[dest] + ea_ed + (c if opcode==op["adc"] else 0)) & 0x1FFFF
(c, regfile[dest]) = ( (res>>16) & 1, res & 0xFFFF)
elif opcode in (op["mov"], op["ld"], op["not"], op["in"], op["pop"]):
(regfile[source],regfile[dest]) = (regfile[source] if opcode !=op["pop"] else ((regfile[source]+operand)&0xFFFF), (~ea_ed if opcode==op["not"] else ea_ed) & 0xFFFF)
if opcode in (op["ld"],op["in"],op["pop"]):
print_memory_access( "IN" if opcode==op["in"] else "LOAD" , eff_addr, ea_ed)
elif opcode in (op["sub"], op["sbc"], op["cmp"], op["cmpc"], op["dec"]) :
res = (regfile[dest] + ((~ea_ed)&0xFFFF) + (c if (opcode in (op["cmpc"],op["sbc"])) else 1)) & 0x1FFFF
dest = 0 if opcode in( op["cmp"], op["cmpc"]) else dest # retarget r0 with result of comparison
(c, regfile[dest]) = ( (res>>16) & 1, res & 0xFFFF)
elif opcode == op["bswp"]:
regfile[dest] = (((ea_ed&0xFF00)>>8)|((ea_ed&0x00FF)<<8)) & 0xFFFF
elif opcode == op["jsr"]:
(preserve_flag,regfile[dest],regfile[pcreg]) = (True,regfile[pcreg],ea_ed)
elif opcode == op["putpsr"]:
(preserve_flag, flag_save, interrupt) = (True, ((ea_ed&0xF0)>>4,(ea_ed&0x8)>>3,(ea_ed&0x4)>>2,(ea_ed&0x2)>>1,(ea_ed)&1), (ea_ed&0xF0)!=0)
elif opcode == op["getpsr"]:
regfile[dest] = ((swiid&0xF)<<4) | (ei<<3) | (s<<2) | (c<<1) | z
elif opcode in (op["sto"],op["push"]):
(regfile[source],preserve_flag,wordmem[ea_ed]) = (ea_ed if opcode==op["push"] else regfile[source], True,regfile[dest])
print_memory_access("STORE",ea_ed,regfile[dest])
elif opcode == op["out"]:
(preserve_flag,iomem[ea_ed], ch) = (True, regfile[dest], '%s' % chr(regfile[dest]) if ( 0x1F < regfile[dest] < 0x7F) else '.')
print_memory_access("OUT",ea_ed,regfile[dest])
(swiid,ei,s,c,z) = flag_save if (preserve_flag or dest==0xF ) else (swiid,ei, (regfile[dest]>>15) & 1, c, 1 if (regfile[dest]==0) else 0)
if len(sys.argv) > 2: # Dump memory for inspection if required
with open(sys.argv[2],"w" ) as f:
f.write( '\n'.join([''.join("%04x " % d for d in wordmem[j:j+16]) for j in [i for i in range(0,len(wordmem),16)]]))
| revaldinho/opc | opc6/opc6emu.py | Python | gpl-3.0 | 6,597 |
"""test building messages with streamsession"""
#-------------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import os
import uuid
from datetime import datetime
import zmq
from zmq.tests import BaseZMQTestCase
from zmq.eventloop.zmqstream import ZMQStream
from IPython.kernel.zmq import session as ss
from IPython.testing.decorators import skipif, module_not_available
from IPython.utils.py3compat import string_types
from IPython.utils import jsonutil
def _bad_packer(obj):
raise TypeError("I don't work")
def _bad_unpacker(bytes):
raise TypeError("I don't work either")
class SessionTestCase(BaseZMQTestCase):
def setUp(self):
BaseZMQTestCase.setUp(self)
self.session = ss.Session()
class TestSession(SessionTestCase):
def test_msg(self):
"""message format"""
msg = self.session.msg('execute')
thekeys = set('header parent_header metadata content msg_type msg_id'.split())
s = set(msg.keys())
self.assertEqual(s, thekeys)
self.assertTrue(isinstance(msg['content'],dict))
self.assertTrue(isinstance(msg['metadata'],dict))
self.assertTrue(isinstance(msg['header'],dict))
self.assertTrue(isinstance(msg['parent_header'],dict))
self.assertTrue(isinstance(msg['msg_id'],str))
self.assertTrue(isinstance(msg['msg_type'],str))
self.assertEqual(msg['header']['msg_type'], 'execute')
self.assertEqual(msg['msg_type'], 'execute')
def test_serialize(self):
msg = self.session.msg('execute', content=dict(a=10, b=1.1))
msg_list = self.session.serialize(msg, ident=b'foo')
ident, msg_list = self.session.feed_identities(msg_list)
new_msg = self.session.unserialize(msg_list)
self.assertEqual(ident[0], b'foo')
self.assertEqual(new_msg['msg_id'],msg['msg_id'])
self.assertEqual(new_msg['msg_type'],msg['msg_type'])
self.assertEqual(new_msg['header'],msg['header'])
self.assertEqual(new_msg['content'],msg['content'])
self.assertEqual(new_msg['parent_header'],msg['parent_header'])
self.assertEqual(new_msg['metadata'],msg['metadata'])
# ensure floats don't come out as Decimal:
        self.assertEqual(type(new_msg['content']['b']), type(msg['content']['b']))
def test_send(self):
ctx = zmq.Context.instance()
A = ctx.socket(zmq.PAIR)
B = ctx.socket(zmq.PAIR)
A.bind("inproc://test")
B.connect("inproc://test")
msg = self.session.msg('execute', content=dict(a=10))
self.session.send(A, msg, ident=b'foo', buffers=[b'bar'])
ident, msg_list = self.session.feed_identities(B.recv_multipart())
new_msg = self.session.unserialize(msg_list)
self.assertEqual(ident[0], b'foo')
self.assertEqual(new_msg['msg_id'],msg['msg_id'])
self.assertEqual(new_msg['msg_type'],msg['msg_type'])
self.assertEqual(new_msg['header'],msg['header'])
self.assertEqual(new_msg['content'],msg['content'])
self.assertEqual(new_msg['parent_header'],msg['parent_header'])
self.assertEqual(new_msg['metadata'],msg['metadata'])
self.assertEqual(new_msg['buffers'],[b'bar'])
content = msg['content']
header = msg['header']
parent = msg['parent_header']
metadata = msg['metadata']
msg_type = header['msg_type']
self.session.send(A, None, content=content, parent=parent,
header=header, metadata=metadata, ident=b'foo', buffers=[b'bar'])
ident, msg_list = self.session.feed_identities(B.recv_multipart())
new_msg = self.session.unserialize(msg_list)
self.assertEqual(ident[0], b'foo')
self.assertEqual(new_msg['msg_id'],msg['msg_id'])
self.assertEqual(new_msg['msg_type'],msg['msg_type'])
self.assertEqual(new_msg['header'],msg['header'])
self.assertEqual(new_msg['content'],msg['content'])
self.assertEqual(new_msg['metadata'],msg['metadata'])
self.assertEqual(new_msg['parent_header'],msg['parent_header'])
self.assertEqual(new_msg['buffers'],[b'bar'])
self.session.send(A, msg, ident=b'foo', buffers=[b'bar'])
ident, new_msg = self.session.recv(B)
self.assertEqual(ident[0], b'foo')
self.assertEqual(new_msg['msg_id'],msg['msg_id'])
self.assertEqual(new_msg['msg_type'],msg['msg_type'])
self.assertEqual(new_msg['header'],msg['header'])
self.assertEqual(new_msg['content'],msg['content'])
self.assertEqual(new_msg['metadata'],msg['metadata'])
self.assertEqual(new_msg['parent_header'],msg['parent_header'])
self.assertEqual(new_msg['buffers'],[b'bar'])
A.close()
B.close()
ctx.term()
def test_args(self):
"""initialization arguments for Session"""
s = self.session
self.assertTrue(s.pack is ss.default_packer)
self.assertTrue(s.unpack is ss.default_unpacker)
self.assertEqual(s.username, os.environ.get('USER', u'username'))
s = ss.Session()
self.assertEqual(s.username, os.environ.get('USER', u'username'))
self.assertRaises(TypeError, ss.Session, pack='hi')
self.assertRaises(TypeError, ss.Session, unpack='hi')
u = str(uuid.uuid4())
s = ss.Session(username=u'carrot', session=u)
self.assertEqual(s.session, u)
self.assertEqual(s.username, u'carrot')
def test_tracking(self):
"""test tracking messages"""
a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
s = self.session
s.copy_threshold = 1
stream = ZMQStream(a)
msg = s.send(a, 'hello', track=False)
self.assertTrue(msg['tracker'] is ss.DONE)
msg = s.send(a, 'hello', track=True)
self.assertTrue(isinstance(msg['tracker'], zmq.MessageTracker))
M = zmq.Message(b'hi there', track=True)
msg = s.send(a, 'hello', buffers=[M], track=True)
t = msg['tracker']
self.assertTrue(isinstance(t, zmq.MessageTracker))
self.assertRaises(zmq.NotDone, t.wait, .1)
del M
t.wait(1) # this will raise
def test_unique_msg_ids(self):
"""test that messages receive unique ids"""
ids = set()
for i in range(2**12):
h = self.session.msg_header('test')
msg_id = h['msg_id']
self.assertTrue(msg_id not in ids)
ids.add(msg_id)
def test_feed_identities(self):
"""scrub the front for zmq IDENTITIES"""
theids = "engine client other".split()
content = dict(code='whoda',stuff=object())
themsg = self.session.msg('execute',content=content)
pmsg = theids
def test_session_id(self):
session = ss.Session()
# get bs before us
bs = session.bsession
us = session.session
self.assertEqual(us.encode('ascii'), bs)
session = ss.Session()
# get us before bs
us = session.session
bs = session.bsession
self.assertEqual(us.encode('ascii'), bs)
# change propagates:
session.session = 'something else'
bs = session.bsession
us = session.session
self.assertEqual(us.encode('ascii'), bs)
session = ss.Session(session='stuff')
# get us before bs
self.assertEqual(session.bsession, session.session.encode('ascii'))
self.assertEqual(b'stuff', session.bsession)
def test_zero_digest_history(self):
session = ss.Session(digest_history_size=0)
for i in range(11):
session._add_digest(uuid.uuid4().bytes)
self.assertEqual(len(session.digest_history), 0)
def test_cull_digest_history(self):
session = ss.Session(digest_history_size=100)
for i in range(100):
session._add_digest(uuid.uuid4().bytes)
self.assertTrue(len(session.digest_history) == 100)
session._add_digest(uuid.uuid4().bytes)
self.assertTrue(len(session.digest_history) == 91)
for i in range(9):
session._add_digest(uuid.uuid4().bytes)
self.assertTrue(len(session.digest_history) == 100)
session._add_digest(uuid.uuid4().bytes)
self.assertTrue(len(session.digest_history) == 91)
def test_bad_pack(self):
try:
session = ss.Session(pack=_bad_packer)
except ValueError as e:
self.assertIn("could not serialize", str(e))
self.assertIn("don't work", str(e))
else:
self.fail("Should have raised ValueError")
def test_bad_unpack(self):
try:
session = ss.Session(unpack=_bad_unpacker)
except ValueError as e:
self.assertIn("could not handle output", str(e))
self.assertIn("don't work either", str(e))
else:
self.fail("Should have raised ValueError")
def test_bad_packer(self):
try:
session = ss.Session(packer=__name__ + '._bad_packer')
except ValueError as e:
self.assertIn("could not serialize", str(e))
self.assertIn("don't work", str(e))
else:
self.fail("Should have raised ValueError")
def test_bad_unpacker(self):
try:
session = ss.Session(unpacker=__name__ + '._bad_unpacker')
except ValueError as e:
self.assertIn("could not handle output", str(e))
self.assertIn("don't work either", str(e))
else:
self.fail("Should have raised ValueError")
def test_bad_roundtrip(self):
with self.assertRaises(ValueError):
session = ss.Session(unpack=lambda b: 5)
def _datetime_test(self, session):
content = dict(t=datetime.now())
metadata = dict(t=datetime.now())
p = session.msg('msg')
msg = session.msg('msg', content=content, metadata=metadata, parent=p['header'])
smsg = session.serialize(msg)
msg2 = session.unserialize(session.feed_identities(smsg)[1])
assert isinstance(msg2['header']['date'], datetime)
self.assertEqual(msg['header'], msg2['header'])
        self.assertEqual(msg['parent_header'], msg2['parent_header'])
assert isinstance(msg['content']['t'], datetime)
assert isinstance(msg['metadata']['t'], datetime)
assert isinstance(msg2['content']['t'], string_types)
assert isinstance(msg2['metadata']['t'], string_types)
        self.assertEqual(msg['content'], jsonutil.extract_dates(msg2['content']))
        self.assertEqual(msg['metadata'], jsonutil.extract_dates(msg2['metadata']))
def test_datetimes(self):
self._datetime_test(self.session)
def test_datetimes_pickle(self):
session = ss.Session(packer='pickle')
self._datetime_test(session)
@skipif(module_not_available('msgpack'))
def test_datetimes_msgpack(self):
session = ss.Session(packer='msgpack.packb', unpacker='msgpack.unpackb')
self._datetime_test(session)
def test_send_raw(self):
ctx = zmq.Context.instance()
A = ctx.socket(zmq.PAIR)
B = ctx.socket(zmq.PAIR)
A.bind("inproc://test")
B.connect("inproc://test")
msg = self.session.msg('execute', content=dict(a=10))
msg_list = [self.session.pack(msg[part]) for part in
['header', 'parent_header', 'metadata', 'content']]
self.session.send_raw(A, msg_list, ident=b'foo')
ident, new_msg_list = self.session.feed_identities(B.recv_multipart())
new_msg = self.session.unserialize(new_msg_list)
self.assertEqual(ident[0], b'foo')
self.assertEqual(new_msg['msg_type'],msg['msg_type'])
self.assertEqual(new_msg['header'],msg['header'])
self.assertEqual(new_msg['parent_header'],msg['parent_header'])
self.assertEqual(new_msg['content'],msg['content'])
self.assertEqual(new_msg['metadata'],msg['metadata'])
A.close()
B.close()
ctx.term()
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/IPython/kernel/zmq/tests/test_session.py | Python | gpl-3.0 | 12,636 |
#!/usr/bin/env python
# from https://github.com/mapillary/mapillary_tools/
# The MIT License (MIT)
#
# Copyright (c) 2014 Mapillary AB
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import getopt
import os
import sys
import shutil
from math import asin, cos, radians, sin, sqrt
from PIL import Image
from lib.exif_pil import PILExifReader
class GPSDirectionDuplicateFinder:
"""Finds duplicates based on the direction the camera is pointing.
This supports the case where a panorama is being made."""
def __init__(self, max_diff):
self._prev_rotation = None
self._prev_unique_rotation = None
self._max_diff = max_diff
self._latest_text = ""
def get_latest_text(self):
return self._latest_text
def latest_is_duplicate(self, is_duplicate):
if not is_duplicate:
self._prev_unique_rotation = self._prev_rotation
def is_duplicate(self, file_path, exif_reader):
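        # Compare the camera heading of this image with the heading of the
        # last image that was kept; a difference below max_diff marks this
        # image as a duplicate.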
rotation = exif_reader.get_rotation()
if rotation is None:
return None
if self._prev_unique_rotation is None:
self._prev_rotation = rotation
return False
diff = abs(rotation - self._prev_unique_rotation)
is_duplicate = diff < self._max_diff
self._prev_rotation = rotation
self._latest_text = str(int(diff)) + " deg: " + str(is_duplicate)
return is_duplicate
class GPSDistance:
"""Calculates the distance between two sets of GPS coordinates."""
@staticmethod
def get_gps_distance(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees with a result in meters).
This is done using the Haversine Formula.
"""
# Convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
# Haversine formula
difflat = lat2 - lat1
difflon = lon2 - lon1
a = (sin(difflat / 2) ** 2) + (cos(lat1) * cos(lat2) * sin(difflon / 2)
** 2)
c = 2 * asin(sqrt(a))
r = 6371000 # Radius of The Earth in meters.
# It is not a perfect sphere, so this is just good enough.
return c * r
class GPSSpeedErrorFinder:
"""Finds images in a sequence that might have an error in GPS data
or suggest a track to be split. It is done by looking at the
speed it would take to travel the distance in question."""
def __init__(self, max_speed_km_h, way_too_high_speed_km_h):
self._prev_lat_lon = None
        self._previous = None
        self._prev_time = None
self._latest_text = ""
self._previous_filepath = None
self._max_speed_km_h = max_speed_km_h
self._way_too_high_speed_km_h = way_too_high_speed_km_h
self._high_speed = False
self._too_high_speed = False
def set_verbose(self, verbose):
self.verbose = verbose
def get_latest_text(self):
return self._latest_text
def is_error(self, file_path, exif_reader):
"""
        Returns whether there is an obvious error in the image's EXIF data.
        The given file_path points to the image and exif_reader is a
        PILExifReader instance for that image.
"""
speed_gps = exif_reader.get_speed()
if speed_gps is None:
self._latest_text = "No speed given in EXIF data."
return False
self._latest_text = "Speed GPS: " + str(speed_gps) + " km/h"
if speed_gps > self._way_too_high_speed_km_h:
self._latest_text = ("GPS speed is unrealistically high: %s km/h."
% speed_gps)
self._too_high_speed = True
return True
elif speed_gps > self._max_speed_km_h:
self._latest_text = ("GPS speed is high: %s km/h."
% speed_gps )
self._high_speed = True
return True
latlong = exif_reader.get_lat_lon()
timestamp = exif_reader.get_time()
if self._prev_lat_lon is None or self._prev_time is None:
self._prev_lat_lon = latlong
self._prev_time = timestamp
self._previous_filepath = file_path
return False
if latlong is None or timestamp is None:
return False
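        # Also derive a speed from the distance and time between this image
        # and the previous one, as a second check alongside the EXIF speed tag.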
diff_meters = GPSDistance.get_gps_distance(
self._prev_lat_lon[0], self._prev_lat_lon[1], latlong[0],
latlong[1])
diff_secs = (timestamp - self._prev_time).total_seconds()
if diff_secs == 0:
return False
speed_km_h = (diff_meters / diff_secs) * 3.6
if speed_km_h > self._way_too_high_speed_km_h:
self._latest_text = ("Speed between %s and %s is %s km/h, which is"
" unrealistically high." % (self._previous_filepath, file_path,
int(speed_km_h)))
self._too_high_speed = True
return True
elif speed_km_h > self._max_speed_km_h:
self._latest_text = "Speed between %s and %s is %s km/h." % (
self._previous_filepath, file_path, int(speed_km_h)
)
self._high_speed = True
return True
else:
return False
def is_fast(self):
return self._high_speed
def is_too_fast(self):
return self._too_high_speed
class GPSDistanceDuplicateFinder:
"""Finds duplicates images by looking at the distance between
two GPS points."""
def __init__(self, distance):
self._distance = distance
self._prev_lat_lon = None
self._previous = None
self._latest_text = ""
self._previous_filepath = None
self._prev_unique_lat_lon = None
def get_latest_text(self):
return self._latest_text
def latest_is_duplicate(self, is_duplicate):
if not is_duplicate:
self._prev_unique_lat_lon = self._prev_lat_lon
def is_duplicate(self, file_path, exif_reader):
"""
        Returns whether the given image is a duplicate of the previous image.
        The given file_path points to the image and exif_reader is a
        PILExifReader instance for that image.
"""
latlong = exif_reader.get_lat_lon()
if self._prev_lat_lon is None:
self._prev_lat_lon = latlong
return False
if self._prev_unique_lat_lon is not None and latlong is not None:
diff_meters = GPSDistance.get_gps_distance(
self._prev_unique_lat_lon[0], self._prev_unique_lat_lon[1],
latlong[0], latlong[1])
self._previous_filepath = file_path
is_duplicate = diff_meters <= self._distance
self._prev_lat_lon = latlong
self._latest_text = file_path + ": " + str(
int(diff_meters)) + " m: " + str(is_duplicate)
return is_duplicate
else:
return False
class ImageRemover:
"""Moves images that are (almost) duplicates or contains errors in GPS
data into separate directories."""
def __init__(self, src_dir, duplicate_dir, error_dir):
self._testers = []
self._error_finders = []
self._src_dir = src_dir
self._duplicate_dir = duplicate_dir
self._error_dir = error_dir
self._dryrun = False
self.verbose = 0
def set_verbose(self, verbose):
self.verbose = verbose
def set_dry_run(self, dryrun):
self._dryrun = dryrun
def add_duplicate_finder(self, tester):
self._testers.append(tester)
def add_error_finder(self, finder):
self._error_finders.append(finder)
def _move_into_error_dir(self, file):
self._move_into_dir(file, self._error_dir)
def _move_into_duplicate_dir(self, file):
self._move_into_dir(file, self._duplicate_dir)
def _move_into_dir(self, file, dir):
if not self._dryrun and not os.path.exists(dir):
os.makedirs(dir)
filename = os.path.basename(file)
if not self._dryrun:
shutil.move(file, os.path.join(dir, filename))
print file, " => ", dir
def _read_capture_time(self, filepath):
reader = PILExifReader(filepath)
return reader.read_capture_time()
def _sort_file_list(self, file_list):
'''
Read capture times and sort files in time order.
'''
capture_times = [self._read_capture_time(filepath) for filepath in file_list]
sorted_times_files = zip(capture_times, file_list)
sorted_times_files.sort()
return zip(*sorted_times_files)
def do_magic(self):
"""Perform the task of finding and moving images."""
files = [os.path.join(self._src_dir, f) for f in os.listdir(self._src_dir)
if os.path.isfile(os.path.join(self._src_dir, f)) and
f.lower().endswith('.jpg')]
capturetime, files = self._sort_file_list(files)
for file_path in files:
exif_reader = PILExifReader(file_path)
            is_error = self._handle_possible_error(file_path, exif_reader)
if not is_error:
self._handle_possible_duplicate(file_path, exif_reader)
def _handle_possible_duplicate(self, file_path, exif_reader):
is_duplicate = True
verbose_text = []
for tester in self._testers:
is_this_duplicate = tester.is_duplicate(file_path, exif_reader)
if is_this_duplicate != None:
is_duplicate &= is_this_duplicate
verbose_text.append(tester.get_latest_text())
else:
verbose_text.append("No orientation")
if self.verbose >= 1:
print ", ".join(verbose_text), "=>", is_duplicate
if is_duplicate:
self._move_into_duplicate_dir(file_path)
for tester in self._testers:
tester.latest_is_duplicate(is_duplicate)
return is_duplicate
    def _handle_possible_error(self, file_path, exif_reader):
is_error = False
for finder in self._error_finders:
            err = finder.is_error(file_path, exif_reader)
if err:
print finder.get_latest_text()
is_error |= err
if is_error:
self._move_into_error_dir(file_path)
return is_error
if __name__ == "__main__":
distance = 4
pan = 20
error_dir = "errors"
fast_km_h = 150
too_fast_km_h = 200
min_duplicates = 3
def print_help():
print """Usage: remove-duplicates.py [-h | -d] src_dir duplicate_dir
Finds images in src_dir and moves duplicates to duplicate_dir.
Both src_dir and duplicate_dir are mandatory. If src_dir is not .
and duplicate_dir is not given, it will be named "duplicate" and put
in the current directory.
If duplicate_dir does not exist, it will be created in the current
directory (no matter if it will be used or not).
In order to be considered a duplicate, the image must match ALL criteria
to be a duplicate. With default settings that is, it must have travelled
less than """ + str(distance) + """ meters and be panned less than """ \
"" + str(pan) + """ degrees.
This supports that you ride straight ahead with a significant speed,
that you make panoramas standing still and standing still waiting for
the red light to change into green.
Important: The upload.py from Mapillary uploads *recursively* so do not
put the duplicate_dir under the dir your are uploading from!
Options:
-e --error-dir Give the directory to put pictures into, if they
                    contain obvious errors.
                    Default value is """ + error_dir + """
-h --help Print this message and exit.
-d --distance Give the maximum distance in meters images must be taken
not to be considered duplicates. Default is """ \
"" + str(distance) + """ meters.
The distance is calculated from embedded GPS data. If there
is no GPS data the images are ignored.
-a --fast The speed (km/h) which is a bit too fast.
E.g. 40 for a bicycle.
Default value is: """ + str(fast_km_h) + """ km/h
-t --too-fast The speed (km/h) which is way too fast.
E.g. 70 for a bicycle.
Default value is: """ + str(too_fast_km_h) + """ km/h
-p --pan The maximum distance in degrees (0-360) the image must be
panned in order not to be considered a duplicate.
Default is""" + str(pan) + """ degrees.
-m --min-dup Minimum duplicates for a duplicate to be removed.
Default is """ + str(min_duplicates), """.
When larger than 0 the duplicate feature is only used to
remove images due to larger stops, like a red traffic
light. If going really slow this will also cause moving
images.
When 0 individual images are also moved, when the speed
is slow, images will be moved giving a more consistent
expirience when viewing them one by one.
-n --dry-run Do not move any files. Just simulate.
-v --verbose Print extra info.
"""
dryrun = False
verbose = 0
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:p:nve:m:a:t:",
["help", "distance=", "pan=", "dry-run",
"verbose", "error-dir", "min-dup",
"fast=", "too-fast="])
except getopt.GetoptError, err:
print str(err)
sys.exit(2)
for switch, value in opts:
if switch in ("-h", "--help"):
print_help()
sys.exit(0)
elif switch in ("-d", "--distance"):
distance = float(value)
elif switch in ("-p", "--pan"):
pan = float(value)
elif switch in ("-n", "--dry-run"):
dryrun = True
elif switch in ("-v", "--verbose"):
verbose += 1
elif switch in ("-e", "--error-dir"):
error_dir = value
elif switch in ("-m", "--min-dup"):
min_duplicates = int(value)
elif switch in ("-a", "--fast"):
fast_km_h = float(value)
elif switch in ("-t", "--too-fast"):
too_fast_km_h = float(value)
if len(args) == 1 and args[0] != ".":
duplicate_dir = "duplicates"
elif len(args) < 2:
print_help()
sys.exit(2)
else:
duplicate_dir = args[1]
src_dir = args[0]
distance_finder = GPSDistanceDuplicateFinder(distance)
direction_finder = GPSDirectionDuplicateFinder(pan)
speed_error_finder = GPSSpeedErrorFinder(fast_km_h, too_fast_km_h)
image_remover = ImageRemover(src_dir, duplicate_dir, error_dir)
image_remover.set_dry_run(dryrun)
image_remover.set_verbose(verbose)
# Modular: Multiple testers can be added.
image_remover.add_duplicate_finder(distance_finder)
image_remover.add_duplicate_finder(direction_finder)
image_remover.add_error_finder(speed_error_finder)
try:
image_remover.do_magic()
except KeyboardInterrupt:
print "You cancelled."
sys.exit(1)
finally:
show_split = False
if speed_error_finder.is_fast():
show_split = True
print
print ("It looks like you have gone really fast between"
+" some images.")
print "Strongly consider splitting them into multiple series."
print "See the messages earlier."
if speed_error_finder.is_too_fast():
show_split = True
print
print ("It looks like yo have gone unrealistically fast"
+ "between some images to be ok.")
print ("Mabye your GPS started out with a wrong location "
+ "or you traveled between sets?")
print "See the messages earlier."
if show_split:
print
print ("See http://blog.mapillary.com/update/2014/06/16/actioncam-workflow.html"
+ " on how")
print ("to use time_split.py to automatically split a lot "
+ "of images into multiple series.")
| ltog/mapillary_utils | general/remove_duplicates.py | Python | gpl-3.0 | 17,416 |
"""Manifest clonning tools.."""
import json
import requests
import six
import time
import uuid
import zipfile
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from nailgun import entities
from robottelo.cli.subscription import Subscription
from robottelo.config import settings
from robottelo.constants import INTERFACE_API, INTERFACE_CLI
from robottelo.decorators.func_locker import lock_function
from robottelo.ssh import upload_file
class ManifestCloner(object):
"""Manifest clonning utility class."""
def __init__(self, template=None, signing_key=None):
self.template = template
self.signing_key = signing_key
def _download_manifest_info(self):
"""Download and cache the manifest information."""
self.template = requests.get(settings.fake_manifest.url).content
self.signing_key = requests.get(settings.fake_manifest.key_url).content
self.private_key = serialization.load_pem_private_key(
self.signing_key,
password=None,
backend=default_backend()
)
def clone(self):
"""Clones a RedHat-manifest file.
Change the consumer ``uuid`` and sign the new manifest with
signing key. The certificate for the key must be installed on the
candlepin server in order to accept uploading the cloned
manifest.
:return: A file-like object (``BytesIO`` on Python 3 and
``StringIO`` on Python 2) with the contents of the cloned
manifest.
"""
if self.signing_key is None or self.template is None:
self._download_manifest_info()
template_zip = zipfile.ZipFile(six.BytesIO(self.template))
# Extract the consumer_export.zip from the template manifest.
consumer_export_zip = zipfile.ZipFile(
six.BytesIO(template_zip.read('consumer_export.zip')))
# Generate a new consumer_export.zip file changing the consumer
# uuid.
consumer_export = six.BytesIO()
with zipfile.ZipFile(consumer_export, 'w') as new_consumer_export_zip:
for name in consumer_export_zip.namelist():
if name == 'export/consumer.json':
consumer_data = json.loads(
consumer_export_zip.read(name).decode('utf-8'))
consumer_data['uuid'] = six.text_type(uuid.uuid1())
new_consumer_export_zip.writestr(
name,
json.dumps(consumer_data)
)
else:
new_consumer_export_zip.writestr(
name,
consumer_export_zip.read(name)
)
# Generate a new manifest.zip file with the generated
# consumer_export.zip and new signature.
manifest = six.BytesIO()
with zipfile.ZipFile(
manifest, 'w', zipfile.ZIP_DEFLATED) as manifest_zip:
consumer_export.seek(0)
manifest_zip.writestr(
'consumer_export.zip',
consumer_export.read()
)
consumer_export.seek(0)
signer = self.private_key.signer(
padding.PKCS1v15(), hashes.SHA256())
signer.update(consumer_export.read())
manifest_zip.writestr(
'signature',
signer.finalize()
)
# Make sure that the file-like object is at the beginning and
# ready to be read.
manifest.seek(0)
return manifest
def original(self):
"""Returns the original manifest as a file-like object.
        Be aware that using the original manifest and not removing it
        afterwards will make it impossible to import it into any other
        Organization.
Make sure to close the returned file-like object in order to clean up
the memory used to store it.
"""
if self.signing_key is None or self.template is None:
self._download_manifest_info()
return six.BytesIO(self.template)
# Cache the ManifestCloner in order to avoid downloading the manifest template
# every single time.
_manifest_cloner = ManifestCloner()
class Manifest(object):
"""Class that holds the contents of a manifest with a generated filename
based on ``time.time``.
To ensure that the manifest content is closed use this class as a context
manager with the ``with`` statement::
with Manifest() as manifest:
# my fancy stuff
"""
def __init__(self, content=None, filename=None):
self._content = content
self.filename = filename
if self._content is None:
self._content = _manifest_cloner.clone()
if self.filename is None:
self.filename = u'/var/tmp/manifest-{0}.zip'.format(
int(time.time()))
@property
def content(self):
if not self._content.closed:
# Make sure that the content is always ready to read
self._content.seek(0)
return self._content
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if not self.content.closed:
self.content.close()
def clone():
"""Clone the cached manifest and return a ``Manifest`` object.
    It is highly recommended to use this with the ``with`` statement to make
    sure that the content of the manifest (file-like object) is closed
    properly::
with clone() as manifest:
# my fancy stuff
"""
return Manifest()
def original_manifest():
"""Returns a ``Manifest`` object filed with the template manifest.
Make sure to remove the manifest after its usage otherwiser the Satellite 6
server will not accept it anymore on any other organization.
Is hightly recommended to use this with the ``with`` statement to make that
the content of the manifest (file-like object) is closed properly::
with original_manifest() as manifest:
# my fancy stuff
"""
return Manifest(_manifest_cloner.original())
@lock_function
def upload_manifest_locked(org_id, manifest, interface=INTERFACE_API):
"""Upload a manifest with locking, using the requested interface.
:type org_id: int
:type manifest: robottelo.manifests.Manifest
:type interface: str
:returns: the upload result
Note: The manifest uploading is strictly locked only when using this
function
Usage::
# for API interface
manifest = manifests.clone()
upload_manifest_locked(org_id, manifest, interface=INTERFACE_API)
# for CLI interface
manifest = manifests.clone()
upload_manifest_locked(org_id, manifest, interface=INTERFACE_CLI)
# or in one line with default interface
result = upload_manifest_locked(org_id, manifests.clone())
        subscription_id = result['id']
"""
if interface not in [INTERFACE_API, INTERFACE_CLI]:
raise ValueError(
'upload manifest with interface "{0}" not supported'
.format(interface)
)
if interface == INTERFACE_API:
with manifest:
result = entities.Subscription().upload(
data={'organization_id': org_id},
files={'content': manifest.content},
)
else:
# interface is INTERFACE_CLI
with manifest:
upload_file(manifest.content, manifest.filename)
result = Subscription.upload({
'file': manifest.filename,
'organization-id': org_id,
})
return result
| ares/robottelo | robottelo/manifests.py | Python | gpl-3.0 | 7,792 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
class VeehdCom(Hoster):
__name__ = 'VeehdCom'
__type__ = 'hoster'
__pattern__ = r'http://veehd\.com/video/\d+_\S+'
__config__ = [
('filename_spaces', 'bool', "Allow spaces in filename", 'False'),
('replacement_char', 'str', "Filename replacement character", '_'),
]
__version__ = '0.23'
__description__ = """Veehd.com Download Hoster"""
__author_name__ = ('cat')
__author_mail__ = ('cat@pyload')
def _debug(self, msg):
self.logDebug('[%s] %s' % (self.__name__, msg))
def setup(self):
self.html = None
self.multiDL = True
self.req.canContinue = True
def process(self, pyfile):
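        # Standard hoster flow: fetch the page, verify the video exists,
        # derive a filename from the page title and download the embedded
        # video URL.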
self.download_html()
if not self.file_exists():
self.offline()
pyfile.name = self.get_file_name()
self.download(self.get_file_url())
def download_html(self):
url = self.pyfile.url
self._debug("Requesting page: %s" % (repr(url),))
self.html = self.load(url)
def file_exists(self):
if self.html is None:
self.download_html()
if '<title>Veehd</title>' in self.html:
return False
return True
def get_file_name(self):
if self.html is None:
self.download_html()
match = re.search(r'<title[^>]*>([^<]+) on Veehd</title>', self.html)
if not match:
self.fail("video title not found")
name = match.group(1)
# replace unwanted characters in filename
if self.getConfig('filename_spaces'):
pattern = '[^0-9A-Za-z\.\ ]+'
else:
pattern = '[^0-9A-Za-z\.]+'
name = re.sub(pattern, self.getConfig('replacement_char'),
name)
return name + '.avi'
def get_file_url(self):
""" returns the absolute downloadable filepath
"""
if self.html is None:
self.download_html()
match = re.search(r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/[^"]+)"',
self.html)
if not match:
self.fail("embedded video url not found")
file_url = match.group(1)
return file_url
| Rusk85/pyload | module/plugins/hoster/VeehdCom.py | Python | gpl-3.0 | 2,311 |
import math
def is_palindrome(n):
s = str(n)
return s == s[::-1]
def is_prime(n):
if n <= 1:
return False
if n % 2 == 0 and n != 2:
return False
if n == 2:
return True
root = math.sqrt(n)
i = 3
while i <= root:
if n % i == 0:
return False
i += 2
return True
i = 999
while i > 0:
if not is_palindrome(i):
i -= 1
continue
if not is_prime(i):
i -= 1
continue
print i
break
| durandj/codeeval | python/3_prime_palindrome.py | Python | gpl-3.0 | 521 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.model.document import Document
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
class LandedCostVoucher(Document):
def get_items_from_purchase_receipts(self):
self.set("items", [])
for pr in self.get("purchase_receipts"):
if pr.receipt_document_type and pr.receipt_document:
pr_items = frappe.db.sql("""select pr_item.item_code, pr_item.description,
pr_item.qty, pr_item.base_rate, pr_item.base_amount, pr_item.name
from `tab{doctype} Item` pr_item where parent = %s
and exists(select name from tabItem where name = pr_item.item_code and is_stock_item = 1)
""".format(doctype=pr.receipt_document_type), pr.receipt_document, as_dict=True)
for d in pr_items:
item = self.append("items")
item.item_code = d.item_code
item.description = d.description
item.qty = d.qty
item.rate = d.base_rate
item.amount = d.base_amount
item.receipt_document_type = pr.receipt_document_type
item.receipt_document = pr.receipt_document
item.purchase_receipt_item = d.name
if self.get("taxes"):
self.set_applicable_charges_for_item()
def validate(self):
self.check_mandatory()
self.validate_purchase_receipts()
self.set_total_taxes_and_charges()
if not self.get("items"):
self.get_items_from_purchase_receipts()
else:
self.set_applicable_charges_for_item()
def check_mandatory(self):
if not self.get("purchase_receipts"):
frappe.throw(_("Please enter Receipt Document"))
if not self.get("taxes"):
frappe.throw(_("Please enter Taxes and Charges"))
def validate_purchase_receipts(self):
receipt_documents = []
for d in self.get("purchase_receipts"):
if frappe.db.get_value(d.receipt_document_type, d.receipt_document, "docstatus") != 1:
frappe.throw(_("Receipt document must be submitted"))
else:
receipt_documents.append(d.receipt_document)
for item in self.get("items"):
if not item.receipt_document:
frappe.throw(_("Item must be added using 'Get Items from Purchase Receipts' button"))
elif item.receipt_document not in receipt_documents:
frappe.throw(_("Item Row {idx}: {doctype} {docname} does not exist in above '{doctype}' table")
.format(idx=item.idx, doctype=item.receipt_document_type, docname=item.receipt_document))
def set_total_taxes_and_charges(self):
self.total_taxes_and_charges = sum([flt(d.amount) for d in self.get("taxes")])
def set_applicable_charges_for_item(self):
based_on = self.distribute_charges_based_on.lower()
total = sum([flt(d.get(based_on)) for d in self.get("items")])
if not total:
frappe.throw(_("Total {0} for all items is zero, may you should change 'Distribute Charges Based On'").format(based_on))
for item in self.get("items"):
item.applicable_charges = flt(item.get(based_on)) * flt(self.total_taxes_and_charges) / flt(total)
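# Worked example added for clarity (numbers are hypothetical, not from the
# source): with total_taxes_and_charges = 100 and two items whose basis values
# are 30 and 70, the loop above assigns applicable_charges of 30 (30 * 100 / 100)
# and 70 (70 * 100 / 100), i.e. the charges are split pro rata over the items.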
def on_submit(self):
self.update_landed_cost()
def on_cancel(self):
self.update_landed_cost()
def update_landed_cost(self):
for d in self.get("purchase_receipts"):
doc = frappe.get_doc(d.receipt_document_type, d.receipt_document)
# set landed cost voucher amount in pr item
doc.set_landed_cost_voucher_amount()
# set valuation amount in pr item
doc.update_valuation_rate("items")
# save will update landed_cost_voucher_amount and voucher_amount in PR,
# as those fields are allowed to edit after submit
doc.save()
# update latest valuation rate in serial no
self.update_rate_in_serial_no(doc)
# update stock & gl entries for cancelled state of PR
doc.docstatus = 2
doc.update_stock_ledger(allow_negative_stock=True, via_landed_cost_voucher=True)
doc.make_gl_entries_on_cancel(repost_future_gle=False)
# update stock & gl entries for submit state of PR
doc.docstatus = 1
doc.update_stock_ledger(via_landed_cost_voucher=True)
doc.make_gl_entries()
def update_rate_in_serial_no(self, receipt_document):
for item in receipt_document.get("items"):
if item.serial_no:
serial_nos = get_serial_nos(item.serial_no)
if serial_nos:
frappe.db.sql("update `tabSerial No` set purchase_rate=%s where name in ({0})"
.format(", ".join(["%s"]*len(serial_nos))), tuple([item.valuation_rate] + serial_nos))
| elba7r/system | erpnext/stock/doctype/landed_cost_voucher/landed_cost_voucher.py | Python | gpl-3.0 | 4,474 |
# flake8: noqa
"""
PILKit image processors.
A processor accepts an image, does some stuff, and returns the result.
Processors can do anything with the image you want, but their responsibilities
should be limited to image manipulations--they should be completely decoupled
from the filesystem.
"""
from .base import *
from .crop import *
from .overlay import *
from .resize import *
| bzennn/blog_flask | python/lib/python3.5/site-packages/pilkit/processors/__init__.py | Python | gpl-3.0 | 386 |
# Chess Analyses by Jan van Reek
# http://www.endgame.nl/index.html
JvR_links = (
("http://www.endgame.nl/match.htm", "http://www.endgame.nl/MATCHPGN.ZIP"),
("http://www.endgame.nl/bad1870.htm", "http://www.endgame.nl/bad1870.pgn"),
("http://www.endgame.nl/wfairs.htm", "http://www.endgame.nl/wfairs.pgn"),
("http://www.endgame.nl/russia.html", "http://www.endgame.nl/Russia.pgn"),
("http://www.endgame.nl/wien.htm", "http://www.endgame.nl/wien.pgn"),
("http://www.endgame.nl/london1883.htm", "http://www.endgame.nl/london.pgn"),
("http://www.endgame.nl/neur1896.htm", "http://www.endgame.nl/neur1896.pgn"),
("http://www.endgame.nl/newyork.htm", "http://www.endgame.nl/newy.pgn"),
("http://www.endgame.nl/seaside.htm", "http://www.endgame.nl/seaside.pgn"),
("http://www.endgame.nl/CSpr1904.htm", "http://www.endgame.nl/cs1904.pgn"),
("http://www.endgame.nl/stpeter.htm", "http://www.endgame.nl/stp1909.pgn"),
("http://www.endgame.nl/stpeter.htm", "http://www.endgame.nl/stp1914.pgn"),
("http://www.endgame.nl/berlin1928.htm", "http://www.endgame.nl/berlin.pgn"),
("http://www.endgame.nl/bad.htm", "http://www.endgame.nl/bad.pgn"),
("http://www.endgame.nl/nimzowitsch.htm", "http://www.endgame.nl/nimzowitsch.pgn"),
("http://www.endgame.nl/mostrau.htm", "http://www.endgame.nl/mostrau.pgn"),
("http://www.endgame.nl/early.htm", "http://www.endgame.nl/early.pgn"),
("http://www.endgame.nl/bled1931.htm", "http://www.endgame.nl/Alekhine.pgn"),
("http://www.endgame.nl/nott1936.htm", "http://www.endgame.nl/nott1936.pgn"),
("http://www.endgame.nl/wbm.htm", "http://www.endgame.nl/wbm.pgn"),
("http://www.endgame.nl/AVRO1938.htm", "http://www.endgame.nl/avro1938.pgn"),
("http://www.endgame.nl/salz1942.htm", "http://www.endgame.nl/salz1942.pgn"),
("http://www.endgame.nl/itct.html", "http://www.endgame.nl/itct.pgn"),
("http://www.endgame.nl/zurich1953.htm", "http://www.endgame.nl/zurich.pgn"),
("http://www.endgame.nl/spassky.htm", "http://www.endgame.nl/SPASSKY.ZIP"),
("http://www.endgame.nl/dallas1957.htm", "http://www.endgame.nl/dallas57.pgn"),
("http://www.endgame.nl/capamem.htm", "http://www.endgame.nl/capamem.pgn"),
("http://www.endgame.nl/kortschnoj.htm", "http://www.endgame.nl/korchnoi.pgn"),
("http://www.endgame.nl/planinc.htm", "http://www.endgame.nl/Planinc.pgn"),
("http://www.endgame.nl/planinc.htm", "http://www.endgame.nl/memorial.pgn"),
("http://www.endgame.nl/Piatigorsky.htm", "http://www.endgame.nl/piatigorsky.pgn"),
("http://www.endgame.nl/ussr7079.htm", "http://www.endgame.nl/ussr6591.pgn"),
("http://www.endgame.nl/tilburg.htm", "http://www.endgame.nl/tilburg.pgn"),
("http://www.endgame.nl/dglory.htm", "http://www.endgame.nl/dglory.pgn"),
("http://www.endgame.nl/bugojno.htm", "http://www.endgame.nl/Bugojno.pgn"),
("http://www.endgame.nl/montreal.htm", "http://www.endgame.nl/mon1979.pgn"),
("http://www.endgame.nl/moscow88.htm", "http://www.endgame.nl/ussr88.pgn"),
("http://www.endgame.nl/skelleftea.htm", "http://www.endgame.nl/skel1989.pgn"),
("http://www.endgame.nl/vsb.htm", "http://www.endgame.nl/vsb.pgn"),
("http://www.endgame.nl/dortmund.htm", "http://www.endgame.nl/dortmund.pgn"),
("http://www.endgame.nl/Barca.html", "http://www.endgame.nl/Barca.pgn"),
("http://www.endgame.nl/Madrid.html", "http://www.endgame.nl/Madrid.pgn"),
("http://www.endgame.nl/costa_del_sol.html", "http://www.endgame.nl/Costa.pgn"),
("http://www.endgame.nl/Palma.html", "http://www.endgame.nl/Palma.pgn"),
("http://www.endgame.nl/olot.html", "http://www.endgame.nl/Olot.pgn"),
("http://www.endgame.nl/LasPalmas.html", "http://www.endgame.nl/lpalm96.pgn"),
("http://www.endgame.nl/DosH.htm", "http://www.endgame.nl/DosH.pgn"),
("http://www.endgame.nl/wijk.htm", "http://www.endgame.nl/corus.pgn"),
("http://www.endgame.nl/tal.html", "http://www.endgame.nl/Tal.pgn"),
("http://www.endgame.nl/cc.htm", "http://www.endgame.nl/cc.pgn"),
("http://www.endgame.nl/sofia.htm", "http://www.endgame.nl/sofia.pgn"),
("http://www.endgame.nl/linares.htm", "http://www.endgame.nl/linares.pgn"),
("http://www.endgame.nl/Bilbao.html", "http://www.endgame.nl/Bilbao.pgn"),
("http://www.endgame.nl/nanjing.html", "http://www.endgame.nl/Nanjing.pgn"),
("http://www.endgame.nl/dchamps.htm", "http://www.endgame.nl/dch.pgn"),
("http://www.endgame.nl/dsb.htm", "http://www.endgame.nl/dsb.pgn"),
("http://www.endgame.nl/cc-history.htm", "http://www.endgame.nl/cc-history.pgn"),
("http://www.endgame.nl/hastings.htm", "http://www.endgame.nl/hastings.pgn"),
("http://www.endgame.nl/ibm.htm", "http://www.endgame.nl/IBM.pgn"),
("http://www.endgame.nl/gambits.htm", "http://www.endgame.nl/gambit.pgn"),
("http://www.endgame.nl/trebitsch.htm", "http://www.endgame.nl/Trebitsch.pgn"),
("http://www.endgame.nl/cloister.htm", "http://www.endgame.nl/TerApel.pgn"),
("http://www.endgame.nl/Biel.html", "http://www.endgame.nl/Biel.pgn"),
("http://www.endgame.nl/USA.html", "http://www.endgame.nl/USA.pgn"),
("http://www.endgame.nl/uk.html", "http://www.endgame.nl/UK.pgn"),
("http://www.endgame.nl/olympiads.html", "http://www.endgame.nl/olympiads.pgn"),
("http://www.endgame.nl/lone_pine.html", "http://www.endgame.nl/lonepine.pgn"),
("http://www.endgame.nl/staunton.html", "http://www.endgame.nl/Staunton.pgn"),
("http://www.endgame.nl/Hoogeveen.html", "http://www.endgame.nl/crown.pgn"),
("http://www.endgame.nl/paoli.html", "http://www.endgame.nl/Paoli.pgn"),
("http://www.endgame.nl/endgame.htm", "http://www.endgame.nl/endgame.pgn"),
("http://www.endgame.nl/estrin.html", "http://www.endgame.nl/Estrin.pgn"),
("http://www.endgame.nl/Argentina.html", "http://www.endgame.nl/Argentina.pgn"),
("http://www.endgame.nl/comeback.html", "http://www.endgame.nl/comeback.pgn"),
("http://www.endgame.nl/strategy.htm", "http://www.endgame.nl/strategy.pgn"),
("http://www.endgame.nl/computer.html", "http://www.endgame.nl/computer.pgn"),
("http://www.endgame.nl/correspondence.html", "http://www.endgame.nl/gambitnimzo.pgn"),
("http://web.inter.nl.net/hcc/rekius/buckle.htm", "http://web.inter.nl.net/hcc/rekius/buckle.pgn"),
("http://web.inter.nl.net/hcc/rekius/euwe.htm", "http://web.inter.nl.net/hcc/rekius/euwem.pgn"),
)
JvR = []
for item in JvR_links:
JvR.append((item[0], "https://raw.githubusercontent.com/gbtami/JvR-archive/master/%s" % item[1][7:]))
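# Note added for clarity: the loop above keeps the article URL and rewrites the
# PGN link to the GitHub mirror by stripping the leading "http://", e.g. the
# first entry becomes ("http://www.endgame.nl/match.htm",
# "https://raw.githubusercontent.com/gbtami/JvR-archive/master/www.endgame.nl/MATCHPGN.ZIP").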
| pychess/pychess | lib/pychess/Database/JvR.py | Python | gpl-3.0 | 6,560 |
## @package hfst.exceptions
## exceptions...
## Base class for HfstExceptions. Holds its own name and the file and line number where it was thrown.
class HfstException:
## A message describing the error in more detail.
def what(self):
pass
## Two or more HfstTransducers are not of the same type (see also TransducerTypeMismatchException).
class HfstTransducerTypeMismatchException(HfstException):
pass
## The library required by the implementation type requested is not linked to HFST.
class ImplementationTypeNotAvailableException(HfstException):
pass
## Function has not been implemented (yet).
class FunctionNotImplementedException(HfstException):
pass
## Flag diacritics encountered on one but not the other side of a transition.
class FlagDiacriticsAreNotIdentitiesException(HfstException):
pass
## The input is not in valid prolog format.
class NotValidPrologFormatException(HfstException):
pass
## Stream cannot be read.
class StreamNotReadableException(HfstException):
pass
## Stream cannot be written.
#
# Thrown by #hfst.HfstOutputStream.write and #hfst.HfstTransducer.write_att
class StreamCannotBeWrittenException(HfstException):
pass
## Stream is closed.
#
# Thrown by #hfst.HfstTransducer.write_att
# #hfst.HfstOutputStream.write
#
# An example:
#
# \verbatim
# try:
# tr = hfst.regex('foo')
# outstr = hfst.HfstOutputStream(filename='testfile')
# outstr.close()
# outstr.write(tr)
# except hfst.exceptions.StreamIsClosedException:
# print("Could not write transducer: stream to file was closed.")
# \endverbatim
class StreamIsClosedException(HfstException):
pass
## The stream is at end.
#
# Thrown by
# #hfst.HfstTransducer
# #hfst.HfstInputStream.__init__
class EndOfStreamException(HfstException):
pass
## Transducer is cyclic.
#
# Thrown by #hfst.HfstTransducer.extract_paths. An example
# \verbatim
# transducer = hfst.regex('[a:b]*')
# try:
# results = transducer.extract_paths(output='text')
# print("The transducer has %i paths:" % len(results))
# print(results)
# except hfst.exceptions.TransducerIsCyclicException:
# print("The transducer is cyclic and has an infinite number of paths. Some of them:")
# results = transducer.extract_paths(output='text', max_cycles=5)
# print(results)
# \endverbatim
class TransducerIsCyclicException(HfstException):
pass
## The stream does not contain transducers.
#
# Thrown by
# #hfst.HfstTransducer
# #hfst.HfstInputStream.__init__
#
# An example.
# \verbatim
# f = open('foofile', 'w')
# f.write('This is an ordinary text file.\n')
# f.close()
# try:
# instr = hfst.HfstInputStream('foofile')
# tr = instr.read()
# print(tr)
# instr.close()
# except hfst.exceptions.NotTransducerStreamException:
# print("Could not print transducer: the file does not contain binary transducers.")
# \endverbatim
class NotTransducerStreamException(HfstException):
pass
## The stream is not in valid AT&T format.
#
# An example:
# \verbatim
# f = open('testfile1.att', 'w')
# f.write('0 1 a b\n\
# 1 2 c\n\
# 2\n')
# f.close()
# f = hfst.hfst_open('testfile1.att', 'r')
# try:
# tr = hfst.read_att(f)
# except hfst.exceptions.NotValidAttFormatException:
# print('Could not read file: it is not in valid ATT format.')
# f.close()
# \endverbatim
# thrown by
# #hfst.HfstTransducer.__init__
class NotValidAttFormatException(HfstException):
pass
## The input is not in valid LexC format.
class NotValidLexcFormatException(HfstException):
pass
## State is not final (and cannot have a final weight).
#
# An example :
#
# \verbatim
# tr = hfst.HfstBasicTransducer()
# tr.add_state(1)
# # An exception is thrown as state number 1 is not final
# try:
# w = tr.get_final_weight(1)
# except hfst.exceptions.StateIsNotFinalException:
# print("State is not final.")
# \endverbatim
#
# You should use function #hfst.HfstBasicTransducer.is_final_state if you are not sure whether a
# state is final.
#
# Thrown by #hfst.HfstBasicTransducer get_final_weight.
class StateIsNotFinalException(HfstException):
pass
## Transducers given as rule context are not automata.
# @see hfst.HfstTransducer.is_automaton()
class ContextTransducersAreNotAutomataException(HfstException):
pass
## Transducers are not automata.
#
# Example:
# \verbatim
# tr1 = hfst.regex('foo:bar')
# tr2 = hfst.regex('bar:baz')
# try:
# tr1.cross_product(tr2)
# except hfst.exceptions.TransducersAreNotAutomataException:
# print('Transducers must be automata in cross product.')
# \endverbatim
# This exception is thrown by
# #hfst.HfstTransducer.cross_product
# when either input transducer does not have equivalent input and
# output symbols in all its transitions.
class TransducersAreNotAutomataException(HfstException):
pass
## The state number argument is not valid.
#
# An example :
#
# \verbatim
# tr = hfst.HfstBasicTransducer()
# tr.add_state(1)
# try:
# w = tr.get_final_weight(2)
# except hfst.exceptions.StateIndexOutOfBoundsException:
# print('State number 2 does not exist')
# \endverbatim
class StateIndexOutOfBoundsException(HfstException):
pass
## Transducer has a malformed HFST header.
#
# Thrown by #hfst.HfstTransducer.__init__ #hfst.HfstInputStream
class TransducerHeaderException(HfstException):
pass
## An OpenFst transducer does not have an input symbol table.
#
# When converting from OpenFst to tropical or log HFST, the OpenFst transducer
# must have at least an input symbol table. If the output symbol table
# is missing, it is assumed to be equivalent to the input symbol table.
#
# Thrown by hfst.HfstTransducer.__init__
class MissingOpenFstInputSymbolTableException(HfstException):
pass
## Two or more transducers do not have the same type.
#
# This can happen if (1) the calling and called transducer in a binary
# operation, (2) two transducers in a pair of transducers,
# (3) two consecutive transducers coming from an HfstInputStream or
# (4) two transducers in a function taking two or more transducers as
# arguments do not have the same type.
#
# An example:
# \verbatim
# hfst.set_default_fst_type(hfst.types.TROPICAL_OPENFST_TYPE)
# tr1 = hfst.regex('foo')
# tr2 = hfst.regex('bar')
# tr2.convert(hfst.types.FOMA_TYPE)
# try:
# tr1.disjunct(tr2)
# except hfst.exceptions.TransducerTypeMismatchException:
# print('The implementation types of transducers must be the same.')
# \endverbatim
class TransducerTypeMismatchException(HfstException):
pass
## The set of transducer pairs is empty.
#
# Thrown by rule functions.
class EmptySetOfContextsException(HfstException):
pass
## The type of a transducer is not specified.
#
# This exception is thrown when an implementation type argument
# is hfst.types.ERROR_TYPE.
class SpecifiedTypeRequiredException(HfstException):
pass
## An error happened probably due to a bug in the HFST code.
class HfstFatalException(HfstException):
pass
## Transducer has wrong type.
#
# This exception suggests that an HfstTransducer has not been properly
# initialized, probably due to a bug in the HFST library. Alternatively
# the default constructor of HfstTransducer has been called at some point.
#
# @see #hfst.HfstTransducer.__init__
class TransducerHasWrongTypeException(HfstException):
pass
## String is not valid utf-8.
#
# This exception suggests that an input string is not valid utf8.
#
class IncorrectUtf8CodingException(HfstException):
pass
## An argument string is an empty string.
# A transition symbol cannot be an empty string.
class EmptyStringException(HfstException):
pass
## A bug in the HFST code.
class SymbolNotFoundException(HfstException):
pass
## A piece of metadata in an HFST header is not supported.
class MetadataException(HfstException):
pass
| wikimedia/operations-debs-contenttranslation-hfst | python/doc/hfst/exceptions/__init__.py | Python | gpl-3.0 | 7,967 |
#!/usr/bin/env python
import qa
import re
import numpy
have_use = re.compile("^\s{1,12}use\s")
remove_warn = re.compile('''(?!.*QA_WARN .+)''', re.VERBOSE)
unwanted = re.compile("(\s|&|\n)", re.VERBOSE)
def do_magic(files, options):
name = files[0]
glob = []
temp = []
for f in files[2:]:
lines = open(f, 'r').readlines()
temp = qa.remove_amp(filter(remove_warn.match, lines), True)
uses = [f for f in filter(have_use.search, temp) if (
re.match("\s{0,9}use " + name, f))]
for f in uses:
glob.extend(f.split("only: ")[1].strip().split(','))
return numpy.unique([unwanted.sub('', f) for f in glob])
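# Illustrative example (hypothetical input, not from the repository): for a
# Fortran file containing the line "use grid, only: x, y", calling
# do_magic(["grid", "140", "file.F90"], options) collects "x" and "y" and
# returns their unique, sorted names.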
def pretty_format(list, col):
print " public :: &"
str = " & "
for item in list:
if(len(str) + len(item) + 2 > int(col)):
print str + "&"
str = " & "
str = str + item + ", "
print str.rstrip(", ")
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: %prog module_name line_lenght FILES\n\nExample: bin/generate_public.py grid 140 $(find . -name \"*F90\")"
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="make lots of noise [default]")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose",
help="be vewwy quiet (I'm hunting wabbits)")
parser.add_option("-f", "--force",
action="store_true", dest="force",
help="commit despite errors (It will be logged)")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("incorrect number of arguments")
tab = do_magic(args, options)
pretty_format(tab, args[1])
| varadarajan87/piernik | bin/generate_public.py | Python | gpl-3.0 | 1,873 |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
KEY = 'miclaveparafirmarurls';
def index():
"""
Static welcome screen;
it only shows a link to the manager.
"""
return dict()
@auth.requires_login()
def manager():
"""
Allows creating new events, viewing/copying those created by others
and editing the ones created by the current user.
"""
form = FORM(T('New Event'),
INPUT(_name='name', requires=IS_NOT_EMPTY()),
INPUT(_type='submit'))
if form.accepts(request, session):
# Generate the new empty event
name = form.vars.name
owner_event = auth.user_id
event_data = base_json_event_data.replace('"name":null', '"name":"'+name+'"')
shared_data = base_json_shared_data
id_event = db.events.insert(name=name, owner_event=owner_event,
json_event_data=event_data,
json_shared_data=shared_data)
if id_event:
redirect(URL('default', 'event',
args=[db.events[id_event].slug]))
else:
response.flash = T('The new event can\'t be created')
elif form.errors:
response.flash = T('The form has errors')
if request.vars.id_event:
if request.vars.operation == 'copy':
if not URL.verify(request, hmac_key=KEY): # verify that the action is legitimate
raise HTTP(403)
shared_data = base_json_shared_data
row = db.events[request.vars.id_event]
event_data = row.json_event_data
name = row.name + T('(copy)')
if db.events.insert(name=name, owner_event=auth.user_id,
json_event_data=event_data,
json_shared_data=shared_data):
response.flash = T('Event copied')
else:
response.flash = T('It can\'t be copied')
elif request.vars.operation == 'delete':
if not URL.verify(request, hmac_key=KEY): # verify that the action is legitimate
raise HTTP(403)
if db.events[request.vars.id_event] \
and db.events[request.vars.id_event].owner_event == auth.user_id:
del db.events[request.vars.id_event]
response.flash = T('Event deleted')
else:
response.flash = T('You do not have permission to do that')
events = db(db.events).select(db.events.ALL)
return dict(events=events,form=form,key=KEY)
@auth.requires_login()
def event():
"""
This is the main page of the application. It has two modes that
depend on the user's privileges: if the user created this event it
opens in edit mode, otherwise in read-only mode, except for the
shared data (currently only attendance). The parameter is the event
name as a slug, used to open the page directly.
A logged-in user is required.
"""
if not request.args[0]:
redirect(URL('default', 'manager'))
event = db(db.events.slug == request.args[0]).select(db.events.ALL).first()
if not event:
event = db.events[request.args[0]]
if event:
redirect(URL('default', 'event', args=[event.slug]))
else:
raise HTTP(404, T('Event not found'))
is_owner = event.owner_event == auth.user_id
return dict(event=event, is_owner=is_owner)
def print_event():
"""
Generates a print-friendly view with the data from the parent
window (event).
"""
return dict()
@auth.requires_login()
def event_data():
"""
JSON web service to upload and fetch data about the event. Only the
event creator can upload data; registered users can read it. The
id_event parameter returns the JSON data of that event. If the
post/get "data" parameter is used, data can be uploaded that
overwrites the current data (only the creator can do this); it
returns true if the data was updated successfully, false on error.
"""
if request.vars.id_event:
if request.vars.data:
if db.events[request.vars.id_event].owner_event == auth.user_id:
# Update the values
db.events[request.vars.id_event] = dict(json_event_data=request.vars.data)
return 'true'
else:
raise HTTP(500, 'false')
else:
# Return the JSON
return db.events[request.vars.id_event].json_event_data
else:
raise HTTP(400, 'false')
@auth.requires_login()
def shared_event_data():
"""
JSON web service; in this case any registered user can modify the
event data. The first parameter indicates which event to work on;
the get/post parameters are variable, id_object and value. "variable"
indicates which child JSON object the change applies to, "id_object"
which child of that variable, and finally "value" is the new value:
if it does not exist it is created, and if it exists it is replaced.
If no parameter is given, the whole shared_data JSON object is
returned.
Being logged in is required.
"""
if request.vars.id_event:
if request.vars.variable \
and request.vars.id_object and request.vars.value:
json_shared_data = db.events[request.vars.id_event].json_shared_data
import json
python_shared_data = json.loads(json_shared_data)
python_shared_data[request.vars.variable][request.vars.id_object] = request.vars.value
json_shared_data = json.dumps(python_shared_data)
# Update the JSON in the database
db.events[request.vars.id_event] = dict(json_shared_data=json_shared_data)
return 'true'
else:
return db.events[request.vars.id_event].json_shared_data
else:
raise HTTP(400, 'false')
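# Illustrative usage (hypothetical app name and values, not from the original code):
#   /myapp/default/shared_event_data?id_event=1
#     -> returns the whole shared_data JSON object
#   /myapp/default/shared_event_data?id_event=1&variable=attendance&id_object=42&value=yes
#     -> updates one entry and returns 'true'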
@auth.requires_login()
def rename_event():
""" Renombrar el evento """
if request.vars.id_event and request.vars.new_name:
if db.events[request.vars.id_event].owner_event == auth.user_id:
db.events[request.vars.id_event] = dict(name=request.vars.new_name)
else:
raise HTTP(500, 'false')
else:
raise HTTP(400, 'false')
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/manage_users (requires membership in
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
| Yelrado/web-admin-events | controllers/default.py | Python | gpl-3.0 | 6,866 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
from werkzeug.exceptions import Forbidden
from indico.modules.rb.controllers import RHRoomBookingBase
from indico.modules.rb.util import rb_is_admin
from indico.util.i18n import _
class RHRoomBookingAdminBase(RHRoomBookingBase):
"""
Adds admin authorization. All classes that implement admin
tasks should be derived from this class.
"""
def _checkProtection(self):
if session.user is None:
self._checkSessionUser()
elif not rb_is_admin(session.user):
raise Forbidden(_('You are not authorized to take this action.'))
| belokop/indico_bare | indico/modules/rb/controllers/admin/__init__.py | Python | gpl-3.0 | 1,335 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
def serialize_ip_network_group(group):
"""Serialize group to JSON-like object"""
return {
'id': group.id,
'name': group.name,
'identifier': 'IPNetworkGroup:{}'.format(group.id),
'_type': 'IPNetworkGroup'
}
| nop33/indico | indico/modules/networks/util.py | Python | gpl-3.0 | 1,020 |
'''
NFI -- Silensec's Nyuki Forensics Investigator
Copyright (C) 2014 George Nicolaou (george[at]silensec[dot]com)
Silensec Ltd.
Juma Fredrick (j.fredrick[at]silensec[dot]com)
Silensec Ltd.
This file is part of Nyuki Forensics Investigator (NFI).
NFI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
NFI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with NFI. If not, see <http://www.gnu.org/licenses/>.
'''
from IApp import IApp, KnownTable, DataTypes
import ConvertUtils
class com_google_android_feedback(IApp):
name = "com.google.android.feedback"
cname = "Google Android Feedback"
databases = {
"webviewCookiesChromium.db": [
KnownTable("cookies", None,
{"creation_utc":ConvertUtils.WebkitToUnix,
"expires_utc":ConvertUtils.WebkitToUnix,
"last_access_utc":ConvertUtils.WebkitToUnix },
{"creation_utc":DataTypes.DATE,
"expires_utc":DataTypes.DATE,
"last_access_utc":DataTypes.DATE })
],
}
def __init__(self):
self.known = True
| georgenicolaou/nfi | AndroidApps/com_google_android_feedback.py | Python | gpl-3.0 | 1,587 |
# Developed for module tiericide, this script will quickly print out a market
# conversion map based on patch notes, as well as database conversion mapping.
import argparse
import os.path
import sqlite3
import sys
# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.realpath(os.path.join(path, "..")))
# change to correct conversion
rename_phrase = " is now known as "
conversion_phrase = " is being converted to "
text = """Partial Weapon Navigation is being converted to Phased Scoped Target Painter
Indirect Scanning Dampening Unit I is being converted to Phased Muon Scoped Sensor Dampener
'Broker' Remote Sensor Dampener I is being converted to 'Executive' Remote Sensor Dampener
Initiated Ion Field ECM I is being converted to Hypnos Scoped Magnetometric ECM
FZ-3 Subversive Spatial Destabilizer ECM is being converted to BZ-5 Scoped Gravimetric ECM
'Penumbra' White Noise ECM is being converted to Umbra Scoped Radar ECM
Faint Phase Inversion ECM I is being converted to Enfeebling Scoped Ladar ECM
'Hypnos' Multispectral ECM I is being converted to Compulsive Scoped Multispectral ECM
1Z-3 Subversive ECM Eruption is being converted to Cetus Scoped Burst Jammer
'Prayer' Remote Tracking Computer is being converted to P-S Compact Remote Tracking Computer
'Tycoon' Remote Tracking Computer is being converted to 'Enterprise' Remote Tracking Computer
Monopulse Tracking Mechanism I is being converted to F-12 Enduring Tracking Computer
'Orion' Tracking CPU I is being converted to Optical Compact Tracking Computer
'Economist' Tracking Computer I is being converted to 'Marketeer' Tracking Computer
Beta-Nought Tracking Mode is being converted to 'Basic' Tracking Enhancer
Azimuth Descalloping Tracking Enhancer is being converted to 'Basic' Tracking Enhancer
F-AQ Delay-Line Scan Tracking Subroutines is being converted to 'Basic' Tracking Enhancer
Beam Parallax Tracking Program is being converted to 'Basic' Tracking Enhancer
Sigma-Nought Tracking Mode I is being converted to Fourier Compact Tracking Enhancer
Auto-Gain Control Tracking Enhancer I is being converted to Fourier Compact Tracking Enhancer
F-aQ Phase Code Tracking Subroutines is being converted to Fourier Compact Tracking Enhancer
Lateral Gyrostabilizer is being converted to 'Basic' Gyrostabilizer
F-M2 Weapon Inertial Suspensor is being converted to 'Basic' Gyrostabilizer
Hydraulic Stabilization Actuator is being converted to 'Basic' Gyrostabilizer
Stabilized Weapon Mounts is being converted to 'Basic' Gyrostabilizer
Cross-Lateral Gyrostabilizer I is being converted to Counterbalanced Compact Gyrostabilizer
F-M3 Munition Inertial Suspensor is being converted to Counterbalanced Compact Gyrostabilizer
Pneumatic Stabilization Actuator I is being converted to Counterbalanced Compact Gyrostabilizer
Monophonic Stabilization Actuator I is being converted to 'Kindred' Gyrostabilizer
Monophonic Stabilization Actuator I Blueprint is being converted to 'Kindred' Gyrostabilizer Blueprint
Heat Exhaust System is being converted to 'Basic' Heat Sink
C3S Convection Thermal Radiator is being converted to 'Basic' Heat Sink
'Boreas' Coolant System is being converted to 'Basic' Heat Sink
Stamped Heat Sink is being converted to 'Basic' Heat Sink
Thermal Exhaust System I is being converted to Extruded Compact Heat Sink
C4S Coiled Circuit Thermal Radiator is being converted to Extruded Compact Heat Sink
'Skadi' Coolant System I is being converted to Extruded Compact Heat Sink
'Mangonel' Heat Sink I is being converted to 'Trebuchet' Heat Sink I
'Mangonel' Heat Sink I Blueprint is being converted to 'Trebuchet' Heat Sink Blueprint
Insulated Stabilizer Array is being converted to 'Basic' Magnetic Field Stabilizer
Linear Flux Stabilizer is being converted to 'Basic' Magnetic Field Stabilizer
Gauss Field Balancer is being converted to 'Basic' Magnetic Field Stabilizer
Magnetic Vortex Stabilizer is being converted to 'Basic' Magnetic Field Stabilizer
Insulated Stabilizer Array I is being converted to Vortex Compact Magnetic Field Stabilizer
Linear Flux Stabilizer I is being converted to Vortex Compact Magnetic Field Stabilizer
Gauss Field Balancer I is being converted to Vortex Compact Magnetic Field Stabilizer
'Capitalist' Magnetic Field Stabilizer I is being converted to 'Monopoly' Magnetic Field Stabilizer
'Capitalist' Magnetic Field Stabilizer I Blueprint is being converted to 'Monopoly' Magnetic Field Stabilizer Blueprint
Muon Coil Bolt Array I is being converted to Crosslink Compact Ballistic Control System
Multiphasic Bolt Array I is being converted to Crosslink Compact Ballistic Control System
'Pandemonium' Ballistic Enhancement is being converted to Crosslink Compact Ballistic Control System
Ballistic 'Purge' Targeting System I is being converted to 'Full Duplex' Ballistic Control System
Ballistic 'Purge' Targeting System I Blueprint is being converted to 'Full Duplex' Ballistic Control System Blueprint
'Langour' Drive Disruptor I is being converted to X5 Enduring Stasis Webifier
Patterned Stasis Web I is being converted to Fleeting Compact Stasis Webifier
Fleeting Progressive Warp Scrambler I is being converted to Faint Epsilon Scoped Warp Scrambler
Fleeting Warp Disruptor I is being converted to Faint Scoped Warp Disruptor
GLFF Containment Field is being converted to 'Basic' Damage Control
Interior Force Field Array is being converted to 'Basic' Damage Control
F84 Local Damage System is being converted to 'Basic' Damage Control
Systematic Damage Control is being converted to 'Basic' Damage Control
'Gonzo' Damage Control I is being converted to 'Radical' Damage Control
'Gonzo' Damage Control I Blueprint is being converted to 'Radical' Damage Control Blueprint
Emergency Damage Control I is being converted to IFFA Compact Damage Control
F85 Peripheral Damage System I is being converted to IFFA Compact Damage Control
Pseudoelectron Containment Field I is being converted to IFFA Compact Damage Control
Micro Ld-Acid Capacitor Battery I is being converted to 'Micro' Cap Battery
Micro Ohm Capacitor Reserve I is being converted to 'Micro' Cap Battery
Micro F-4a Ld-Sulfate Capacitor Charge Unit is being converted to 'Micro' Cap Battery
Micro Peroxide Capacitor Power Cell is being converted to 'Micro' Cap Battery
Micro Capacitor Battery II is being converted to 'Micro' Cap Battery
Small Ohm Capacitor Reserve I is being converted to Small Compact Pb-Acid Cap Battery
Small F-4a Ld-Sulfate Capacitor Charge Unit is being converted to Small Compact Pb-Acid Cap Battery
Small Peroxide Capacitor Power Cell is being converted to Small Compact Pb-Acid Cap Battery
Medium Ohm Capacitor Reserve I is being converted to Medium Compact Pb-Acid Cap Battery
Medium F-4a Ld-Sulfate Capacitor Charge Unit is being converted to Medium Compact Pb-Acid Cap Battery
Medium Peroxide Capacitor Power Cell is being converted to Medium Compact Pb-Acid Cap Battery
Large Ohm Capacitor Reserve I is being converted to Large Compact Pb-Acid Cap Battery
Large F-4a Ld-Sulfate Capacitor Charge Unit is being converted to Large Compact Pb-Acid Cap Battery
Large Peroxide Capacitor Power Cell is being converted to Large Compact Pb-Acid Cap Battery
ECCM - Radar I is being converted to Sensor Booster I
ECCM - Ladar I is being converted to Sensor Booster I
ECCM - Magnetometric I is being converted to Sensor Booster I
ECCM - Gravimetric I is being converted to Sensor Booster I
ECCM - Omni I is being converted to Sensor Booster I
ECCM - Radar I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Ladar I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Magnetometric I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Gravimetric I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Omni I Blueprint is being converted to Sensor Booster I Blueprint
Alumel Radar ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Ladar ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Gravimetric ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Omni ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Magnetometric ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Ladar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Gravimetric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Omni ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Radar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Magnetometric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Extra Radar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Extra Ladar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Extra Gravimetric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Extra Magnetometric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Gravimetric Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Radar Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Omni Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Ladar Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Magnetometric Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Incremental Radar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Incremental Ladar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Incremental Gravimetric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Incremental Magnetometric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Radar Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Ladar Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Gravimetric Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Omni Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Magnetometric Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Conjunctive Radar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Conjunctive Ladar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Conjunctive Gravimetric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Conjunctive Magnetometric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
ECCM - Omni II is being converted to Sensor Booster II
ECCM - Gravimetric II is being converted to Sensor Booster II
ECCM - Ladar II is being converted to Sensor Booster II
ECCM - Magnetometric II is being converted to Sensor Booster II
ECCM - Radar II is being converted to Sensor Booster II
ECCM - Omni II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Gravimetric II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Ladar II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Magnetometric II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Radar II Blueprint is being converted to Sensor Booster II Blueprint
'Forger' ECCM - Magnetometric I is being converted to 'Shady' Sensor Booster
'Forger' ECCM - Magnetometric I Blueprint is being converted to 'Shady' Sensor Booster Blueprint
Basic RADAR Backup Array is being converted to 'Basic' Signal Amplifier
Basic Ladar Backup Array is being converted to 'Basic' Signal Amplifier
Basic Gravimetric Backup Array is being converted to 'Basic' Signal Amplifier
Basic Magnetometric Backup Array is being converted to 'Basic' Signal Amplifier
Basic Multi Sensor Backup Array is being converted to 'Basic' Signal Amplifier
Emergency Magnetometric Scanners is being converted to 'Basic' Signal Amplifier
Emergency Multi-Frequency Scanners is being converted to 'Basic' Signal Amplifier
Emergency RADAR Scanners is being converted to 'Basic' Signal Amplifier
Emergency Ladar Scanners is being converted to 'Basic' Signal Amplifier
Emergency Gravimetric Scanners is being converted to 'Basic' Signal Amplifier
Sealed RADAR Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Magnetometric Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Multi-Frequency Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Ladar Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Gravimetric Backup Cluster is being converted to 'Basic' Signal Amplifier
Surplus RADAR Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative RADAR Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Magnetometric Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Magnetometric Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Multi-Frequency Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Multi-Frequency Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Ladar Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Ladar Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Gravimetric Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Gravimetric Backup Sensors is being converted to 'Basic' Signal Amplifier
Gravimetric Backup Array I is being converted to Signal Amplifier I
Ladar Backup Array I is being converted to Signal Amplifier I
Magnetometric Backup Array I is being converted to Signal Amplifier I
Multi Sensor Backup Array I is being converted to Signal Amplifier I
RADAR Backup Array I is being converted to Signal Amplifier I
Gravimetric Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Ladar Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Magnetometric Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Multi Sensor Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
RADAR Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Protected Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected Multi-Frequency Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected RADAR Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Reserve Gravimetric Scanners is being converted to F-89 Compact Signal Amplifier
Reserve Ladar Scanners is being converted to F-89 Compact Signal Amplifier
Reserve Magnetometric Scanners is being converted to F-89 Compact Signal Amplifier
Reserve Multi-Frequency Scanners is being converted to F-89 Compact Signal Amplifier
Reserve RADAR Scanners is being converted to F-89 Compact Signal Amplifier
Secure Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Secure Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Secure Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Secure Radar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Gravimetric Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Ladar Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Magnetometric Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Multi-Frequency Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive RADAR Backup Sensors is being converted to F-89 Compact Signal Amplifier
Shielded Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Shielded Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Shielded Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Shielded Radar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Surrogate Gravimetric Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate Ladar Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate Magnetometric Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate Multi-Frequency Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate RADAR Reserve Array I is being converted to F-89 Compact Signal Amplifier
Warded Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Warded Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Warded Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Warded Radar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Gravimetric Backup Array II is being converted to Signal Amplifier II
Ladar Backup Array II is being converted to Signal Amplifier II
Magnetometric Backup Array II is being converted to Signal Amplifier II
Multi Sensor Backup Array II is being converted to Signal Amplifier II
RADAR Backup Array II is being converted to Signal Amplifier II
Gravimetric Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Ladar Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Magnetometric Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Multi Sensor Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
RADAR Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Gravimetric Firewall is being converted to 'Firewall' Signal Amplifier
Ladar Firewall is being converted to 'Firewall' Signal Amplifier
Magnetometric Firewall is being converted to 'Firewall' Signal Amplifier
Multi Sensor Firewall is being converted to 'Firewall' Signal Amplifier
RADAR Firewall is being converted to 'Firewall' Signal Amplifier
ECCM Projector I is being converted to Remote Sensor Booster I
ECCM Projector I Blueprint is being converted to Remote Sensor Booster I Blueprint
Scattering ECCM Projector I is being converted to Linked Enduring Sensor Booster
Piercing ECCM Emitter I is being converted to Coadjunct Scoped Remote Sensor Booster
Spot Pulsing ECCM I is being converted to F-23 Compact Remote Sensor Booster
Phased Muon ECCM Caster I is being converted to F-23 Compact Remote Sensor Booster
ECCM Projector II is being converted to Remote Sensor Booster II
ECCM Projector II Blueprint is being converted to Remote Sensor Booster II Blueprint
Prototype Sensor Booster is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Scanning CPU I is being converted to F-90 Compact Sensor Booster
Amplitude Signal Enhancer is being converted to 'Basic' Signal Amplifier
'Acolyth' Signal Booster is being converted to 'Basic' Signal Amplifier
Type-E Discriminative Signal Augmentation is being converted to 'Basic' Signal Amplifier
F-90 Positional Signal Amplifier is being converted to 'Basic' Signal Amplifier
'Mendicant' Signal Booster I is being converted to F-89 Compact Signal Amplifier
Wavelength Signal Enhancer I is being converted to F-89 Compact Signal Amplifier
Type-D Attenuation Signal Augmentation is being converted to F-89 Compact Signal Amplifier
Connected Remote Sensor Booster is being converted to F-23 Compact Remote Sensor Booster
'Boss' Remote Sensor Booster is being converted to 'Bootleg' Remote Sensor Booster
'Entrepreneur' Remote Sensor Booster is being converted to 'Bootleg' Remote Sensor Booster
'Pacifier' Large Remote Armor Repairer is being converted to 'Peace' Large Remote Armor Repairer
'Pacifier' Large Remote Armor Repairer Blueprint is being converted to 'Peace' Large Remote Armor Repairer Blueprint
'Broker' Remote Sensor Dampener I Blueprint is being converted to 'Executive' Remote Sensor Dampener Blueprint
'Tycoon' Remote Tracking Computer Blueprint is being converted to 'Enterprise' Remote Tracking Computer Blueprint
'Economist' Tracking Computer I Blueprint is being converted to 'Marketeer' Tracking Computer Blueprint"""
def main(old, new):
# Open both databases and get their cursors
old_db = sqlite3.connect(os.path.expanduser(old))
old_cursor = old_db.cursor()
new_db = sqlite3.connect(os.path.expanduser(new))
new_cursor = new_db.cursor()
renames = {}
conversions = {}
for x in text.splitlines():
if conversion_phrase in x:
c = x.split(conversion_phrase)
container = conversions
elif rename_phrase in x:
c = x.split(rename_phrase)
container = renames
else:
print("Unknown format: {}".format(x))
sys.exit()
old_name, new_name = c[0], c[1]
old_item, new_item = None, None
if "Blueprint" in old_name or "Blueprint" in new_name:
print("Blueprint: Skipping this line: %s"%x)
continue
# gather item info
new_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (new_name,))
for row in new_cursor:
new_item = row[0]
break
old_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (old_name,))
for row in old_cursor:
old_item = row[0]
break
if not old_item:
print("Error finding old item in {} -> {}".format(old_name, new_name))
if not new_item:
print("Error finding new item in {} -> {}".format(old_name, new_name))
if not container.get((new_item,new_name), None):
container[(new_item,new_name)] = []
container[(new_item,new_name)].append((old_item, old_name))
print(" # Renamed items")
for new, old in renames.items():
if len(old) != 1:
print("Incorrect length, key: {}, value: {}".format(new, old))
sys.exit()
old = old[0]
print(" \"{}\": \"{}\",".format(old[1], new[1]))
# Convert modules
print("\n # Converted items")
for new, olds in conversions.items():
for old in olds:
print(" \"{}\": \"{}\",".format(old[1], new[1]))
print()
print()
for new, old in conversions.items():
print(" {}: ( # {}".format(new[0], new[1]))
for item in old:
print(" {}, # {}".format(item[0], item[1]))
print(" ),")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--old", type=str)
parser.add_argument("-n", "--new", type=str)
args = parser.parse_args()
main(args.old, args.new)
| bsmr-eve/Pyfa | scripts/conversion.py | Python | gpl-3.0 | 23,244 |
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from libqtopensesame.extensions import base_extension
class quick_switcher(base_extension):
"""
desc:
The quick-switcher allows you to quickly navigate to items and
functions, and to quickly activate menu actions.
"""
# We need to update or fully refresh the dialog after several structural
# changes.
def event_startup(self):
self.d = None
def event_open_experiment(self, path):
self.d = None
def event_rename_item(self, from_name, to_name):
if self.d is not None:
self.d.rename_item(from_name, to_name)
def event_new_item(self, name, _type):
if self.d is not None:
self.d.add_item(name)
def event_delete_item(self, name):
if self.d is not None:
self.d.delete_item(name)
def event_purge_unused_items(self):
self.d = None
def event_regenerate(self):
self.d = None
def event_change_item(self, name):
if self.d is not None:
if self.experiment.items._type(name) == u'inline_script':
self.d.refresh_item(name)
def event_open_item(self, name):
if self.d is not None:
self.d.bump_item(name)
def init_dialog(self):
"""
desc:
Re-init the dialog.
"""
self.set_busy()
from quick_switcher_dialog.dialog import quick_switcher
self.d = quick_switcher(self.main_window)
self.set_busy(False)
def activate(self):
"""
desc:
Pops up the quick-switcher dialog.
"""
if not hasattr(self, u'd') or self.d is None:
self.init_dialog()
self.d.items_list_widget.sortItems()
self.d.exec_()
| eort/OpenSesame | opensesame_extensions/quick_switcher/quick_switcher.py | Python | gpl-3.0 | 2,188 |
"""
This code will fail at runtime...
Could you help `mypy` catch the problem at compile time?
"""
def sum_numbers(*n) -> float:
"""Sums up any number of numbers"""
return sum(n)
if __name__ == '__main__':
sum_numbers(1, 2.0) # this is not a bug
sum_numbers('4', 5) # this is a bug - can `mypy` catch it?!
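# A minimal sketch of one possible fix (not part of the original exercise):
# annotating the varargs parameter lets mypy reject the string argument above.
#
#   def sum_numbers(*n: float) -> float:
#       """Sums up any number of numbers"""
#       return sum(n)
#
#   sum_numbers('4', 5)  # mypy: argument 1 has incompatible type "str"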
| etingof/talks | pyvo-optional-static-typing/code/12-add-annotations.py | Python | gpl-3.0 | 331 |
"""
Microsoft Windows Help (HLP) parser for Hachoir project.
Documents:
- Windows Help File Format / Annotation File Format / SHG and MRB File Format
written by M. Winterhoff ([email protected])
found on http://www.wotsit.org/
Author: Victor Stinner
Creation date: 2007-09-03
"""
from hachoir_py3.parser import Parser
from hachoir_py3.field import (FieldSet,
Bits, Int32, UInt16, UInt32,
NullBytes, RawBytes, PaddingBytes, String)
from hachoir_py3.core.endian import LITTLE_ENDIAN
from hachoir_py3.core.text_handler import (textHandler, hexadecimal,
displayHandler, humanFilesize)
class FileEntry(FieldSet):
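    # An internal file header: reserved/used space and flags, followed by a
    # B+ tree header (structure, page size, root page, page/level/entry counts).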
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = self["res_space"].value * 8
def createFields(self):
yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize)
yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize)
yield Bits(self, "file_flags", 8, "(=4)")
yield textHandler(UInt16(self, "magic"), hexadecimal)
yield Bits(self, "flags", 16)
yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize)
yield String(self, "structure", 16, strip="\0", charset="ASCII")
yield NullBytes(self, "zero", 2)
yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered")
yield UInt16(self, "root_page", "Page number of B+ tree root page")
yield PaddingBytes(self, "one", 2, pattern="\xFF")
yield UInt16(self, "nb_page", "Number of B+ tree pages")
yield UInt16(self, "nb_level", "Number of levels of B+ tree")
yield UInt16(self, "nb_entry", "Number of entries in B+ tree")
size = (self.size - self.current_size) // 8
if size:
yield PaddingBytes(self, "reserved_space", size)
class HlpFile(Parser):
PARSER_TAGS = {
"id": "hlp",
"category": "misc",
"file_ext": ("hlp",),
"min_size": 32,
"description": "Microsoft Windows Help (HLP)",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["magic"].value != 0x00035F3F:
return "Invalid magic"
if self["filesize"].value != self.stream.size // 8:
return "Invalid magic"
return True
def createFields(self):
yield textHandler(UInt32(self, "magic"), hexadecimal)
yield UInt32(self, "dir_start", "Directory start")
yield Int32(self, "first_free_block", "First free block")
yield UInt32(self, "filesize", "File size in bytes")
yield self.seekByte(self["dir_start"].value)
yield FileEntry(self, "file[]")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "end", size)
| SickGear/SickGear | lib/hachoir_py3/parser/misc/hlp.py | Python | gpl-3.0 | 2,932 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import logging
from datetime import datetime
from typing import Any, ClassVar, Dict
import tinydb
logger = logging.getLogger(__name__)
class Migration(metaclass=abc.ABCMeta):
"""Migrates schema from <SCHEMA_VERSION-1> to <SCHEMA_VERSION>."""
SCHEMA_VERSION: ClassVar[int] = 0
def __init__(self, *, db: tinydb.TinyDB, snapcraft_version: str) -> None:
self.db = db
self._snapcraft_version = snapcraft_version
def _query_control_record(self) -> Dict[str, Any]:
"""Query control record (single document in 'control' table)."""
control_table = self.db.table("control")
control_records = control_table.all()
if len(control_records) == 0:
return dict(schema_version=0)
elif len(control_records) == 1:
return control_records[0]
raise RuntimeError(f"Invalid control records: {control_records!r}")
def _update_control_schema_version(self) -> None:
"""Update 'control' table record to SCHEMA_VERSION."""
control_record = self._query_control_record()
control_record["schema_version"] = self.SCHEMA_VERSION
control_table = self.db.table("control")
control_table.truncate()
control_table.insert(control_record)
def _record_migration(self) -> None:
"""Record migration in 'migration' table."""
migration_table = self.db.table("migration")
migration_table.insert(
{
"schema_version": self.SCHEMA_VERSION,
"snapcraft_version": self._snapcraft_version,
"timestamp": datetime.utcnow().isoformat() + "Z",
}
)
@abc.abstractmethod
def _migrate(self) -> None:
"""Per-migration implementation."""
...
def apply(self) -> int:
"""Apply migration, if determined to be necessary.
Returns current schema version."""
control_record = self._query_control_record()
current_schema_version = control_record["schema_version"]
if self.SCHEMA_VERSION <= current_schema_version:
logger.debug(
f"Migration apply: migration {self.SCHEMA_VERSION} already applied, ignoring..."
)
return current_schema_version
logger.debug(
f"Migration apply: applying migration for {self.SCHEMA_VERSION} for {control_record}"
)
self._migrate()
self._record_migration()
self._update_control_schema_version()
return self.SCHEMA_VERSION
class MigrationV1(Migration):
"""Default (Initial) Migration to v1."""
SCHEMA_VERSION: ClassVar[int] = 1
def _migrate_control(self) -> None:
control_table = self.db.table("control")
control_table.insert(
{
"created_with_snapcraft_version": self._snapcraft_version,
"schema_version": self.SCHEMA_VERSION,
}
)
def _migrate(self) -> None:
"""Per-migration implementation."""
self._migrate_control()
| chipaca/snapcraft | snapcraft/internal/db/migration.py | Python | gpl-3.0 | 3,728 |
from .utils.dataIO import fileIO
from .utils import checks
from __main__ import send_cmd_help
from __main__ import settings as bot_settings
# Sys.
from operator import itemgetter, attrgetter
import discord
from discord.ext import commands
#from copy import deepcopy
import aiohttp
import asyncio
import json
import os
import http.client
DIR_DATA = "data/omaps"
POINTER = DIR_DATA+"/pointer.png"
MAP = DIR_DATA+"/map.png"
class OpenStreetMaps:
"""The openstreetmap.org cog"""
def __init__(self,bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=False)
async def prevmap(self, ctx):
"""Resend the last openstreetmap.org result"""
user = ctx.message.author
channel = ctx.message.channel
if not fileIO(MAP, "check"):
await self.bot.say("` No previous map available.`")
else:
await self.bot.send_file(channel, MAP)
@commands.command(pass_context=True, no_pm=False)
async def maps(self, ctx, zoom, *country):
"""Search at openstreetmap.org\n
zoom: upclose, street, city, country, world
Type: 'none' to skip"""
user = ctx.message.author
channel = ctx.message.channel
country = "+".join(country)
longitude = 0.0
latitude = 0.0
adressNum = 1
limitResult = 0
# Set tile zoom
if zoom == 'upclose':
zoomMap = 18
elif zoom == 'street':
zoomMap = 16
elif zoom == 'city':
zoomMap = 11
elif zoom == 'country':
zoomMap = 8
elif zoom == 'world':
zoomMap = 2
else:
zoomMap = 16
# Get input data
search = country
await self.bot.say("` What city?`")
response = await self.bot.wait_for_message(author=ctx.message.author)
response = response.content.lower().strip().replace(" ", "+")
if response == "none":
pass
else:
search = search+","+response
#http://wiki.openstreetmap.org/wiki/Nominatim
await self.bot.say("` Enter your search term for the given location (building, company, address...) or type: none`")
response = await self.bot.wait_for_message(author=ctx.message.author)
response = response.content.lower().strip().replace(" ", "+")
if response == "none":
pass
else:
search = search+","+response
#print (search)
# Get xml result from openstreetmap.org
try:
domain = "nominatim.openstreetmap.org"
search = "/search?q={}&format=xml&polygon=1&addressdetails=1".format(search)
#print(domain+search)
conn = http.client.HTTPConnection(domain)
conn.request("GET", search)
r1 = conn.getresponse()
data = r1.read()
conn.close()
except Exception as e:
await self.bot.say("` Error getting GPS data.`")
print("Error getting GPS data.")
print(e)
return
try:
display_name = "-"
soup = BeautifulSoup(data, 'html.parser')
links = soup.findAll('place', lon=True)
results = len(links)
if results == 0:
await self.bot.say("`No results, try to rephrase`")
return
#print("results:\n"+str(results))
#print("display_name:\n"+display_name)
#print("longitude/latitude:\n"+str(longitude)+","+str(latitude))
except Exception as e:
await self.bot.say("`Something went wrong while parsing xml data...`")
print('parse XML failed')
print(e)
return
await self.bot.send_typing(channel)
if results > 1:
list = "```erlang\nResults\n-\n"
index = 0
for link in links:
index += 1
list = list + "(" +str(index) + "): "+ link["display_name"] + "\n"
list = list +"```` Enter result number...`"
await self.bot.say(list)
response = await self.bot.wait_for_message(author=ctx.message.author)
input = response.content.lower().strip()
# Set values for geotiler
try:
input = int(input)-1
except:
input = 0
place_id = (links[input]["place_id"])
display_name = (links[input]["display_name"])
longitude = (links[input]['lon'])
latitude = (links[input]['lat'])
else:
# Set values for geotiler
place_id = (links[0]["place_id"])
display_name = (links[0]["display_name"])
longitude = (links[0]['lon'])
latitude = (links[0]['lat'])
await self.bot.send_typing(channel)
#print([latitude, longitude, zoomMap])
map = geotiler.Map(center=(float(longitude), float(latitude)), zoom=zoomMap, size=(720, 720))
map.extent
image = await geotiler.render_map_async(map)
image.save(MAP)
await self.bot.send_typing(channel)
# Add pointer and text.
savedMap = Image(filename=MAP)
pointer = Image(filename=POINTER)
for o in COMPOSITE_OPERATORS:
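            # Note: the loop variable 'o' is unused; every pass renders the same
            # composition and overwrites MAP, so only the last save is used.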
w = savedMap.clone()
r = pointer.clone()
with Drawing() as draw:
draw.composite(operator='atop', left=311, top=311, width=90, height=90, image=r) #720
draw(w)
# Text
draw.fill_color = Color("#7289DA")
draw.stroke_color = Color("#5370D7")
draw.stroke_width = 0.3
draw.fill_opacity = 0.7
draw.stroke_opacity = 0.7
draw.font_style = 'oblique'
draw.font_size = 32
splitDisplayName = display_name.split(',')
# Object name/number
draw.text(x=20, y=35, body=splitDisplayName[0])
draw(w)
del splitDisplayName[0]
# Print location info on map.
line0 = ""
line1 = ""
draw.font_size = 18
for i in splitDisplayName:
if len(str(line0)) > 30:
line1 = line1 + i + ","
else:
line0 = line0 + i + ","
# line 0
if len(str(line0)) > 2:
draw.text(x=15, y=60, body=line0)
draw(w)
# line 1
if len(str(line1)) > 2:
draw.text(x=15, y=80, body=line1)
draw(w)
# Copyright Open Street Map
draw.fill_color = Color("#000000")
draw.stroke_color = Color("#333333")
draw.fill_opacity = 0.3
draw.stroke_opacity = 0.3
draw.font_style = 'normal'
draw.font_size = 14
draw.text(x=550, y=700, body="© OpenStreetMap.org") #720
draw(w)
w.save(filename=MAP)
await self.bot.send_file(channel, MAP)
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Set-up
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def check_folders():
if not os.path.exists(DIR_DATA):
print("Creating {} folder...".format(DIR_DATA))
os.makedirs(DIR_DATA)
def check_files():
if not os.path.isfile(POINTER):
print("pointer.png is missing!")
class ModuleNotFound(Exception):
def __init__(self, m):
self.message = m
def __str__(self):
return self.message
def setup(bot):
global geotiler
global Color, Drawing, display, Image, Color, Image, COMPOSITE_OPERATORS
global BeautifulSoup
check_folders()
check_files()
try:
import geotiler
except:
raise ModuleNotFound("geotiler is not installed. Do 'pip3 install geotiler --upgrade' to use this cog.")
try:
from bs4 import BeautifulSoup
except:
raise ModuleNotFound("BeautifulSoup is not installed. Do 'pip3 install BeautifulSoup --upgrade' to use this cog.")
try:
from wand.image import Image, COMPOSITE_OPERATORS
from wand.drawing import Drawing
from wand.display import display
from wand.image import Image
from wand.color import Color
except:
raise ModuleNotFound("Wand is not installed. Do 'pip3 install Wand --upgrade' and make sure you have ImageMagick installed http://docs.wand-py.org/en/0.4.2/guide/install.html")
bot.add_cog(OpenStreetMaps(bot))
| Canule/Mash-Cogs | omaps/omaps.py | Python | gpl-3.0 | 8,950 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansSylotiNagri-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0039) #glyph00057
glyphs.append(0x0034) #uniA82A
glyphs.append(0x0035) #uniA82B
glyphs.append(0x0036) #glyph00054
glyphs.append(0x0040) #glyph00064
glyphs.append(0x0053) #uni09EE
glyphs.append(0x0038) #glyph00056
glyphs.append(0x0015) #uniA80B
glyphs.append(0x0016) #uniA80C
glyphs.append(0x003D) #glyph00061
glyphs.append(0x0014) #uniA80A
glyphs.append(0x0019) #uniA80F
glyphs.append(0x0037) #glyph00055
glyphs.append(0x0017) #uniA80D
glyphs.append(0x0018) #uniA80E
glyphs.append(0x0032) #uniA828
glyphs.append(0x0001) #uniFEFF
glyphs.append(0x004D) #uni09E8
glyphs.append(0x0054) #uni09EF
glyphs.append(0x0048) #uni2055
glyphs.append(0x0050) #uni09EB
glyphs.append(0x0002) #uni000D
glyphs.append(0x0051) #uni09EC
glyphs.append(0x0052) #uni09ED
glyphs.append(0x002C) #uniA822
glyphs.append(0x0049) #uni0964
glyphs.append(0x004A) #uni0965
glyphs.append(0x003E) #glyph00062
glyphs.append(0x0042) #glyph00066
glyphs.append(0x002D) #uniA823
glyphs.append(0x0023) #uniA819
glyphs.append(0x0022) #uniA818
glyphs.append(0x0033) #uniA829
glyphs.append(0x0043) #glyph00067
glyphs.append(0x001F) #uniA815
glyphs.append(0x001E) #uniA814
glyphs.append(0x0021) #uniA817
glyphs.append(0x0020) #uniA816
glyphs.append(0x001B) #uniA811
glyphs.append(0x001A) #uniA810
glyphs.append(0x001D) #uniA813
glyphs.append(0x001C) #uniA812
glyphs.append(0x0047) #glyph00071
glyphs.append(0x0041) #glyph00065
glyphs.append(0x004C) #uni09E7
glyphs.append(0x0044) #glyph00068
glyphs.append(0x0045) #glyph00069
glyphs.append(0x0028) #uniA81E
glyphs.append(0x0027) #uniA81D
glyphs.append(0x0003) #uni00A0
glyphs.append(0x0029) #uniA81F
glyphs.append(0x0024) #uniA81A
glyphs.append(0x003F) #glyph00063
glyphs.append(0x0026) #uniA81C
glyphs.append(0x0025) #uniA81B
glyphs.append(0x0005) #uni200C
glyphs.append(0x0004) #uni200B
glyphs.append(0x003B) #glyph00059
glyphs.append(0x0006) #uni200D
glyphs.append(0x003A) #glyph00058
glyphs.append(0x004E) #uni09E9
glyphs.append(0x002F) #uniA825
glyphs.append(0x0007) #uni2010
glyphs.append(0x0008) #uni2011
glyphs.append(0x004B) #uni09E6
glyphs.append(0x0009) #uni25CC
glyphs.append(0x004F) #uni09EA
glyphs.append(0x003C) #glyph00060
glyphs.append(0x0046) #glyph00070
glyphs.append(0x002A) #uniA820
glyphs.append(0x002B) #uniA821
glyphs.append(0x0012) #uniA808
glyphs.append(0x0013) #uniA809
glyphs.append(0x002E) #uniA824
glyphs.append(0x0000) #.notdef
glyphs.append(0x0030) #uniA826
glyphs.append(0x0031) #uniA827
glyphs.append(0x000C) #uniA802
glyphs.append(0x000D) #uniA803
glyphs.append(0x000A) #uniA800
glyphs.append(0x000B) #uniA801
glyphs.append(0x0010) #uniA806
glyphs.append(0x0011) #uniA807
glyphs.append(0x000E) #uniA804
glyphs.append(0x000F) #uniA805
return glyphs
| davelab6/pyfontaine | fontaine/charsets/noto_glyphs/notosanssylotinagri_regular.py | Python | gpl-3.0 | 3,639 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.queries import get_property_name
from jx_sqlite.utils import GUID, untyped_column
from mo_dots import concat_field, relative_field, set_default, startswith_field
from mo_json import EXISTS, OBJECT, STRUCT
from mo_logs import Log
class Schema(object):
"""
A Schema MAPS ALL COLUMNS IN SNOWFLAKE FROM THE PERSPECTIVE OF A SINGLE TABLE (a nested_path)
"""
def __init__(self, nested_path, snowflake):
if nested_path[-1] != '.':
Log.error("Expecting full nested path")
self.path = concat_field(snowflake.fact_name, nested_path[0])
self.nested_path = nested_path
self.snowflake = snowflake
# def add(self, column_name, column):
# if column_name != column.names[self.nested_path[0]]:
# Log.error("Logic error")
#
# self.columns.append(column)
#
# for np in self.nested_path:
# rel_name = column.names[np]
# container = self.namespace.setdefault(rel_name, set())
# hidden = [
# c
# for c in container
# if len(c.nested_path[0]) < len(np)
# ]
# for h in hidden:
# container.remove(h)
#
# container.add(column)
#
# container = self.namespace.setdefault(column.es_column, set())
# container.add(column)
# def remove(self, column_name, column):
# if column_name != column.names[self.nested_path[0]]:
# Log.error("Logic error")
#
# self.namespace[column_name] = [c for c in self.namespace[column_name] if c != column]
def __getitem__(self, item):
output = self.snowflake.namespace.columns.find(self.path, item)
return output
# def __copy__(self):
# output = Schema(self.nested_path)
# for k, v in self.namespace.items():
# output.namespace[k] = copy(v)
# return output
def get_column_name(self, column):
"""
RETURN THE COLUMN NAME, FROM THE PERSPECTIVE OF THIS SCHEMA
:param column:
:return: NAME OF column
"""
relative_name = relative_field(column.name, self.nested_path[0])
return get_property_name(relative_name)
@property
def namespace(self):
return self.snowflake.namespace
def keys(self):
"""
:return: ALL COLUMN NAMES
"""
return set(c.name for c in self.columns)
@property
def columns(self):
return self.snowflake.namespace.columns.find(self.snowflake.fact_name)
def column(self, prefix):
full_name = untyped_column(concat_field(self.nested_path, prefix))
return set(
c
for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
for k, t in [untyped_column(c.name)]
if k == full_name and k != GUID
if c.jx_type not in [OBJECT, EXISTS]
)
def leaves(self, prefix):
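        # Columns whose name is the prefix itself or falls underneath it
        # (GUID only on an exact match); object/exists marker columns are skipped.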
full_name = concat_field(self.nested_path, prefix)
return set(
c
for c in self.snowflake.namespace.columns.find(self.snowflake.fact_name)
for k in [c.name]
if startswith_field(k, full_name) and k != GUID or k == full_name
if c.jx_type not in [OBJECT, EXISTS]
)
def map_to_sql(self, var=""):
"""
RETURN A MAP FROM THE RELATIVE AND ABSOLUTE NAME SPACE TO COLUMNS
"""
origin = self.nested_path[0]
if startswith_field(var, origin) and origin != var:
var = relative_field(var, origin)
fact_dict = {}
origin_dict = {}
for k, cs in self.namespace.items():
for c in cs:
if c.jx_type in STRUCT:
continue
if startswith_field(get_property_name(k), var):
origin_dict.setdefault(c.names[origin], []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(c.name, []).append(c)
elif origin == var:
origin_dict.setdefault(concat_field(var, c.names[origin]), []).append(c)
if origin != c.nested_path[0]:
fact_dict.setdefault(concat_field(var, c.name), []).append(c)
return set_default(origin_dict, fact_dict)
| klahnakoski/TestLog-ETL | vendor/jx_sqlite/schema.py | Python | mpl-2.0 | 4,659 |
"""bug 993786 - update_crash_adu_by_build_signature-bad-buildids
Revision ID: 21e4e35689f6
Revises: 224f0fda6ecb
Create Date: 2014-04-08 18:46:19.755028
"""
# revision identifiers, used by Alembic.
revision = '21e4e35689f6'
down_revision = '224f0fda6ecb'
from alembic import op
from socorrolib.lib import citexttype, jsontype, buildtype
from socorrolib.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
load_stored_proc(op, ['update_crash_adu_by_build_signature.sql'])
def downgrade():
load_stored_proc(op, ['update_crash_adu_by_build_signature.sql'])
| KaiRo-at/socorro | alembic/versions/21e4e35689f6_bug_993786_update_crash_adu_by_build_.py | Python | mpl-2.0 | 728 |
import frappe
from frappe.utils import get_fullname
def execute():
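	# For every user_id linked to more than one Employee, keep the Employee whose
	# employee_name matches the user's full name and clear user_id on the others.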
for user_id in frappe.db.sql_list("""select distinct user_id from `tabEmployee`
where ifnull(user_id, '')!=''
group by user_id having count(name) > 1"""):
fullname = get_fullname(user_id)
employee = frappe.db.get_value("Employee", {"employee_name": fullname, "user_id": user_id})
if employee:
frappe.db.sql("""update `tabEmployee` set user_id=null
where user_id=%s and name!=%s""", (user_id, employee)) | gangadhar-kadam/hrerp | erpnext/patches/4_0/fix_employee_user_id.py | Python | agpl-3.0 | 491 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from openerp.exceptions import AccessError
##############################################################################
#
# OLD API
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.Model):
_inherit = 'res.partner'
#
# add related fields to test them
#
_columns = {
# a regular one
'related_company_partner_id': fields.related(
'company_id', 'partner_id', type='many2one', obj='res.partner'),
# a related field with a single field
'single_related_company_id': fields.related(
'company_id', type='many2one', obj='res.company'),
# a related field with a single field that is also a related field!
'related_related_company_id': fields.related(
'single_related_company_id', type='many2one', obj='res.company'),
}
class TestFunctionCounter(osv.Model):
_name = 'test_old_api.function_counter'
def _compute_cnt(self, cr, uid, ids, fname, arg, context=None):
res = {}
for cnt in self.browse(cr, uid, ids, context=context):
res[cnt.id] = cnt.access and cnt.cnt + 1 or 0
return res
_columns = {
'access': fields.datetime('Datetime Field'),
'cnt': fields.function(
_compute_cnt, type='integer', string='Function Field', store=True),
}
class TestFunctionNoInfiniteRecursion(osv.Model):
_name = 'test_old_api.function_noinfiniterecursion'
def _compute_f1(self, cr, uid, ids, fname, arg, context=None):
res = {}
for tf in self.browse(cr, uid, ids, context=context):
res[tf.id] = 'create' in tf.f0 and 'create' or 'write'
cntobj = self.pool['test_old_api.function_counter']
cnt_id = self.pool['ir.model.data'].xmlid_to_res_id(
cr, uid, 'test_new_api.c1')
cntobj.write(
cr, uid, cnt_id, {'access': datetime.datetime.now()},
context=context)
return res
_columns = {
'f0': fields.char('Char Field'),
'f1': fields.function(
_compute_f1, type='char', string='Function Field', store=True),
}
##############################################################################
#
# NEW API
#
##############################################################################
from openerp import models, fields, api, _
class Category(models.Model):
_name = 'test_new_api.category'
name = fields.Char(required=True)
parent = fields.Many2one('test_new_api.category')
display_name = fields.Char(compute='_compute_display_name', inverse='_inverse_display_name')
discussions = fields.Many2many('test_new_api.discussion', 'test_new_api_discussion_category',
'category', 'discussion')
@api.one
@api.depends('name', 'parent.display_name') # this definition is recursive
def _compute_display_name(self):
if self.parent:
self.display_name = self.parent.display_name + ' / ' + self.name
else:
self.display_name = self.name
@api.one
def _inverse_display_name(self):
names = self.display_name.split('/')
# determine sequence of categories
categories = []
for name in names[:-1]:
category = self.search([('name', 'ilike', name.strip())])
categories.append(category[0])
categories.append(self)
# assign parents following sequence
for parent, child in zip(categories, categories[1:]):
if parent and child:
child.parent = parent
# assign name of last category, and reassign display_name (to normalize it)
self.name = names[-1].strip()
def read(self, fields=None, load='_classic_read'):
if self.search_count([('id', 'in', self._ids), ('name', '=', 'NOACCESS')]):
raise AccessError('Sorry')
return super(Category, self).read(fields, load)
class Discussion(models.Model):
_name = 'test_new_api.discussion'
name = fields.Char(string='Title', required=True,
help="General description of what this discussion is about.")
moderator = fields.Many2one('res.users')
categories = fields.Many2many('test_new_api.category',
'test_new_api_discussion_category', 'discussion', 'category')
participants = fields.Many2many('res.users')
messages = fields.One2many('test_new_api.message', 'discussion')
message_changes = fields.Integer(string='Message changes')
important_messages = fields.One2many('test_new_api.message', 'discussion',
domain=[('important', '=', True)])
@api.onchange('moderator')
def _onchange_moderator(self):
self.participants |= self.moderator
@api.onchange('messages')
def _onchange_messages(self):
self.message_changes = len(self.messages)
class Message(models.Model):
_name = 'test_new_api.message'
discussion = fields.Many2one('test_new_api.discussion', ondelete='cascade')
body = fields.Text()
author = fields.Many2one('res.users', default=lambda self: self.env.user)
name = fields.Char(string='Title', compute='_compute_name', store=True)
display_name = fields.Char(string='Abstract', compute='_compute_display_name')
size = fields.Integer(compute='_compute_size', search='_search_size')
double_size = fields.Integer(compute='_compute_double_size')
discussion_name = fields.Char(related='discussion.name')
author_partner = fields.Many2one(
'res.partner', compute='_compute_author_partner',
search='_search_author_partner')
important = fields.Boolean()
@api.one
@api.constrains('author', 'discussion')
def _check_author(self):
if self.discussion and self.author not in self.discussion.participants:
raise ValueError(_("Author must be among the discussion participants."))
@api.one
@api.depends('author.name', 'discussion.name')
def _compute_name(self):
self.name = "[%s] %s" % (self.discussion.name or '', self.author.name or '')
@api.one
@api.depends('author.name', 'discussion.name', 'body')
def _compute_display_name(self):
stuff = "[%s] %s: %s" % (self.author.name, self.discussion.name or '', self.body or '')
self.display_name = stuff[:80]
@api.one
@api.depends('body')
def _compute_size(self):
self.size = len(self.body or '')
def _search_size(self, operator, value):
if operator not in ('=', '!=', '<', '<=', '>', '>=', 'in', 'not in'):
return []
# retrieve all the messages that match with a specific SQL query
query = """SELECT id FROM "%s" WHERE char_length("body") %s %%s""" % \
(self._table, operator)
self.env.cr.execute(query, (value,))
ids = [t[0] for t in self.env.cr.fetchall()]
return [('id', 'in', ids)]
@api.one
@api.depends('size')
def _compute_double_size(self):
# This illustrates a subtle situation: self.double_size depends on
# self.size. When size is computed, self.size is assigned, which should
# normally invalidate self.double_size. However, this may not happen
# while self.double_size is being computed: the last statement below
# would fail, because self.double_size would be undefined.
self.double_size = 0
size = self.size
self.double_size = self.double_size + size
@api.one
@api.depends('author', 'author.partner_id')
def _compute_author_partner(self):
        self.author_partner = self.author.partner_id
@api.model
def _search_author_partner(self, operator, value):
return [('author.partner_id', operator, value)]
class Multi(models.Model):
""" Model for testing multiple onchange methods in cascade that modify a
one2many field several times.
"""
_name = 'test_new_api.multi'
name = fields.Char(related='partner.name', readonly=True)
partner = fields.Many2one('res.partner')
lines = fields.One2many('test_new_api.multi.line', 'multi')
@api.onchange('name')
def _onchange_name(self):
for line in self.lines:
line.name = self.name
@api.onchange('partner')
def _onchange_partner(self):
for line in self.lines:
line.partner = self.partner
class MultiLine(models.Model):
_name = 'test_new_api.multi.line'
multi = fields.Many2one('test_new_api.multi', ondelete='cascade')
name = fields.Char()
partner = fields.Many2one('res.partner')
class MixedModel(models.Model):
_name = 'test_new_api.mixed'
number = fields.Float(digits=(10, 2), default=3.14)
date = fields.Date()
now = fields.Datetime(compute='_compute_now')
lang = fields.Selection(string='Language', selection='_get_lang')
reference = fields.Reference(string='Related Document',
selection='_reference_models')
@api.one
def _compute_now(self):
# this is a non-stored computed field without dependencies
self.now = fields.Datetime.now()
@api.model
def _get_lang(self):
langs = self.env['res.lang'].search([])
return [(lang.code, lang.name) for lang in langs]
@api.model
def _reference_models(self):
models = self.env['ir.model'].search([('state', '!=', 'manual')])
return [(model.model, model.name)
for model in models
if not model.model.startswith('ir.')]
class BoolModel(models.Model):
_name = 'domain.bool'
bool_true = fields.Boolean('b1', default=True)
bool_false = fields.Boolean('b2', default=False)
bool_undefined = fields.Boolean('b3')
| songmonit/CTTMSONLINE_V8 | openerp/addons/test_new_api/models.py | Python | agpl-3.0 | 10,759 |
from django.core.exceptions import MultipleObjectsReturned
from django.shortcuts import redirect
from django.urls import reverse, path
from wagtail.api.v2.router import WagtailAPIRouter
from wagtail.api.v2.views import PagesAPIViewSet, BaseAPIViewSet
from wagtail.images.api.v2.views import ImagesAPIViewSet
from wagtail.documents.api.v2.views import DocumentsAPIViewSet
class OpenstaxPagesAPIEndpoint(PagesAPIViewSet):
"""
OpenStax custom Pages API endpoint that allows finding pages and books by pk or slug
"""
def detail_view(self, request, pk=None, slug=None):
param = pk
if slug is not None:
self.lookup_field = 'slug'
param = slug
try:
return super().detail_view(request, param)
except MultipleObjectsReturned:
# Redirect to the listing view, filtered by the relevant slug
# The router is registered with the `wagtailapi` namespace,
# `pages` is our endpoint namespace and `listing` is the listing view url name.
return redirect(
reverse('wagtailapi:pages:listing') + f'?{self.lookup_field}={param}'
)
@classmethod
def get_urlpatterns(cls):
"""
This returns a list of URL patterns for the endpoint
"""
return [
path('', cls.as_view({'get': 'listing_view'}), name='listing'),
path('<int:pk>/', cls.as_view({'get': 'detail_view'}), name='detail'),
path('<slug:slug>/', cls.as_view({'get': 'detail_view'}), name='detail'),
path('find/', cls.as_view({'get': 'find_view'}), name='find'),
]
class OpenStaxImagesAPIViewSet(ImagesAPIViewSet):
meta_fields = BaseAPIViewSet.meta_fields + ['tags', 'download_url', 'height', 'width']
nested_default_fields = BaseAPIViewSet.nested_default_fields + ['title', 'download_url', 'height', 'width']
# Create the router. “wagtailapi” is the URL namespace
api_router = WagtailAPIRouter('wagtailapi')
# Add the three endpoints using the "register_endpoint" method.
# The first parameter is the name of the endpoint (eg. pages, images). This
# is used in the URL of the endpoint
# The second parameter is the endpoint class that handles the requests
api_router.register_endpoint('pages', OpenstaxPagesAPIEndpoint)
api_router.register_endpoint('images', OpenStaxImagesAPIViewSet)
api_router.register_endpoint('documents', DocumentsAPIViewSet)
| openstax/openstax-cms | openstax/api.py | Python | agpl-3.0 | 2,453 |
def get_ip_address(request):
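    """Return the last entry of X-Forwarded-For when the header is present, otherwise REMOTE_ADDR."""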
ip_address = request.META.get('HTTP_X_FORWARDED_FOR')
if ip_address:
return ip_address.split(',')[-1]
return request.META.get('REMOTE_ADDR')
| jessamynsmith/boards-backend | blimp_boards/utils/request.py | Python | agpl-3.0 | 192 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.http.response import HttpResponseNotFound
from shuup.xtheme._theme import get_current_theme
_VIEW_CACHE = {}
def clear_view_cache(**kwargs):
_VIEW_CACHE.clear()
setting_changed.connect(clear_view_cache, dispatch_uid="shuup.xtheme.views.extra.clear_view_cache")
def _get_view_by_name(theme, view_name):
view = theme.get_view(view_name)
if hasattr(view, "as_view"): # Handle CBVs
view = view.as_view()
if view and not callable(view):
raise ImproperlyConfigured("View %r not callable" % view)
return view
def get_view_by_name(theme, view_name):
if not theme:
return None
cache_key = (theme.identifier, view_name)
if cache_key not in _VIEW_CACHE:
view = _get_view_by_name(theme, view_name)
_VIEW_CACHE[cache_key] = view
else:
view = _VIEW_CACHE[cache_key]
return view
def extra_view_dispatch(request, view):
"""
Dispatch to an Xtheme extra view.
:param request: A request
:type request: django.http.HttpRequest
:param view: View name
:type view: str
:return: A response of some ilk
:rtype: django.http.HttpResponse
"""
theme = get_current_theme(request)
view_func = get_view_by_name(theme, view)
if not view_func:
msg = "%s/%s: Not found" % (getattr(theme, "identifier", None), view)
return HttpResponseNotFound(msg)
return view_func(request)
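# Illustrative wiring (URL pattern and name assumed): a project urlconf could
# route theme-provided extra views through this dispatcher, e.g.
#   url(r"^xtheme/(?P<view>.+)/$", extra_view_dispatch, name="xtheme_extra_view")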
| hrayr-artunyan/shuup | shuup/xtheme/views/extra.py | Python | agpl-3.0 | 1,769 |
#!/usr/bin/env python
# Shellscript to verify r.gwflow calculation, this calculation is based on
# the example at page 167 of the following book:
# author = "Kinzelbach, W. and Rausch, R.",
# title = "Grundwassermodellierung",
# publisher = "Gebr{\"u}der Borntraeger (Berlin, Stuttgart)",
# year = "1995"
#
import sys
import os
import grass.script as grass
# Overwrite existing maps
grass.run_command("g.gisenv", set="OVERWRITE=1")
grass.message(_("Set the region"))
# The area is 2000m x 1000m with a cell size of 25m x 25m
grass.run_command("g.region", res=50, n=950, s=0, w=0, e=2000)
grass.run_command("r.mapcalc", expression="phead= if(row() == 19, 5, 3)")
grass.run_command("r.mapcalc", expression="status=if((col() == 1 && row() == 13) ||\
(col() == 1 && row() == 14) ||\
(col() == 2 && row() == 13) ||\
(col() == 2 && row() == 14) ||\
(row() == 19), 2, 1)")
grass.run_command("r.mapcalc", expression="hydcond=0.001")
grass.run_command("r.mapcalc", expression="recharge=0.000000006")
grass.run_command("r.mapcalc", expression="top=20")
grass.run_command("r.mapcalc", expression="bottom=0")
grass.run_command("r.mapcalc", expression="syield=0.001")
grass.run_command("r.mapcalc", expression="null=0.0")
#compute a steady state groundwater flow
grass.run_command("r.gwflow", "f", solver="cholesky", top="top", bottom="bottom", phead="phead", \
status="status", hc_x="hydcond", hc_y="hydcond", s="syield", \
recharge="recharge", output="gwresult", dt=864000000000, type="unconfined", budget="water_budget")
| AsherBond/MondocosmOS | grass_trunk/raster/r.gwflow/valid_calc_excavation.py | Python | agpl-3.0 | 1,561 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.core.exceptions import ImproperlyConfigured
from django.utils.http import urlquote
from userena.utils import get_gravatar, generate_sha1, get_protocol
from userena.managers import UserenaManager, UserenaBaseProfileManager
from userena import settings as userena_settings
from guardian.shortcuts import get_perms
from guardian.shortcuts import assign
from easy_thumbnails.fields import ThumbnailerImageField
import datetime
import random
import hashlib
PROFILE_PERMISSIONS = (
('view_profile', 'Can view profile'),
)
def upload_to_mugshot(instance, filename):
"""
    Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH``, saving it
    under a unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.id)
return '%(path)s%(hash)s.%(extension)s' % {'path': userena_settings.USERENA_MUGSHOT_PATH,
'hash': hash[:10],
'extension': extension}
class UserenaSignup(models.Model):
"""
Userena model which stores all the necessary information to have a full
functional user implementation on your Django website.
"""
user = models.OneToOneField(User,
verbose_name=_('user'),
related_name='userena_signup')
last_active = models.DateTimeField(_('last active'),
blank=True,
null=True,
help_text=_('The last date that the user was active.'))
activation_key = models.CharField(_('activation key'),
max_length=40,
blank=True)
activation_notification_send = models.BooleanField(_('notification send'),
default=False,
help_text=_('Designates whether this user has already got a notification about activating their account.'))
email_unconfirmed = models.EmailField(_('unconfirmed email address'),
blank=True,
help_text=_('Temporary email address when the user requests an email change.'))
email_confirmation_key = models.CharField(_('unconfirmed email verification key'),
max_length=40,
blank=True)
email_confirmation_key_created = models.DateTimeField(_('creation date of email confirmation key'),
blank=True,
null=True)
objects = UserenaManager()
class Meta:
verbose_name = _('userena registration')
verbose_name_plural = _('userena registrations')
def __unicode__(self):
return '%s' % self.user.username
def change_email(self, email):
"""
Changes the email address for a user.
A user needs to verify this new email address before it becomes
active. By storing the new email address in a temporary field --
``temporary_email`` -- we are able to set this email address after the
user has verified it by clicking on the verification URI in the email.
        This email gets sent out by ``send_confirmation_email``.
:param email:
The new email address that the user wants to use.
"""
self.email_unconfirmed = email
salt, hash = generate_sha1(self.user.username)
self.email_confirmation_key = hash
self.email_confirmation_key_created = timezone.now()
self.save()
# Send email for activation
self.send_confirmation_email()
def send_confirmation_email(self):
"""
Sends an email to confirm the new email address.
This method sends out two emails. One to the new email address that
contains the ``email_confirmation_key`` which is used to verify this
        email address with :func:`UserenaUser.objects.confirm_email`.
The other email is to the old email address to let the user know that
a request is made to change this email address.
"""
context= {'user': self.user,
'new_email': self.email_unconfirmed,
'protocol': get_protocol(),
'confirmation_key': self.email_confirmation_key,
'site': Site.objects.get_current()}
# Email to the old address
subject_old = render_to_string('accounts/emails/confirmation_email_subject_old.txt',
context)
subject_old = ''.join(subject_old.splitlines())
message_old = render_to_string('accounts/emails/confirmation_email_message_old.txt',
context)
send_mail(subject_old,
message_old,
settings.DEFAULT_FROM_EMAIL,
[self.user.email])
# Email to the new address
subject_new = render_to_string('accounts/emails/confirmation_email_subject_new.txt',
context)
subject_new = ''.join(subject_new.splitlines())
message_new = render_to_string('accounts/emails/confirmation_email_message_new.txt',
context)
send_mail(subject_new,
message_new,
settings.DEFAULT_FROM_EMAIL,
[self.email_unconfirmed,])
def activation_key_expired(self):
"""
Checks if activation key is expired.
Returns ``True`` when the ``activation_key`` of the user is expired and
``False`` if the key is still valid.
The key is expired when it's set to the value defined in
``USERENA_ACTIVATED`` or ``activation_key_created`` is beyond the
amount of days defined in ``USERENA_ACTIVATION_DAYS``.
"""
expiration_days = datetime.timedelta(days=userena_settings.USERENA_ACTIVATION_DAYS)
expiration_date = self.user.date_joined + expiration_days
if self.activation_key == userena_settings.USERENA_ACTIVATED:
return True
if timezone.now() >= expiration_date:
return True
return False
def send_activation_email(self, auto_join_secret = False):
"""
        Sends an activation email to the user.
        This email is sent when the user wants to activate their newly created
        user.
"""
if not auto_join_secret:
activation_url = reverse('userena_activate', args=(self.user.username, self.activation_key))
else:
if isinstance(auto_join_secret, basestring):
auto_join_key = auto_join_secret
else:
auto_join_key = hashlib.md5(self.activation_key +
settings.AGORA_API_AUTO_ACTIVATION_SECRET).hexdigest()
activation_url = reverse('auto_join_activate', args=(self.user.username, auto_join_key))
context= {'user': self.user,
'protocol': get_protocol(),
'activation_days': userena_settings.USERENA_ACTIVATION_DAYS,
'activation_url': activation_url,
'site': Site.objects.get_current()}
subject = render_to_string('accounts/emails/activation_email_subject.txt',
context)
subject = ''.join(subject.splitlines())
message = render_to_string('accounts/emails/activation_email_message.txt',
context)
send_mail(subject,
message,
settings.DEFAULT_FROM_EMAIL,
[self.user.email,])
class UserenaBaseProfile(models.Model):
""" Base model needed for extra profile functionality """
PRIVACY_CHOICES = (
('open', _('Open')),
('registered', _('Registered')),
('closed', _('Closed')),
)
MUGSHOT_SETTINGS = {'size': (userena_settings.USERENA_MUGSHOT_SIZE,
userena_settings.USERENA_MUGSHOT_SIZE),
'crop': 'smart'}
mugshot = ThumbnailerImageField(_('mugshot'),
blank=True,
upload_to=upload_to_mugshot,
resize_source=MUGSHOT_SETTINGS,
help_text=_('A personal image displayed in your profile.'))
privacy = models.CharField(_('privacy'),
max_length=15,
choices=PRIVACY_CHOICES,
default=userena_settings.USERENA_DEFAULT_PRIVACY,
help_text = _('Designates who can view your profile.'))
objects = UserenaBaseProfileManager()
class Meta:
"""
Meta options making the model abstract and defining permissions.
The model is ``abstract`` because it only supplies basic functionality
to a more custom defined model that extends it. This way there is not
another join needed.
We also define custom permissions because we don't know how the model
that extends this one is going to be called. So we don't know what
permissions to check. For ex. if the user defines a profile model that
is called ``MyProfile``, than the permissions would be
``add_myprofile`` etc. We want to be able to always check
``add_profile``, ``change_profile`` etc.
"""
abstract = True
permissions = PROFILE_PERMISSIONS
def __unicode__(self):
return 'Profile of %(username)s' % {'username': self.user.username}
def get_mugshot_url(self, custom_size = userena_settings.USERENA_MUGSHOT_SIZE):
"""
Returns the image containing the mugshot for the user.
The mugshot can be a uploaded image or a Gravatar.
Gravatar functionality will only be used when
``USERENA_MUGSHOT_GRAVATAR`` is set to ``True``.
:return:
``None`` when Gravatar is not used and no default image is supplied
by ``USERENA_MUGSHOT_DEFAULT``.
"""
# First check for a mugshot and if any return that.
if self.mugshot:
return settings.MEDIA_URL +\
settings.MUGSHOTS_DIR +\
self.mugshot.name.split("/")[-1]
# Use Gravatar if the user wants to.
if userena_settings.USERENA_MUGSHOT_GRAVATAR:
if userena_settings.USERENA_MUGSHOT_DEFAULT == 'blank-unitials-ssl':
d = 'https://unitials.com/mugshot/%s/%s.png' % (
custom_size, self.get_initials()
)
elif userena_settings.USERENA_MUGSHOT_DEFAULT == 'blank-unitials':
d = 'http://unitials.com/mugshot/%s/%s.png' % (
custom_size, self.get_initials()
)
            else:
                d = userena_settings.USERENA_MUGSHOT_DEFAULT
            return get_gravatar(self.user.email, custom_size, d)
# Gravatar not used, check for a default image.
else:
if userena_settings.USERENA_MUGSHOT_DEFAULT not in ['404', 'mm',
'identicon',
'monsterid',
'wavatar',
'blank']:
return userena_settings.USERENA_MUGSHOT_DEFAULT
else: return None
def get_full_name_or_username(self):
"""
Returns the full name of the user, or if none is supplied will return
the username.
Also looks at ``USERENA_WITHOUT_USERNAMES`` settings to define if it
should return the username or email address when the full name is not
supplied.
:return:
``String`` containing the full name of the user. If no name is
supplied it will return the username or email address depending on
the ``USERENA_WITHOUT_USERNAMES`` setting.
"""
user = self.user
if user.first_name or user.last_name:
# We will return this as translated string. Maybe there are some
# countries that first display the last name.
name = _("%(first_name)s %(last_name)s") % \
{'first_name': user.first_name,
'last_name': user.last_name}
else:
# Fallback to the username if usernames are used
if not userena_settings.USERENA_WITHOUT_USERNAMES:
name = "%(username)s" % {'username': user.username}
else:
name = "%(email)s" % {'email': user.email}
return name.strip()
def can_view_profile(self, user):
"""
Can the :class:`User` view this profile?
Returns a boolean if a user has the rights to view the profile of this
user.
Users are divided into four groups:
``Open``
Everyone can view your profile
``Closed``
Nobody can view your profile.
``Registered``
Users that are registered on the website and signed
in only.
``Admin``
Special cases like superadmin and the owner of the profile.
        Through the ``privacy`` field the owner of a profile can define what
they want to show to whom.
:param user:
A Django :class:`User` instance.
"""
# Simple cases first, we don't want to waste CPU and DB hits.
# Everyone.
if self.privacy == 'open': return True
# Registered users.
elif self.privacy == 'registered' and isinstance(user, User):
return True
# Checks done by guardian for owner and admins.
elif 'view_profile' in get_perms(user, self):
return True
# Fallback to closed profile.
return False
class UserenaLanguageBaseProfile(UserenaBaseProfile):
"""
Extends the :class:`UserenaBaseProfile` with a language choice.
Use this model in combination with ``UserenaLocaleMiddleware`` automatically
set the language of users when they are signed in.
"""
language = models.CharField(_('language'),
max_length=5,
choices=settings.LANGUAGES,
default=settings.LANGUAGE_CODE[:2])
class Meta:
abstract = True
permissions = PROFILE_PERMISSIONS
| pirata-cat/agora-ciudadana | userena/models.py | Python | agpl-3.0 | 15,318 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2013-11-25
@author: Martin H. Bramwell
'''
import oerplib
import sys
import socket
from models.OErpModel import OErpModel
class OpenERP(object):
def __init__(self, credentials):
db = credentials['db_name']
user_id = credentials['user_id']
host_name = credentials['host_name']
host_port = credentials['host_port']
user_pwd = credentials['user_pwd']
print "Getting connection to {} for {}".format(db, user_id)
try:
oerp = oerplib.OERP(
server=host_name, protocol='xmlrpc', port=host_port)
OErpModel.openErpConnection['super'] = oerp
if db in oerp.db.list():
db_connect(user_id, user_pwd, db)
else:
print "There is no database called : {}".format(db)
except socket.gaierror:
sys.exit(
"Is this the correct URL : {}".format(host_name))
except socket.error:
sys.exit(
"Is this the correct port number : {}".format(host_port))
def db_connect(usr, pwd, db):
oerp = OErpModel.openErpConnection['super']
user = oerp.login(user=usr, database=db, passwd=pwd)
OErpModel.openErpConnection['admin'] = user
# print " - - {} - - ".format(user)
| martinhbramwell/GData_OpenERP_Data_Pump | openerp_utils.py | Python | agpl-3.0 | 1,342 |
#!/usr/bin/env python3
"""Test behaviour of the test running and the term program interaction."""
import sys
import pexpect
from testrunner import run
def _shellping(child, timeout=1):
"""Issue a 'shellping' command.
Raises a pexpect exception on failure.
:param timeout: timeout for the answer
"""
child.sendline('shellping')
child.expect_exact('shellpong\r\n', timeout=timeout)
def _wait_shell_ready(child, numtries=5):
"""Wait until the shell is ready by using 'shellping'."""
for _ in range(numtries - 1):
try:
_shellping(child)
except pexpect.TIMEOUT:
pass
else:
break
else:
# This one should fail
_shellping(child)
def _test_no_local_echo(child):
"""Verify that there is not local echo while testing."""
msg = 'true this should not be echoed'
child.sendline(msg)
res = child.expect_exact([pexpect.TIMEOUT, msg], timeout=1)
assert res == 0, "There should have been a timeout and not match stdin"
def testfunc(child):
"""Run some tests to verify the board under test behaves correctly.
It currently tests:
* local echo
"""
child.expect_exact("Running 'tests_tools' application")
_wait_shell_ready(child)
# Verify there is no local and remote echo as it is disabled
_test_no_local_echo(child)
# The node should still answer after the previous one
_shellping(child)
if __name__ == "__main__":
sys.exit(run(testfunc))
| cladmi/RIOT | tests/test_tools/tests/01-run.py | Python | lgpl-2.1 | 1,512 |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
import os, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class CustomWidget(QWidget):
def __init__(self, parent, fake = False):
QWidget.__init__(self, parent)
gradient = QLinearGradient(QPointF(0, 0), QPointF(100.0, 100.0))
baseColor = QColor(0xa6, 0xce, 0x39, 0x7f)
gradient.setColorAt(0.0, baseColor.light(150))
gradient.setColorAt(0.75, baseColor.light(75))
self.brush = QBrush(gradient)
self.fake = fake
self.fakeBrush = QBrush(Qt.red, Qt.DiagCrossPattern)
qtPath = QPainterPath()
qtPath.setFillRule(Qt.OddEvenFill)
qtPath.moveTo(-45.0, -20.0)
qtPath.lineTo(0.0, -45.0)
qtPath.lineTo(45.0, -20.0)
qtPath.lineTo(45.0, 45.0)
qtPath.lineTo(-45.0, 45.0)
qtPath.lineTo(-45.0, -20.0)
qtPath.closeSubpath()
qtPath.moveTo(15.0, 5.0)
qtPath.lineTo(35.0, 5.0)
qtPath.lineTo(35.0, 40.0)
qtPath.lineTo(15.0, 40.0)
qtPath.lineTo(15.0, 5.0)
qtPath.moveTo(-35.0, -15.0)
qtPath.closeSubpath()
qtPath.lineTo(-10.0, -15.0)
qtPath.lineTo(-10.0, 10.0)
qtPath.lineTo(-35.0, 10.0)
qtPath.lineTo(-35.0, -15.0)
qtPath.closeSubpath()
self.path = qtPath
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.setRenderHint(QPainter.Antialiasing)
if self.fake:
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.fillRect(event.rect(), self.fakeBrush)
painter.setBrush(self.brush)
painter.translate(60, 60)
painter.drawPath(self.path)
painter.end()
def sizeHint(self):
return QSize(120, 120)
def minimumSizeHint(self):
return QSize(120, 120)
if __name__ == "__main__":
try:
qt = sys.argv[1]
except IndexError:
qt = "4.1"
if qt != "4.0" and qt != "4.1":
sys.stderr.write("Usage: %s [4.0|4.1]\n" % sys.argv[0])
sys.exit(1)
app = QApplication(sys.argv)
exec_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
label = QLabel()
label.setPixmap(QPixmap(os.path.join(exec_dir, "background.png")))
layout = QGridLayout()
label.setLayout(layout)
if qt == "4.0":
layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Opaque (Default)", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
layout.addWidget(CustomWidget(label), 0, 0, Qt.AlignCenter)
caption = QLabel("Contents Propagated (Default)", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 0, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
contentsWidget = CustomWidget(label)
contentsWidget.setAttribute(Qt.WA_ContentsPropagated, True)
layout.addWidget(contentsWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With WA_ContentsPropagated set", label)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
autoFillWidget = CustomWidget(label)
autoFillWidget.setAutoFillBackground(True)
layout.addWidget(autoFillWidget, 0, 1, Qt.AlignCenter)
caption = QLabel("With autoFillBackground set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 1, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
noBackgroundWidget = CustomWidget(label, fake = True)
noBackgroundWidget.setAttribute(Qt.WA_NoBackground, True)
layout.addWidget(noBackgroundWidget, 0, 2, Qt.AlignCenter)
caption = QLabel("With WA_NoBackground set", label)
caption.setWordWrap(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
elif qt == "4.1":
opaqueWidget = CustomWidget(label, fake = True)
opaqueWidget.setAttribute(Qt.WA_OpaquePaintEvent, True)
layout.addWidget(opaqueWidget, 0, 2, Qt.AlignCenter)
caption = QLabel("With WA_OpaquePaintEvent set", label)
caption.setAutoFillBackground(True)
caption.setMargin(2)
layout.addWidget(caption, 1, 2, Qt.AlignCenter | Qt.AlignTop)
if qt == "4.0":
label.setWindowTitle("Qt 4.0: Painting Custom Widgets")
elif qt == "4.1":
label.setWindowTitle("Qt 4.1: Painting Custom Widgets")
label.resize(404, 160)
label.show()
sys.exit(app.exec_())
| RLovelett/qt | doc/src/diagrams/contentspropagation/customwidget.py | Python | lgpl-2.1 | 6,694 |
# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
def f():
delay_mu(2)
def g():
delay_mu(2)
x = f if True else g
def h():
with interleave:
f()
# CHECK-L: ${LINE:+1}: fatal: it is not possible to interleave this function call within a 'with interleave:' statement because the compiler could not prove that the same function would always be called
x()
| JQIamo/artiq | artiq/test/lit/interleaving/error_inlining.py | Python | lgpl-3.0 | 447 |
#!/usr/bin/env python
import re
from lilac.controller import ADMIN, LOGGER
from lilac.orm import Backend
from lilac.tool import access, set_secure_cookie
from lilac.model import User
from solo.template import render_template
from solo.web.util import jsonify
from solo.web import ctx
from webob import exc
from lilac.paginator import Paginator
USER_STATUSES = {
'actived': 'actived',
'banned': 'banned',
}
USER = 'user'
ROOT = 'root'
ADMIN = 'administrator'
ROLES = {
# 'root' : 'root',
'administrator': 'administrator',
'user': 'user'
}
def user_menu(m):
ctl = UserController()
# User Api
m.connect('userinfo', '/userinfo', controller=ctl, action='userinfo')
m.connect('login_page', '/login', controller=ctl, action='login_page', conditions=dict(method=["GET"]))
m.connect('login', '/login', controller=ctl, action='login', conditions=dict(method=["POST"]))
m.connect('logout', '/logout', controller=ctl, action='logout')
m.connect('add_user_page', '/user/add', controller=ctl, action='add_page', conditions=dict(method=["GET"]))
m.connect('add_user', '/user/add', controller=ctl, action='add', conditions=dict(method=["POST"]))
m.connect('user_index', '/user', controller=ctl, action='index', conditions=dict(method=["GET"]))
m.connect('edit_user_page', '/user/:uid/edit', controller=ctl, action='edit_page', conditions=dict(method=["GET"]))
m.connect('edit_user', '/user/:uid/edit', controller=ctl, action='edit', conditions=dict(method=["POST"]))
class UserController(object):
@access()
def index(self, page=1):
user = ctx.request.user
if user.role != 'root':
raise exc.HTTPFound(location='/user/%d/edit' % (user.uid))
page = int(page)
users = Backend('user').paginate(page, 10)
return render_template('user.index.html', users=users)
@jsonify
def userinfo(self):
return ctx.request.user
def login_page(self):
if ctx.request.user.uid != 0:
raise exc.HTTPFound('/task')
return render_template('login.html')
def login(self, username='', password=''):
LOGGER.error('username=%s', username)
username = username.strip()
password = password.strip()
user = Backend('user').find_by_username(username)
if user and user.check(password):
set_secure_cookie('auth', str(user.uid))
LOGGER.info('success')
raise exc.HTTPFound(location='/task')
return render_template('login.html')
def logout(self):
if ctx.request.user.uid != 0:
ctx.response.delete_cookie('auth')
raise exc.HTTPFound(location='/login')
@access(ROOT)
def add_page(self):
return render_template('user.add.html', statuses=USER_STATUSES, roles=ROLES)
@jsonify
@access(ROOT)
def add(self, username, email, real_name, password, status='', role='user'):
username, real_name = username.strip(), real_name.strip()
if not re.match(r'^[A-Za-z0-9_]{4,16}$', username):
return {'status' : 'error', 'msg' : 'user name: %s must be the ^[A-Za-z0-9_]{4,16}$ pattern' %(username)}
if not re.match(r'^[A-Za-z0-9_ ]{4,16}$', real_name):
            return {'status' : 'error', 'msg' : 'real name: %s must be the ^[A-Za-z0-9_ ]{4,16}$ pattern' %(real_name)}
if not re.match(r'^[A-Za-z0-9@#$%^&+=]{4,16}$', password):
            return {'status' : 'error', 'msg' : 'password: %s must be the ^[A-Za-z0-9@#$%%^&+=]{4,16}$ pattern' %(password)}
if status not in USER_STATUSES:
status = 'actived'
if role not in ROLES:
role = 'user'
if len(email) > 7 and re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email):
if Backend('user').find_by_email(email):
return {'status' : 'error', 'msg' : 'email:%s is used' %(email)}
if Backend('user').find_by_username(username):
return {'status' : 'error', 'msg' : 'user name:%s is used' %(username)}
user = User(username, email, real_name, password, status, role)
Backend('user').save(user)
return {'status' : 'info', 'msg' : 'saved'}
@access()
def edit_page(self, uid):
uid = int(uid)
user = Backend('user').find(uid)
if not user:
raise exc.HTTPNotFound('Not Found')
return render_template('user.edit.html', statuses=USER_STATUSES, roles=ROLES, user=user)
@jsonify
@access()
def edit(self, uid, email, real_name, password, newpass1, newpass2, status, role='user'):
real_name, newpass1, newpass2 = real_name.strip(), newpass1.strip(), newpass2.strip()
uid = int(uid)
user = Backend('user').find(uid)
if not user:
raise exc.HTTPNotFound('user not found')
me = ctx.request.user
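        # Users may change their own password (when the new passwords match and
        # satisfy the pattern) and their own e-mail address.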
if me.uid == user.uid:
if re.match(r'[A-Za-z0-9@#$%^&+=]{4,16}', newpass1):
if password and newpass1 and newpass1 == newpass2:
user.password = newpass1
elif newpass1:
                return {'status' : 'error', 'msg' : 'password: %s must be the [A-Za-z0-9@#$%%^&+=]{4,16} pattern' %(newpass1)}
if len(email) > 7 and re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email):
user_ = Backend('user').find_by_email(email)
if user_ and user_.uid != user.uid:
return {'status' : 'error', 'msg' : 'email:%s is used' %(email)}
else:
user.email = email
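        # Only the root account (uid 1) may change another user's role and status.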
if me.uid == 1 and user.uid != 1:
if role in (ADMIN, USER):
user.role = role
if user.status != status and status in USER_STATUSES:
user.status = status
if re.match(r'^[A-Za-z0-9_ ]{4,16}$', real_name):
if user.real_name != real_name:
user.real_name = real_name
Backend('user').save(user)
return {'status' : 'info', 'msg' : 'updated'}
| thomashuang/Lilac | lilac/controller/user.py | Python | lgpl-3.0 | 6,114 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from .module import RegionsjobModule
__all__ = ['RegionsjobModule']
| vicnet/weboob | modules/regionsjob/__init__.py | Python | lgpl-3.0 | 847 |
# -*- coding: utf-8 -*-
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)
# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)
#
# Last changed: 2014-02-06
# NOTE: This module does not depend on (py)dolfin being installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
from __future__ import print_function
import getopt
import sys
from instant import get_status_output
import re
import warnings
import os.path
import numpy
import six
from . import abaqus
from . import xml_writer
def format_from_suffix(suffix):
"Return format for given suffix"
if suffix == "xml":
return "xml"
elif suffix == "mesh":
return "mesh"
elif suffix == "gmsh":
return "gmsh"
elif suffix == "msh":
return "gmsh"
elif suffix == "gra":
return "metis"
elif suffix == "grf":
return "scotch"
elif suffix == "grid":
return "diffpack"
elif suffix == "inp":
return "abaqus"
elif suffix == "ncdf":
return "NetCDF"
elif suffix =="exo":
return "ExodusII"
elif suffix =="e":
return "ExodusII"
elif suffix == "vrt" or suffix == "cel":
return "StarCD"
elif suffix == "ele" or suffix == "node":
return "Triangle"
else:
_error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
"""Convert between .mesh and .xml, parser implemented as a
state machine:
0 = read 'Vertices'
1 = read number of vertices
2 = read next vertex
3 = read 'Triangles' or 'Tetrahedra'
4 = read number of cells
5 = read next cell
6 = done
"""
print("Converting from Medit format (.mesh) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Remove newline
line = line.strip(" \n\r").split(" ")
# Read dimension either on same line or following line
if line[0] == "Dimension":
if (len(line) == 2):
line = line[1]
else:
line = ifile.readline()
num_dims = int(line)
if num_dims == 2:
cell_type = "triangle"
dim = 2
elif num_dims == 3:
cell_type = "tetrahedron"
dim = 3
break
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Step to beginning of file
ifile.seek(0)
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "Vertices" or line == " Vertices":
state += 1
elif state == 1:
num_vertices = int(line)
xml_writer.write_header_vertices(ofile, num_vertices)
state +=1
elif state == 2:
if num_dims == 2:
(x, y, tmp) = line.split()
x = float(x)
y = float(y)
z = 0.0
elif num_dims == 3:
(x, y, z, tmp) = line.split()
x = float(x)
y = float(y)
z = float(z)
xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)
num_vertices_read +=1
if num_vertices == num_vertices_read:
xml_writer.write_footer_vertices(ofile)
state += 1
elif state == 3:
if (line == "Triangles" or line == " Triangles") and num_dims == 2:
state += 1
if line == "Tetrahedra" and num_dims == 3:
state += 1
elif state == 4:
num_cells = int(line)
xml_writer.write_header_cells(ofile, num_cells)
state +=1
elif state == 5:
if num_dims == 2:
(n0, n1, n2, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
(n0, n1, n2, n3, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
n3 = int(n3) - 1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
state += 1
elif state == 6:
break
# Check that we got all data
if state == 6:
print("Conversion done")
else:
_error("Missing data, unable to convert")
# Write footer
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def gmsh2xml(ifilename, handler):
"""Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
parser implemented as a state machine:
0 = read 'MeshFormat'
1 = read mesh format data
2 = read 'EndMeshFormat'
3 = read 'Nodes'
4 = read number of vertices
5 = read vertices
6 = read 'EndNodes'
7 = read 'Elements'
8 = read number of cells
9 = read cells
10 = done
Afterwards, extract physical region numbers if they are defined in
the mesh file as a mesh function.
"""
print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")
# The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
# the gmsh element types supported for conversion
supported_gmsh_element_types = [1, 2, 4, 15]
# Open files
ifile = open(ifilename, "r")
# Scan file for cell type
cell_type = None
highest_dim = 0
line = ifile.readline()
while line:
# Remove newline
line = line.rstrip("\n\r")
# Read dimension
if line.find("$Elements") == 0:
line = ifile.readline()
num_elements = int(line)
if num_elements == 0:
_error("No elements found in gmsh file.")
line = ifile.readline()
# Now iterate through elements to find largest dimension. Gmsh
# format might include elements of lower dimensions in the element list.
# We also need to count number of elements of correct dimensions.
# Also determine which vertices are not used.
dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
# Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
tags_for_dim = {0: [], 1: [], 2: [], 3: []}
while line.find("$EndElements") == -1:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
if highest_dim < dim:
highest_dim = dim
node_num_list = [int(node) for node in element[3 + num_tags:]]
vertices_used_for_dim[dim].extend(node_num_list)
if num_tags > 0:
tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
dim_count[dim] += 1
else:
#TODO: output a warning here. "gmsh element type %d not supported" % elem_type
pass
line = ifile.readline()
else:
# Read next line
line = ifile.readline()
# Check that we got the cell type and set num_cells_counted
if highest_dim == 0:
_error("Unable to find cells of supported type.")
num_cells_counted = dim_count[highest_dim]
vertex_set = set(vertices_used_for_dim[highest_dim])
vertices_used_for_dim[highest_dim] = None
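    # Renumber only the vertices referenced by cells of the highest dimension
    # so they form a contiguous, zero-based index range.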
vertex_dict = {}
for n,v in enumerate(vertex_set):
vertex_dict[v] = n
# Step to beginning of file
ifile.seek(0)
# Set mesh type
handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)
# Initialise node list (gmsh does not export all vertexes in order)
nodelist = {}
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
# Only import the dolfin objects if facet markings exist
process_facets = False
if len(tags_for_dim[highest_dim-1]) > 0:
# first construct the mesh
try:
from dolfin import MeshEditor, Mesh
except ImportError:
_error("DOLFIN must be installed to handle Gmsh boundary regions")
mesh = Mesh()
mesh_editor = MeshEditor ()
mesh_editor.open( mesh, highest_dim, highest_dim )
process_facets = True
else:
# TODO: Output a warning or an error here
        mesh = None
while state != 10:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "$MeshFormat":
state = 1
elif state == 1:
(version, file_type, data_size) = line.split()
state = 2
elif state == 2:
if line == "$EndMeshFormat":
state = 3
elif state == 3:
if line == "$Nodes":
state = 4
elif state == 4:
num_vertices = len(vertex_dict)
handler.start_vertices(num_vertices)
if process_facets:
mesh_editor.init_vertices_global(num_vertices, num_vertices)
state = 5
elif state == 5:
(node_no, x, y, z) = line.split()
node_no = int(node_no)
x,y,z = [float(xx) for xx in (x,y,z)]
if node_no in vertex_dict:
node_no = vertex_dict[node_no]
else:
continue
nodelist[int(node_no)] = num_vertices_read
handler.add_vertex(num_vertices_read, [x, y, z])
if process_facets:
if highest_dim == 1:
coords = numpy.array([x])
elif highest_dim == 2:
coords = numpy.array([x, y])
elif highest_dim == 3:
coords = numpy.array([x, y, z])
mesh_editor.add_vertex(num_vertices_read, coords)
num_vertices_read +=1
if num_vertices == num_vertices_read:
handler.end_vertices()
state = 6
elif state == 6:
if line == "$EndNodes":
state = 7
elif state == 7:
if line == "$Elements":
state = 8
elif state == 8:
handler.start_cells(num_cells_counted)
if process_facets:
mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)
state = 9
elif state == 9:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
else:
dim = 0
if dim == highest_dim:
node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
for node in node_num_list:
if not node in nodelist:
_error("Vertex %d of %s %d not previously defined." %
(node, cell_type_for_dim[dim], num_cells_read))
cell_nodes = [nodelist[n] for n in node_num_list]
handler.add_cell(num_cells_read, cell_nodes)
if process_facets:
cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
mesh_editor.add_cell(num_cells_read, cell_nodes)
num_cells_read +=1
if num_cells_counted == num_cells_read:
handler.end_cells()
if process_facets:
mesh_editor.close()
state = 10
elif state == 10:
break
# Write mesh function based on the Physical Regions defined by
# gmsh, but only if they are not all zero. All zero physical
# regions indicate that no physical regions were defined.
if highest_dim not in [1,2,3]:
_error("Gmsh tags not supported for dimension %i. Probably a bug" % dim)
tags = tags_for_dim[highest_dim]
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
handler.start_meshfunction("physical_region", dim, num_cells_counted)
for i, physical_region in enumerate(physical_regions):
handler.add_entity_meshfunction(i, physical_region)
handler.end_meshfunction()
# Now process the facet markers
tags = tags_for_dim[highest_dim-1]
if (len(tags) > 0) and (mesh is not None):
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
mesh.init(highest_dim-1,0)
# Get the facet-node connectivity information (reshape as a row of node indices per facet)
if highest_dim==1:
# for 1d meshes the mesh topology returns the vertex to vertex map, which isn't what we want
# as facets are vertices
facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
else:
facets_as_nodes = mesh.topology()(highest_dim-1,0)().reshape ( mesh.num_facets(), highest_dim )
# Build the reverse map
nodes_as_facets = {}
for facet in range(mesh.num_facets()):
nodes_as_facets[tuple(facets_as_nodes[facet,:])] = facet
data = [int(0*k) for k in range(mesh.num_facets()) ]
for i, physical_region in enumerate(physical_regions):
nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
nodes.sort()
if physical_region != 0:
try:
index = nodes_as_facets[tuple(nodes)]
data[index] = physical_region
                    except KeyError:
raise Exception ( "The facet (%d) was not found to mark: %s" % (i, nodes) )
# Create and initialise the mesh function
handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets() )
for index, physical_region in enumerate ( data ):
handler.add_entity_meshfunction(index, physical_region)
handler.end_meshfunction()
# Check that we got all data
if state == 10:
print("Conversion done")
else:
_error("Missing data, unable to convert \n\ Did you use version 2.0 of the gmsh file format?")
# Close files
ifile.close()
def triangle2xml(ifilename, ofilename):
"""Convert between triangle format
(http://www.cs.cmu.edu/~quake/triangle.html) and .xml. The
given ifilename should be the prefix for the corresponding
.node, and .ele files.
"""
def get_next_line (fp):
"""Helper function for skipping comments and blank lines"""
line = fp.readline()
if line == '':
_error("Hit end of file prematurely.")
line = line.strip()
if not (line.startswith('#') or line == ''):
return line
return get_next_line(fp)
print("Converting from Triangle format {.node, .ele} to DOLFIN XML format")
# Open files
for suffix in [".node", ".ele"]:
if suffix in ifilename and ifilename[-len(suffix):] == suffix:
ifilename = ifilename.replace(suffix, "")
node_file = open(ifilename+".node", "r")
ele_file = open(ifilename+".ele", "r")
ofile = open(ofilename, "w")
try:
edge_file = open(ifilename+".edge", "r")
print("Found .edge file")
except IOError:
edge_file = None
# Read all the nodes
nodes = {}
num_nodes, dim, attr, bound = list(map(int, get_next_line(node_file).split()))
while len(nodes) < num_nodes:
node, x, y = get_next_line(node_file).split()[:3]
nodes[int(node)] = (float(x), float(y))
# Read all the triangles
tris = {}
tri_attrs = {}
num_tris, n_per_tri, attrs = list(map(int, get_next_line(ele_file).split()))
while len(tris) < num_tris:
line = get_next_line(ele_file).split()
tri, n1, n2, n3 = list(map(int, line[:4]))
# vertices are ordered according to current UFC ordering scheme -
# - may change in future!
tris[tri] = tuple(sorted((n1, n2, n3)))
tri_attrs[tri] = tuple(map(float, line[4:4+attrs]))
# Read all the boundary markers from edges
edge_markers_global = {}
edge_markers_local = []
got_negative_edge_markers = False
if edge_file is not None:
num_edges, num_edge_markers = list(map(int, get_next_line(edge_file).split()))
if num_edge_markers == 1:
while len(edge_markers_global) < num_edges:
edge, v1, v2, marker = list(map(int, get_next_line(edge_file).split()))
if marker < 0: got_negative_edge_markers = True
edge_markers_global[tuple(sorted((v1, v2)))] = marker
if got_negative_edge_markers:
print("Some edge markers are negative! dolfin will increase "\
"them by probably 2**32 when loading xml. "\
"Consider using non-negative edge markers only.")
for tri, vertices in six.iteritems(tris):
v0, v1, v2 = sorted((vertices[0:3]))
try:
edge_markers_local.append((tri, 0, \
edge_markers_global[(v1, v2)]))
edge_markers_local.append((tri, 1, \
edge_markers_global[(v0, v2)]))
edge_markers_local.append((tri, 2, \
edge_markers_global[(v0, v1)]))
            except KeyError:
raise Exception("meshconvert.py: The facet was not found.")
elif num_edge_markers == 0:
print("...but no markers in it. Ignoring it")
else:
print("...but %d markers specified in it. It won't be processed."\
%num_edge_markers)
# Write everything out
xml_writer.write_header_mesh(ofile, "triangle", 2)
xml_writer.write_header_vertices(ofile, num_nodes)
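    # Triangle files may number nodes from 0 or from 1; shift the indices so
    # the XML output is always zero-based.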
node_off = 0 if 0 in nodes else -1
for node, node_t in six.iteritems(nodes):
xml_writer.write_vertex(ofile, node+node_off, node_t[0], node_t[1], 0.0)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_tris)
tri_off = 0 if 0 in tris else -1
for tri, tri_t in six.iteritems(tris):
xml_writer.write_cell_triangle(ofile, tri+tri_off, tri_t[0] + node_off,
tri_t[1] + node_off, tri_t[2] + node_off)
xml_writer.write_footer_cells(ofile)
if len(edge_markers_local) > 0:
xml_writer.write_header_domains(ofile)
xml_writer.write_header_meshvaluecollection(ofile, \
"edge markers", 1, len(edge_markers_local), "uint")
for tri, local_edge, marker in edge_markers_local:
xml_writer.write_entity_meshvaluecollection(ofile, \
1, tri+tri_off, marker, local_edge)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
for i in range(attrs):
afilename = ofilename.replace(".xml", ".attr"+str(i)+".xml")
afile = open(afilename, "w")
xml_writer.write_header_meshfunction2(afile)
xml_writer.write_header_meshvaluecollection(afile, \
"triangle attribs "+str(i), 2, num_tris, "double")
for tri, tri_a in six.iteritems(tri_attrs):
xml_writer.write_entity_meshvaluecollection(afile, \
2, tri+tri_off, tri_a[i], 0)
xml_writer.write_footer_meshvaluecollection(afile)
xml_writer.write_footer_meshfunction(afile)
print("triangle attributes from .ele file written to "+afilename)
afile.close()
# Close files
node_file.close()
ele_file.close()
if edge_file is not None:
edge_file.close()
ofile.close()
def xml_old2xml(ifilename, ofilename):
"Convert from old DOLFIN XML format to new."
print("Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format...")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type (assuming there is just one)
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Read dimension
if "<triangle" in line:
cell_type = "triangle"
dim = 2
break
elif "<tetrahedron" in line:
cell_type = "tetrahedron"
dim = 3
break
# Step to beginning of file
ifile.seek(0)
# Read lines and make changes
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Modify line
if "xmlns" in line:
line = "<dolfin xmlns:dolfin=\"http://fenicsproject.org\">\n"
if "<mesh>" in line:
line = " <mesh celltype=\"%s\" dim=\"%d\">\n" % (cell_type, dim)
if dim == 2 and " z=\"0.0\"" in line:
line = line.replace(" z=\"0.0\"", "")
if " name=" in line:
line = line.replace(" name=", " index=")
if " name =" in line:
line = line.replace(" name =", " index=")
if "n0" in line:
line = line.replace("n0", "v0")
if "n1" in line:
line = line.replace("n1", "v1")
if "n2" in line:
line = line.replace("n2", "v2")
if "n3" in line:
line = line.replace("n3", "v3")
# Write line
ofile.write(line)
# Close files
    ifile.close()
    ofile.close()
print("Conversion done")
def metis_graph2graph_xml(ifilename, ofilename):
"Convert from Metis graph format to DOLFIN Graph XML."
print("Converting from Metis graph format to DOLFIN Graph XML.")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
xml_writer.write_header_graph(ofile, "directed")
xml_writer.write_header_vertices(ofile, int(num_vertices))
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges))
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, 2*int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
for i in range(int(num_vertices)):
print("vertex %g", i)
line = ifile.readline()
edges = line.split()
for e in edges:
xml_writer.write_graph_edge(ofile, i, int(e))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
    ifile.close()
    ofile.close()
def scotch_graph2graph_xml(ifilename, ofilename):
"Convert from Scotch graph format to DOLFIN Graph XML."
print("Converting from Scotch graph format to DOLFIN Graph XML.")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Skip graph file version number
ifile.readline()
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
# Read start index and numeric flag
# Start index is 0 or 1 (C/Fortran)
# Numeric flag is 3 bits where bit 1 enables vertex labels
# bit 2 enables edge weights and bit 3 enables vertex weights
line = ifile.readline()
(start_index, numeric_flag) = line.split()
    # Handling not implemented
if not numeric_flag == "000":
_error("Handling of scotch vertex labels, edge- and vertex weights not implemented")
xml_writer.write_header_graph(ofile, "undirected")
xml_writer.write_header_vertices(ofile, int(num_vertices))
# Read vertices and edges, first number gives number of edges from this vertex (not used)
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges)-1)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
ifile.readline()
ifile.readline()
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
for j in range(1, len(edges)):
xml_writer.write_graph_edge(ofile, i, int(edges[j]))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
    ifile.close()
    ofile.close()
def diffpack2xml(ifilename, ofilename):
"Convert from Diffpack tetrahedral/triangle grid format to DOLFIN XML."
print(diffpack2xml.__doc__)
# Format strings for MeshFunction XML files
meshfunction_header = """\
<?xml version="1.0" encoding="UTF-8"?>\n
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
<mesh_function type="uint" dim="%d" size="%d">\n"""
meshfunction_entity = " <entity index=\"%d\" value=\"%d\"/>\n"
meshfunction_footer = " </mesh_function>\n</dolfin>"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read and analyze header
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
if re.search(r"Number of elements", line):
num_cells = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of nodes", line):
num_vertices = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of space dim.", line):
num_dims = int(re.match(r".*\s(\d+).*", line).group(1))
if num_dims == 3:
xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
elem_type = "ElmT4n3D"
write_cell_func = xml_writer.write_cell_tetrahedron
else:
xml_writer.write_header_mesh(ofile, "triangle", 2)
elem_type = "ElmT3n2D"
write_cell_func = xml_writer.write_cell_triangle
xml_writer.write_header_vertices(ofile, num_vertices)
# Read & write vertices and collect markers for vertices
vertex_markers = []
unique_vertex_markers = set()
for i in range(num_vertices):
line = ifile.readline()
m = re.match(r"^.*\(\s*(.*)\s*\).*\](.*)$", line)
x = list(map(float, re.split("[\s,]+", m.group(1))))
xml_writer.write_vertex(ofile, i, *x)
markers = list(map(int, m.group(2).split()))
vertex_markers.append(markers)
unique_vertex_markers.update(markers)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_cells)
# Output unique vertex markers as individual VertexFunctions
unique_vertex_markers.difference_update([0])
for unique_marker in unique_vertex_markers:
ofile_marker = open(ofilename.replace(".xml", "") + \
"_marker_" + str(unique_marker)+".xml", "w")
xml_writer.write_header_meshfunction(ofile_marker, 0, num_vertices)
for ind, markers in enumerate(vertex_markers):
if unique_marker in markers:
xml_writer.write_entity_meshfunction(ofile_marker, ind, unique_marker)
else:
xml_writer.write_entity_meshfunction(ofile_marker, ind, 0)
xml_writer.write_footer_meshfunction(ofile_marker)
# Ignore comment lines
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
# Read & write cells and collect cell and face markers
cell_markers = []
facet_markers = []
facet_to_vert = [[1,2,3], [0,2,3], [0,1,3], [0,1,2]]
vert_to_facet = facet_to_vert # The same!
cell_ind = 0
while cell_ind < num_cells:
line = ifile.readline()
v = line.split()
if not v:
continue
if v[1] != elem_type:
_error("Only tetrahedral (ElmT4n3D) and triangular (ElmT3n2D) elements are implemented.")
# Store Cell markers
cell_markers.append(int(v[2]))
# Sort vertex indices
cell_indices = sorted([int(x)-1 for x in v[3:]])
write_cell_func(ofile, cell_ind, *cell_indices)
if num_dims == 2:
cell_ind += 1
continue
# Check Facet info
process_facet = set(range(4))
for local_vert_ind, global_vert_ind in enumerate(cell_indices):
# If no marker is included for vertex skip corresponding facet
if not vertex_markers[global_vert_ind]:
process_facet.difference_update(facet_to_vert[local_vert_ind])
# Process facets
for local_facet in process_facet:
# Start with markers from first vertex
global_first_vertex = cell_indices[facet_to_vert[local_facet][0]]
marker_intersection = set(vertex_markers[global_first_vertex])
# Process the other vertices
for local_vert in facet_to_vert[local_facet][1:]:
marker_intersection.intersection_update(\
vertex_markers[cell_indices[local_vert]])
if not marker_intersection:
break
# If not break we have a marker on local_facet
else:
assert(len(marker_intersection)==1)
facet_markers.append((cell_ind, local_facet, \
marker_intersection.pop()))
# Bump cell_ind
cell_ind += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_domains(ofile)
# Write facet markers if any
if facet_markers:
xml_writer.write_header_meshvaluecollection(ofile, "m", 2, \
len(facet_markers), "uint")
for cell, local_facet, marker in facet_markers:
xml_writer.write_entity_meshvaluecollection(ofile, 2, cell, \
marker, local_facet)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_header_meshvaluecollection(ofile, "m", num_dims, \
len(cell_markers), "uint")
for cell, marker in enumerate(cell_markers):
xml_writer.write_entity_meshvaluecollection(ofile, num_dims, cell, \
marker)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
class ParseError(Exception):
""" Error encountered in source file.
"""
class DataHandler(object):
""" Baseclass for handlers of mesh data.
The actual handling of mesh data encountered in the source file is
    delegated to a polymorphic object. Typically, the delegate will write the
data to XML.
@ivar _state: the state which the handler is in, one of State_*.
@ivar _cell_type: cell type in mesh. One of CellType_*.
@ivar _dim: mesh dimensions.
"""
State_Invalid, State_Init, State_Vertices, State_Cells, \
State_MeshFunction, State_MeshValueCollection = list(range(6))
CellType_Tetrahedron, CellType_Triangle, CellType_Interval = list(range(3))
def __init__(self):
self._state = self.State_Invalid
def set_mesh_type(self, cell_type, dim):
assert self._state == self.State_Invalid
self._state = self.State_Init
if cell_type == "tetrahedron":
self._cell_type = self.CellType_Tetrahedron
elif cell_type == "triangle":
self._cell_type = self.CellType_Triangle
elif cell_type == "interval":
self._cell_type = self.CellType_Interval
self._dim = dim
def start_vertices(self, num_vertices):
assert self._state == self.State_Init
self._state = self.State_Vertices
def add_vertex(self, vertex, coords):
assert self._state == self.State_Vertices
def end_vertices(self):
assert self._state == self.State_Vertices
self._state = self.State_Init
def start_cells(self, num_cells):
assert self._state == self.State_Init
self._state = self.State_Cells
def add_cell(self, cell, nodes):
assert self._state == self.State_Cells
def end_cells(self):
assert self._state == self.State_Cells
self._state = self.State_Init
def start_domains(self):
assert self._state == self.State_Init
def end_domains(self):
self._state = self.State_Init
def start_meshfunction(self, name, dim, size):
assert self._state == self.State_Init
self._state = self.State_MeshFunction
def add_entity_meshfunction(self, index, value):
assert self._state == self.State_MeshFunction
def end_meshfunction(self):
assert self._state == self.State_MeshFunction
self._state = self.State_Init
def start_mesh_value_collection(self, name, dim, size, etype):
assert self._state == self.State_Init
self._state = self.State_MeshValueCollection
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
assert self._state == self.State_MeshValueCollection
def end_mesh_value_collection(self):
assert self._state == self.State_MeshValueCollection
self._state = self.State_Init
def warn(self, msg):
""" Issue warning during parse.
"""
warnings.warn(msg)
def error(self, msg):
""" Raise error during parse.
This method is expected to raise ParseError.
"""
raise ParseError(msg)
def close(self):
self._state = self.State_Invalid
class XmlHandler(DataHandler):
""" Data handler class which writes to Dolfin XML.
"""
def __init__(self, ofilename):
DataHandler.__init__(self)
self._ofilename = ofilename
self.__ofile = open(ofilename, "w")
self.__ofile_meshfunc = None
def ofile(self):
return self.__ofile
def set_mesh_type(self, cell_type, dim):
DataHandler.set_mesh_type(self, cell_type, dim)
xml_writer.write_header_mesh(self.__ofile, cell_type, dim)
def start_vertices(self, num_vertices):
DataHandler.start_vertices(self, num_vertices)
xml_writer.write_header_vertices(self.__ofile, num_vertices)
def add_vertex(self, vertex, coords):
DataHandler.add_vertex(self, vertex, coords)
xml_writer.write_vertex(self.__ofile, vertex, *coords)
def end_vertices(self):
DataHandler.end_vertices(self)
xml_writer.write_footer_vertices(self.__ofile)
def start_cells(self, num_cells):
DataHandler.start_cells(self, num_cells)
xml_writer.write_header_cells(self.__ofile, num_cells)
def add_cell(self, cell, nodes):
DataHandler.add_cell(self, cell, nodes)
if self._cell_type == self.CellType_Tetrahedron:
func = xml_writer.write_cell_tetrahedron
elif self._cell_type == self.CellType_Triangle:
func = xml_writer.write_cell_triangle
elif self._cell_type == self.CellType_Interval:
func = xml_writer.write_cell_interval
func(self.__ofile, cell, *nodes)
def end_cells(self):
DataHandler.end_cells(self)
xml_writer.write_footer_cells(self.__ofile)
def start_meshfunction(self, name, dim, size):
DataHandler.start_meshfunction(self, name, dim, size)
fname = os.path.splitext(self.__ofile.name)[0]
self.__ofile_meshfunc = open("%s_%s.xml" % (fname, name), "w")
xml_writer.write_header_meshfunction(self.__ofile_meshfunc, dim, size)
def add_entity_meshfunction(self, index, value):
DataHandler.add_entity_meshfunction(self, index, value)
xml_writer.write_entity_meshfunction(self.__ofile_meshfunc, index, value)
def end_meshfunction(self):
DataHandler.end_meshfunction(self)
xml_writer.write_footer_meshfunction(self.__ofile_meshfunc)
self.__ofile_meshfunc.close()
self.__ofile_meshfunc = None
def start_domains(self):
#DataHandler.start_domains(self)
xml_writer.write_header_domains(self.__ofile)
def end_domains(self):
#DataHandler.end_domains(self)
xml_writer.write_footer_domains(self.__ofile)
def start_mesh_value_collection(self, name, dim, size, etype):
DataHandler.start_mesh_value_collection(self, name, dim, size, etype)
xml_writer.write_header_meshvaluecollection(self.__ofile, name, dim, size, etype)
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
DataHandler.add_entity_mesh_value_collection(self, dim, index, value)
xml_writer.write_entity_meshvaluecollection(self.__ofile, dim, index, value, local_entity=local_entity)
def end_mesh_value_collection(self):
DataHandler.end_mesh_value_collection(self)
xml_writer.write_footer_meshvaluecollection(self.__ofile)
def close(self):
DataHandler.close(self)
if self.__ofile.closed:
return
xml_writer.write_footer_mesh(self.__ofile)
self.__ofile.close()
if self.__ofile_meshfunc is not None:
self.__ofile_meshfunc.close()
def netcdf2xml(ifilename,ofilename):
"Convert from NetCDF format to DOLFIN XML."
print("Converting from NetCDF format (.ncdf) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
cell_type = None
dim = 0
# Scan file for dimension, number of nodes, number of elements
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if re.search(r"num_dim.*=", line):
dim = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_nodes.*=", line):
num_vertices = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_elem.*=", line):
num_cells = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"connect1 =",line):
break
num_dims=dim
# Set cell type
if dim == 2:
cell_type = "triangle"
if dim == 3:
cell_type = "tetrahedron"
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
xml_writer.write_header_cells(ofile, num_cells)
num_cells_read = 0
# Read and write cells
while 1:
# Read next line
line = ifile.readline()
if not line:
break
connect=re.split("[,;]",line)
if num_dims == 2:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
n3 = int(connect[3])-1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_vertices(ofile, num_vertices)
break
num_vertices_read = 0
coords = [[],[],[]]
coord = -1
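    # Skip ahead to the "coord =" section, then collect the coordinate values,
    # which ncdump prints one spatial dimension at a time.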
while 1:
line = ifile.readline()
if not line:
_error("Missing data")
if re.search(r"coord =",line):
break
# Read vertices
while 1:
line = ifile.readline()
if not line:
break
if re.search(r"\A\s\s\S+,",line):
coord+=1
print("Found x_"+str(coord)+" coordinates")
coords[coord] += line.split()
if re.search(r";",line):
break
# Write vertices
for i in range(num_vertices):
if num_dims == 2:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = 0
if num_dims == 3:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = float(re.split(",",coords[2].pop(0))[0])
xml_writer.write_vertex(ofile, i, x, y, z)
# Write footer
xml_writer.write_footer_vertices(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def exodus2xml(ifilename,ofilename):
"Convert from Exodus II format to DOLFIN XML."
print("Converting from Exodus II format to NetCDF format")
name = ifilename.split(".")[0]
netcdffilename = name +".ncdf"
status, output = get_status_output('ncdump '+ifilename + ' > '+netcdffilename)
if status != 0:
raise IOError("Something wrong while executing ncdump. Is ncdump "\
"installed on the system?")
netcdf2xml(netcdffilename, ofilename)
def _error(message):
"Write an error message"
for line in message.split("\n"):
print("*** %s" % line)
sys.exit(2)
def convert2xml(ifilename, ofilename, iformat=None):
""" Convert a file to the DOLFIN XML format.
"""
convert(ifilename, XmlHandler(ofilename), iformat=iformat)
def convert(ifilename, handler, iformat=None):
""" Convert a file using a provided data handler.
Note that handler.close is called when this function finishes.
@param ifilename: Name of input file.
@param handler: The data handler (instance of L{DataHandler}).
@param iformat: Format of input file.
"""
if iformat is None:
iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])
# XXX: Backwards-compat
if hasattr(handler, "_ofilename"):
ofilename = handler._ofilename
# Choose conversion
if iformat == "mesh":
# Convert from mesh to xml format
mesh2xml(ifilename, ofilename)
elif iformat == "gmsh":
# Convert from gmsh to xml format
gmsh2xml(ifilename, handler)
elif iformat == "Triangle":
# Convert from Triangle to xml format
triangle2xml(ifilename, ofilename)
elif iformat == "xml-old":
# Convert from old to new xml format
xml_old2xml(ifilename, ofilename)
elif iformat == "metis":
# Convert from metis graph to dolfin graph xml format
metis_graph2graph_xml(ifilename, ofilename)
elif iformat == "scotch":
# Convert from scotch graph to dolfin graph xml format
scotch_graph2graph_xml(ifilename, ofilename)
elif iformat == "diffpack":
# Convert from Diffpack tetrahedral grid format to xml format
diffpack2xml(ifilename, ofilename)
elif iformat == "abaqus":
# Convert from abaqus to xml format
abaqus.convert(ifilename, handler)
elif iformat == "NetCDF":
# Convert from NetCDF generated from ExodusII format to xml format
netcdf2xml(ifilename, ofilename)
elif iformat =="ExodusII":
# Convert from ExodusII format to xml format via NetCDF
exodus2xml(ifilename, ofilename)
elif iformat == "StarCD":
# Convert from Star-CD tetrahedral grid format to xml format
starcd2xml(ifilename, ofilename)
else:
_error("Sorry, cannot convert between %s and DOLFIN xml file formats." % iformat)
# XXX: handler.close messes things for other input formats than abaqus or gmsh
if iformat in ("abaqus", "gmsh"):
handler.close()
def starcd2xml(ifilename, ofilename):
"Convert from Star-CD tetrahedral grid format to DOLFIN XML."
print(starcd2xml.__doc__)
if not os.path.isfile(ifilename[:-3] + "vrt") or not os.path.isfile(ifilename[:-3] + "cel"):
print("StarCD format requires one .vrt file and one .cel file")
sys.exit(2)
# open output file
ofile = open(ofilename, "w")
# Open file, the vertices are in a .vrt file
ifile = open(ifilename[:-3] + "vrt", "r")
    xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
# Read & write vertices
    # first, read all lines (need to sweep two times through the file)
lines = ifile.readlines()
# second, find the number of vertices
num_vertices = -1
counter = 0
    # nodenr_map is needed because starcd supports node numbering like 1,2,4 (i.e. 3 is missing)
nodenr_map = {}
for line in lines:
nodenr = int(line[0:15])
nodenr_map[nodenr] = counter
counter += 1
num_vertices = counter
# third, run over all vertices
xml_writer.write_header_vertices(ofile, num_vertices)
for line in lines:
nodenr = int(line[0:15])
vertex0 = float(line[15:31])
vertex1 = float(line[31:47])
vertex2 = float(line[47:63])
xml_writer.write_vertex(ofile, nodenr_map[nodenr], float(vertex0), float(vertex1), float(vertex2))
xml_writer.write_footer_vertices(ofile)
# Open file, the cells are in a .cel file
ifile = open(ifilename[:-3] + "cel", "r")
# Read & write cells
    # first, read all lines (need to sweep two times through the file)
lines = ifile.readlines()
# second, find the number of cells
num_cells = -1
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if node4 > 0:
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
counter += 1
else:
print("The file does contain cells that are not tetraheders. The cell number is ", cellnr, " the line read was ", line)
else:
# triangles on the surface
# print "The file does contain cells that are not tetraheders node4==0. The cell number is ", cellnr, " the line read was ", line
#sys.exit(2)
pass
num_cells = counter
# third, run over all cells
xml_writer.write_header_cells(ofile, num_cells)
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if (node4 > 0):
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
xml_writer.write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1], nodenr_map[node2], nodenr_map[node4])
counter += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
| FEniCS/dolfin | site-packages/dolfin_utils/meshconvert/meshconvert.py | Python | lgpl-3.0 | 50,178 |
import gc
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np
import os
import arboretum
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
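    # Re-key the products of each training order onto the user's previous
    # order_id so they can serve as *_prev features.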
order_prev = pd.merge(order_train, orders, on='order_id')
order_prev.order_number -= 1
order_prev = pd.merge(order_prev[
['user_id', 'order_number', 'product_id', 'reordered', 'add_to_cart_order', 'order_dow',
'order_hour_of_day']], orders[['user_id', 'order_number', 'order_id']],
on=['user_id', 'order_number'])
order_prev.drop(['order_number', 'user_id'], axis=1, inplace=True)
order_prev.rename(columns={
'reordered': 'reordered_prev',
'add_to_cart_order': 'add_to_cart_order_prev',
'order_dow': 'order_dow_prev',
'order_hour_of_day': 'order_hour_of_day_prev'
}, inplace=True)
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
# product_periods.prev1 = product_periods['last'] / product_periods.prev1
# product_periods.prev2 = product_periods['last'] / product_periods.prev2
# product_periods['mean'] = product_periods['last'] / product_periods['mean']
# product_periods['median'] = product_periods['last'] / product_periods['median']
print(order_train.columns)
###########################
weights = order_train.groupby('order_id')['reordered'].sum().to_frame('weights')
weights.reset_index(inplace=True)
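    # Per-product reorder probability: the fraction of a product's buyers who
    # ever reordered it.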
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
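    # Per-product totals: order count, reorder count, mean add-to-cart position
    # and the overall reorder ratio.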
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
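    # Per-user statistics: order count and days-since-prior aggregates, then
    # total/distinct products, reorder ratio and average basket size.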
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'})\
.rename(columns = {'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
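    # Reorder counts and ratios per (user, product, day-of-week).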
data_dow = orders_products.groupby(['user_id', 'product_id', 'order_dow']).agg({
'reordered': ['sum', 'size']})
data_dow.columns = data_dow.columns.droplevel(0)
data_dow.columns = ['reordered_dow', 'reordered_dow_size']
data_dow['reordered_dow_ration'] = data_dow.reordered_dow / data_dow.reordered_dow_size
data_dow.reset_index(inplace=True)
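    # Core user x product interaction features: number of orders, first/last
    # order number, cart-position and timing statistics.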
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered':['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_order_median', 'order_dow_mean', 'order_dow_median',
'order_hour_of_day_mean', 'order_hour_of_day_median',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_inverted_median',
'add_to_cart_order_relative_mean', 'add_to_cart_order_relative_median',
'reordered_sum'
]
data['user_product_reordered_ratio'] = (data.reordered_sum + 1.0) / data.up_orders
# data['first_order'] = data['up_orders'] > 0
# data['second_order'] = data['up_orders'] > 1
#
# data.groupby('product_id')['']
data.reset_index(inplace=True)
data = pd.merge(data, prod_stat, on='product_id')
data = pd.merge(data, user_stat, on='user_id')
data['up_order_rate'] = data.up_orders / data.user_orders
data['up_orders_since_last_order'] = data.user_orders - data.up_last_order
data['up_order_rate_since_first_order'] = data.user_orders / (data.user_orders - data.up_first_order + 1)
############################
user_dep_stat = pd.read_pickle('data/user_department_products.pkl')
user_aisle_stat = pd.read_pickle('data/user_aisle_products.pkl')
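    # Assemble the training frame by joining product, user, aisle and
    # department statistics, interaction features, period features and
    # product embeddings onto the labelled orders.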
order_train = pd.merge(order_train, products, on='product_id')
order_train = pd.merge(order_train, orders, on='order_id')
order_train = pd.merge(order_train, user_dep_stat, on=['user_id', 'department_id'])
order_train = pd.merge(order_train, user_aisle_stat, on=['user_id', 'aisle_id'])
order_train = pd.merge(order_train, prod_usr, on='product_id')
order_train = pd.merge(order_train, prod_usr_reordered, on='product_id', how='left')
order_train.prod_users_unq_reordered.fillna(0, inplace=True)
order_train = pd.merge(order_train, data, on=['product_id', 'user_id'])
order_train = pd.merge(order_train, data_dow, on=['product_id', 'user_id', 'order_dow'], how='left')
order_train['aisle_reordered_ratio'] = order_train.aisle_reordered / order_train.user_orders
order_train['dep_reordered_ratio'] = order_train.dep_reordered / order_train.user_orders
order_train = pd.merge(order_train, product_periods, on=['user_id', 'product_id'])
order_train = pd.merge(order_train, product_embeddings, on=['product_id'])
# order_train = pd.merge(order_train, weights, on='order_id')
# order_train = pd.merge(order_train, order_prev, on=['order_id', 'product_id'], how='left')
# order_train.reordered_prev = order_train.reordered_prev.astype(np.float32) + 1.
# order_train['reordered_prev'].fillna(0, inplace=True)
# order_train[['add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev']].fillna(255, inplace=True)
print('data is joined')
# order_train.days_since_prior_order_mean -= order_train.days_since_prior_order
# order_train.days_since_prior_order_median -= order_train.days_since_prior_order
#
# order_train.order_dow_mean -= order_train.order_dow
# order_train.order_dow_median -= order_train.order_dow
#
# order_train.order_hour_of_day_mean -= order_train.order_hour_of_day
# order_train.order_hour_of_day_median -= order_train.order_hour_of_day
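    # Hold out 25% of the orders for validation; splitting on order_id keeps
    # all products of an order on the same side.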
unique_orders = np.unique(order_train.order_id)
orders_train, orders_test = train_test_split(unique_orders, test_size=0.25, random_state=2017)
order_test = order_train.loc[np.in1d(order_train.order_id, orders_test)]
order_train = order_train.loc[np.in1d(order_train.order_id, orders_train)]
features = [
# 'reordered_dow_ration', 'reordered_dow', 'reordered_dow_size',
# 'reordered_prev', 'add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev',
'user_product_reordered_ratio', 'reordered_sum',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_relative_mean',
'reorder_prob',
'last', 'prev1', 'prev2', 'median', 'mean',
'dep_reordered_ratio', 'aisle_reordered_ratio',
'aisle_products',
'aisle_reordered',
'dep_products',
'dep_reordered',
'prod_users_unq', 'prod_users_unq_reordered',
'order_number', 'prod_add_to_card_mean',
'days_since_prior_order',
'order_dow', 'order_hour_of_day',
'reorder_ration',
'user_orders', 'user_order_starts_at', 'user_mean_days_since_prior',
# 'user_median_days_since_prior',
'user_average_basket', 'user_distinct_products', 'user_reorder_ratio', 'user_total_products',
'prod_orders', 'prod_reorders',
'up_order_rate', 'up_orders_since_last_order', 'up_order_rate_since_first_order',
'up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position',
# 'up_median_cart_position',
'days_since_prior_order_mean',
# 'days_since_prior_order_median',
'order_dow_mean',
# 'order_dow_median',
# 'order_hour_of_day_mean',
# 'order_hour_of_day_median'
]
features.extend(embedings)
print('not included', set(order_train.columns.tolist()) - set(features))
data = order_train[features].fillna(-1.).values.astype(np.float32)
data_categoties = order_train[['product_id', 'aisle_id', 'department_id']].values.astype(np.uint32)
labels = order_train[['reordered']].values.astype(np.float32).flatten()
# weights = order_train.weights.values.astype(np.float32)
# weights = 1./np.maximum(weights, 1.0)
data_val = order_test[features].fillna(-1.).values.astype(np.float32)
data_categoties_val = order_test[['product_id', 'aisle_id', 'department_id']].values.astype(np.uint32)
labels_val = order_test[['reordered']].values.astype(np.float32).flatten()
print(data.shape, data_categoties.shape, labels.shape)
# assert data.shape[0] == 8474661
config = json.dumps({'objective': 1,
'internals':
{
'compute_overlap': 3,
'double_precision': True,
'seed': 2017
},
'verbose':
{
'gpu': True,
'booster': True,
'data': True
},
'tree':
{
'eta': 0.01,
'max_depth': 10,
'gamma': 0.0,
'min_child_weight':20.0,
'min_leaf_size': 0,
'colsample_bytree': 0.6,
'colsample_bylevel': 0.6,
'lambda': 0.1,
'gamma_relative': 0.0001
}})
print(config)
data = arboretum.DMatrix(data, data_category=data_categoties, y=labels)
data_val = arboretum.DMatrix(data_val, data_category=data_categoties_val)
model = arboretum.Garden(config, data)
print('training...')
best_logloss = 1.0
best_rocauc = 0
best_iter_logloss = best_iter_rocauc = -1
with ThreadPoolExecutor(max_workers=4) as executor:
# grow trees
for i in range(20000):
print('tree', i)
model.grow_tree()
model.append_last_tree(data_val)
if i % 5 == 0:
pred = model.get_y(data)
pred_val = model.get_y(data_val)
logloss = executor.submit(sklearn.metrics.log_loss, labels, pred, eps=1e-6)
logloss_val = executor.submit(sklearn.metrics.log_loss, labels_val, pred_val, eps=1e-6)
rocauc = executor.submit(roc_auc_score, labels, pred)
rocauc_val = executor.submit(roc_auc_score, labels_val, pred_val)
# fscore_train = fscore(true_value_matrix, pred, order_index, product_index, len(orders_unique), len(products_unique), threshold=[0.16, 0.17, 0.18, 0.19, 0.2, 0.21])
# fscore_value = fscore(true_value_matrix_val, pred_val, order_index_val, product_index_val, len(orders_unique_val), len(products_unique_val), threshold=[0.16, 0.17, 0.18, 0.19, 0.2, 0.21])
logloss = logloss.result()
logloss_val = logloss_val.result()
rocauc = rocauc.result()
rocauc_val = rocauc_val.result()
print('train', logloss, rocauc,
'val', logloss_val, rocauc_val)
if rocauc_val > best_rocauc:
print('best roc auc ', rocauc_val)
best_rocauc = rocauc_val
best_iter_rocauc = i
if logloss_val < best_logloss:
print('best logloss', logloss_val)
best_logloss = logloss_val
best_iter_logloss = i
print('best roc auc iteration', best_rocauc, best_iter_rocauc)
print('best logloss iteration', best_logloss, best_iter_logloss)
| bataeves/kaggle | instacart/imba/arboretum_cv.py | Python | unlicense | 18,971 |
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import sys
# Define required packages.
requires = []
# Assume spidev is required on non-windows & non-mac platforms (i.e. linux).
if sys.platform != 'win32' and sys.platform != 'darwin':
requires.append('spidev')
setup(name = 'Adafruit_GPIO',
version = '0.8.0',
author = 'Tony DiCola',
author_email = '[email protected]',
description = 'Library to provide a cross-platform GPIO interface on the Raspberry Pi and Beaglebone Black using the RPi.GPIO and Adafruit_BBIO libraries.',
license = 'MIT',
url = 'https://github.com/adafruit/Adafruit_Python_GPIO/',
install_requires = requires,
packages = find_packages())
| WxOutside/software | telemetry/sensors/weatherPiArduino/Adafruit_Python_GPIO/setup.py | Python | unlicense | 846 |
#!/usr/bin/env python
#/******************************************************************************
# * $Id$
# *
# * Project: GDAL Make Histogram and Cumulative graph from Tab delimited table as
# generated by gdal_hist.py
# * Purpose: Take a gdal_hist.py output and create a histogram plot using matplotlib
# * Author: Trent Hare, [email protected]
# *
# ******************************************************************************
# * Public domain license (unlicense)
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
import os
import math
import numpy as np
import pandas as pd
from pandas.tools.plotting import table
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def usage():
print 'Usage: slope_histogram_cumulative_graph.py -name "InSight E1" slope_histogram_table.tab outfile.png'
print " This program is geared to run on a table as generated by gdal_hist.py"
print 'slope_histogram_cumulative_graph.py -name "E_Marg_CE 01" DEM_1m_E_Marg_CE_adir_1m_hist.xls DEM_1m_E_Marg_CE_adir_1m_hist.png'
sys.exit(0)
#set None for commandline options
name = ""
infile = None
outfile = None
# =============================================================================
# Parse command line arguments.
# =============================================================================
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-name':
i = i + 1
name = sys.argv[i]
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
usage()
i = i + 1
if infile is None:
usage()
if not(os.path.isfile(infile)):
print "filename %s does not exist." % (infile)
sys.exit(1)
#load table
df = pd.DataFrame.from_csv(infile, sep='\t', header=1)
#initialize figure
fig, ax1 = plt.subplots()
#calculate unscaled values
#df.value = (df.value * 5) - 0.2
#df.ix[df.value < 0] = 0; df
#not to reverse histogram before calculating 'approx' stats
#min = round(df.value.min(),2)
#max = round(df.value.max(),2)
#mean = round(df.value.mean(),2)
#stddev = round(df.value.std(),2)
#rms = round(math.sqrt((mean * mean) + (stddev * stddev)),2)
#statsDict = {'Min':min,'Max':max,'Mean':mean \
#,'StdDev':stddev,'RMS':rms}
#statsSeries = pd.Series(statsDict,name='stats')
#statsSeries.sort()
#t = table(ax1, statsSeries, \
#loc='lower right', colWidths=[0.1] * 2)
#t.set_fontsize(18)
#props = t.properties()
#cells = props['child_artists']
#for c in cells:
#c.set_height(0.05)
#Plot frequency histogram from input table
ax1.fill(df.value,df['count'],'gray')
#df.plot(ax1=ax1, kind='area', color='gray', legend=True)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax1.get_yaxis().set_tick_params(direction='out')
#get min and max as found by pandas for plotting 'arrow' at X=15
#minY = round(df['count'].min(),0)
#maxY = round(df['count'].max(),0)
#grab existing ax1 axes
#ax = plt.axes()
#ax.arrow(15, minY, 0, maxY, head_width=0, head_length=0, fc='k', ec='k')
ax1.axvline(x=15, color='black', alpha=0.5)
#add cumulative plot on 'Y2' axis using save X axes
ax2 = ax1.twinx()
ax2.plot(df.value,df['cumulative'],'blue')
#df.plot(ax2=ax2, df.value,df['cumulative'],'blue')
ax2.get_yaxis().set_tick_params(direction='out')
#define labels
ax1.set_xlabel('Slope (degrees)')
ax1.set_ylabel('Count')
ax2.set_ylabel('Cumulative')
plt.suptitle(name + ' Slope Histogram and Cumulative Plot')
#save out PNG
plt.savefig(outfile)
print "Graph exported to %s" % (outfile)
| USGS-Astrogeology/GDAL_scripts | gdal_baseline_slope/python2/slope_histogram_cumulative_graph.py | Python | unlicense | 4,103 |
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for classifier models"""
import copy
class Classifier(object):
"""Domain object for a classifier.
A classifier is a machine learning model created using a particular
classification algorithm which is used for answer classification
task.
Attributes:
id: str. The unique id of the classifier.
exp_id: str. The exploration id to which this classifier belongs.
exp_version_when_created: str. The version of the exploration when
this classification model was created.
state_name: str. The name of the state to which the classifier belongs.
algorithm_id: str. The id of the algorithm used for generating
classifier.
cached_classifier_data: dict. The actual classifier model used for
classification purpose.
data_schema_version: int. Schema version of the data used by the
classifier. This depends on the algorithm ID.
"""
def __init__(self, classifier_id, exp_id, exp_version_when_created,
state_name, algorithm_id, cached_classifier_data,
data_schema_version):
"""Constructs an Classifier domain object.
Args:
classifier_id: str. The unique id of the classifier.
exp_id: str. The exploration id to which the classifier belongs.
exp_version_when_created: str. The version of the exploration when
this classification model was created.
state_name: str. The name of the state to which the classifier
belongs.
algorithm_id: str. The id of the algorithm used for generating
classifier.
cached_classifier_data: dict. The actual classifier model used for
classification purpose.
data_schema_version: int. Schema version of the
data used by the classifier.
"""
self.id = classifier_id
self.exp_id = exp_id
self.exp_version_when_created = exp_version_when_created
self.state_name = state_name
self.algorithm_id = algorithm_id
self.cached_classifier_data = copy.deepcopy(cached_classifier_data)
self.data_schema_version = data_schema_version
def to_dict(self):
"""Constructs a dict representation of Classifier domain object.
Returns:
A dict representation of Classifier domain object.
"""
return {
'classifier_id': self.id,
'exp_id': self.exp_id,
'exp_version_when_created': self.exp_version_when_created,
'state_name': self.state_name,
'algorithm_id': self.algorithm_id,
'cached_classifier_data': self.cached_classifier_data,
'data_schema_version': self.data_schema_version
}
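# Illustrative sketch (not part of the original module): constructing a Classifier
# and serializing it with to_dict(). All field values below are made-up examples,
# not real Oppia identifiers.
#
#   classifier = Classifier(
#       classifier_id='classifier-id-1',
#       exp_id='exp-id-1',
#       exp_version_when_created='3',
#       state_name='Introduction',
#       algorithm_id='ExampleAlgorithm',
#       cached_classifier_data={'model': 'serialized-model-data'},
#       data_schema_version=1)
#   classifier_dict = classifier.to_dict()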
| jestapinski/oppia | core/domain/classifier_domain.py | Python | apache-2.0 | 3,445 |
#!/usr/bin/env python
#
# kvmexit.py
#
# Display the exit_reason and its statistics of each vm exit
# for all vcpus of all virtual machines. For example:
# $./kvmexit.py
# PID TID KVM_EXIT_REASON COUNT
# 1273551 1273568 EXIT_REASON_MSR_WRITE 6
# 1274253 1274261 EXIT_REASON_EXTERNAL_INTERRUPT 1
# 1274253 1274261 EXIT_REASON_HLT 12
# ...
#
# Besides, we also allow users to specify one pid, tid(s), or one
# pid and its vcpu. See kvmexit_example.txt for more examples.
#
# @PID: each vitual machine's pid in the user space.
# @TID: the user space's thread of each vcpu of that virtual machine.
# @KVM_EXIT_REASON: the reason why the vm exits.
# @COUNT: the counts of the @KVM_EXIT_REASONS.
#
# REQUIRES: Linux 4.7+ (BPF_PROG_TYPE_TRACEPOINT support)
#
# Copyright (c) 2021 ByteDance Inc. All rights reserved.
#
# Author(s):
# Fei Li <[email protected]>
from __future__ import print_function
from time import sleep
from bcc import BPF
import argparse
import multiprocessing
import os
import subprocess
#
# Process Arguments
#
def valid_args_list(args):
args_list = args.split(",")
for arg in args_list:
try:
int(arg)
except:
raise argparse.ArgumentTypeError("must be valid integer")
return args_list
# arguments
examples = """examples:
./kvmexit # Display kvm_exit_reason and its statistics in real-time until Ctrl-C
./kvmexit 5 # Display in real-time after sleeping 5s
./kvmexit -p 3195281 # Collapse all tids for pid 3195281 with exit reasons sorted in descending order
./kvmexit -p 3195281 20 # Collapse all tids for pid 3195281 with exit reasons sorted in descending order, and display after sleeping 20s
./kvmexit -p 3195281 -v 0 # Display only vcpu0 for pid 3195281, descending sort by default
./kvmexit -p 3195281 -a # Display all tids for pid 3195281
./kvmexit -t 395490 # Display only for tid 395490 with exit reasons sorted in descending order
./kvmexit -t 395490 20 # Display only for tid 395490 with exit reasons sorted in descending order after sleeping 20s
./kvmexit -T '395490,395491' # Display for a union like {395490, 395491}
"""
parser = argparse.ArgumentParser(
description="Display kvm_exit_reason and its statistics at a timed interval",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("duration", nargs="?", default=99999999, type=int, help="show delta for next several seconds")
parser.add_argument("-p", "--pid", type=int, help="trace this PID only")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("-t", "--tid", type=int, help="trace this TID only")
exgroup.add_argument("-T", "--tids", type=valid_args_list, help="trace a comma separated series of tids with no space in between")
exgroup.add_argument("-v", "--vcpu", type=int, help="trace this vcpu only")
exgroup.add_argument("-a", "--alltids", action="store_true", help="trace all tids for this pid")
args = parser.parse_args()
duration = int(args.duration)
#
# Setup BPF
#
# load BPF program
bpf_text = """
#include <linux/delay.h>
#define REASON_NUM 69
#define TGID_NUM 1024
struct exit_count {
u64 exit_ct[REASON_NUM];
};
BPF_PERCPU_ARRAY(init_value, struct exit_count, 1);
BPF_TABLE("percpu_hash", u64, struct exit_count, pcpu_kvm_stat, TGID_NUM);
struct cache_info {
u64 cache_pid_tgid;
struct exit_count cache_exit_ct;
};
BPF_PERCPU_ARRAY(pcpu_cache, struct cache_info, 1);
FUNC_ENTRY {
int cache_miss = 0;
int zero = 0;
u32 er = GET_ER;
if (er >= REASON_NUM) {
return 0;
}
u64 cur_pid_tgid = bpf_get_current_pid_tgid();
u32 tgid = cur_pid_tgid >> 32;
u32 pid = cur_pid_tgid;
if (THREAD_FILTER)
return 0;
struct exit_count *tmp_info = NULL, *initial = NULL;
struct cache_info *cache_p;
cache_p = pcpu_cache.lookup(&zero);
if (cache_p == NULL) {
return 0;
}
if (cache_p->cache_pid_tgid == cur_pid_tgid) {
//a. If the cur_pid_tgid hit this physical cpu consecutively, save it to pcpu_cache
tmp_info = &cache_p->cache_exit_ct;
} else {
//b. If another pid_tgid matches this pcpu for the last hit, OR it is the first time to hit this physical cpu.
cache_miss = 1;
// b.a Try to load the last cache struct if exists.
tmp_info = pcpu_kvm_stat.lookup(&cur_pid_tgid);
// b.b If it is the first time for the cur_pid_tgid to hit this pcpu, employ a
// per_cpu array to initialize pcpu_kvm_stat's exit_count with each exit reason's count set to zero
if (tmp_info == NULL) {
initial = init_value.lookup(&zero);
if (initial == NULL) {
return 0;
}
pcpu_kvm_stat.update(&cur_pid_tgid, initial);
tmp_info = pcpu_kvm_stat.lookup(&cur_pid_tgid);
// To pass the verifier
if (tmp_info == NULL) {
return 0;
}
}
}
if (er < REASON_NUM) {
tmp_info->exit_ct[er]++;
if (cache_miss == 1) {
if (cache_p->cache_pid_tgid != 0) {
// b.*.a Let's save the last hit cache_info into kvm_stat.
pcpu_kvm_stat.update(&cache_p->cache_pid_tgid, &cache_p->cache_exit_ct);
}
// b.* As the cur_pid_tgid meets current pcpu_cache_array for the first time, save it.
cache_p->cache_pid_tgid = cur_pid_tgid;
bpf_probe_read(&cache_p->cache_exit_ct, sizeof(*tmp_info), tmp_info);
}
return 0;
}
return 0;
}
"""
# format output
exit_reasons = (
"EXCEPTION_NMI",
"EXTERNAL_INTERRUPT",
"TRIPLE_FAULT",
"INIT_SIGNAL",
"N/A",
"N/A",
"N/A",
"INTERRUPT_WINDOW",
"NMI_WINDOW",
"TASK_SWITCH",
"CPUID",
"N/A",
"HLT",
"INVD",
"INVLPG",
"RDPMC",
"RDTSC",
"N/A",
"VMCALL",
"VMCLEAR",
"VMLAUNCH",
"VMPTRLD",
"VMPTRST",
"VMREAD",
"VMRESUME",
"VMWRITE",
"VMOFF",
"VMON",
"CR_ACCESS",
"DR_ACCESS",
"IO_INSTRUCTION",
"MSR_READ",
"MSR_WRITE",
"INVALID_STATE",
"MSR_LOAD_FAIL",
"N/A",
"MWAIT_INSTRUCTION",
"MONITOR_TRAP_FLAG",
"N/A",
"MONITOR_INSTRUCTION",
"PAUSE_INSTRUCTION",
"MCE_DURING_VMENTRY",
"N/A",
"TPR_BELOW_THRESHOLD",
"APIC_ACCESS",
"EOI_INDUCED",
"GDTR_IDTR",
"LDTR_TR",
"EPT_VIOLATION",
"EPT_MISCONFIG",
"INVEPT",
"RDTSCP",
"PREEMPTION_TIMER",
"INVVPID",
"WBINVD",
"XSETBV",
"APIC_WRITE",
"RDRAND",
"INVPCID",
"VMFUNC",
"ENCLS",
"RDSEED",
"PML_FULL",
"XSAVES",
"XRSTORS",
"N/A",
"N/A",
"UMWAIT",
"TPAUSE"
)
#
# Do some checks
#
try:
# Currently, only adapted to the Intel architecture
cmd = "cat /proc/cpuinfo | grep vendor_id | head -n 1"
arch_info = subprocess.check_output(cmd, shell=True).strip()
if b"Intel" in arch_info:
pass
else:
raise Exception("Currently we only support Intel architecture, please do expansion if needs more.")
# Check if kvm module is loaded
if os.access("/dev/kvm", os.R_OK | os.W_OK):
pass
else:
raise Exception("Please insmod kvm module to use kvmexit tool.")
except Exception as e:
raise Exception("Failed to do precondition check, due to: %s." % e)
try:
if BPF.support_raw_tracepoint_in_module():
# Let's firstly try raw_tracepoint_in_module
func_entry = "RAW_TRACEPOINT_PROBE(kvm_exit)"
get_er = "ctx->args[0]"
else:
# If raw_tp_in_module is not supported, fall back to regular tp
func_entry = "TRACEPOINT_PROBE(kvm, kvm_exit)"
get_er = "args->exit_reason"
except Exception as e:
raise Exception("Failed to catch kvm exit reasons due to: %s" % e)
def find_tid(tgt_dir, tgt_vcpu):
for tid in os.listdir(tgt_dir):
path = tgt_dir + "/" + tid + "/comm"
fp = open(path, "r")
comm = fp.read()
if (comm.find(tgt_vcpu) != -1):
return tid
return -1
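# Illustrative sketch (hypothetical call, not from the original tool): find_tid maps a
# vcpu label to its host thread id by scanning each /proc/<pid>/task/<tid>/comm entry
# and returns -1 when no matching thread is found.
#
#   tid = find_tid('/proc/3195281/task', 'CPU 0')
#   if tid == -1:
#       print("vcpu thread not found")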
# set process/thread filter
thread_context = ""
header_format = ""
need_collapse = not args.alltids
if args.tid is not None:
thread_context = "TID %s" % args.tid
thread_filter = 'pid != %s' % args.tid
elif args.tids is not None:
thread_context = "TIDS %s" % args.tids
thread_filter = "pid != " + " && pid != ".join(args.tids)
header_format = "TIDS "
elif args.pid is not None:
thread_context = "PID %s" % args.pid
thread_filter = 'tgid != %s' % args.pid
if args.vcpu is not None:
thread_context = "PID %s VCPU %s" % (args.pid, args.vcpu)
# transfer vcpu to tid
tgt_dir = '/proc/' + str(args.pid) + '/task'
tgt_vcpu = "CPU " + str(args.vcpu)
args.tid = find_tid(tgt_dir, tgt_vcpu)
if args.tid == -1:
raise Exception("There's no v%s for PID %d." % (tgt_vcpu, args.pid))
thread_filter = 'pid != %s' % args.tid
elif args.alltids:
thread_context = "PID %s and its all threads" % args.pid
header_format = "TID "
else:
thread_context = "all threads"
thread_filter = '0'
header_format = "PID TID "
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
# For kernel >= 5.0, use RAW_TRACEPOINT_MODULE for performance consideration
bpf_text = bpf_text.replace('FUNC_ENTRY', func_entry)
bpf_text = bpf_text.replace('GET_ER', get_er)
b = BPF(text=bpf_text)
# header
print("Display kvm exit reasons and statistics for %s" % thread_context, end="")
if duration < 99999999:
print(" after sleeping %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
try:
sleep(duration)
except KeyboardInterrupt:
print()
# Currently, sorting multiple tids in descending order is not supported.
if (args.pid or args.tid):
ct_reason = []
if args.pid:
tgid_exit = [0 for i in range(len(exit_reasons))]
# output
print("%s%-35s %s" % (header_format, "KVM_EXIT_REASON", "COUNT"))
pcpu_kvm_stat = b["pcpu_kvm_stat"]
pcpu_cache = b["pcpu_cache"]
for k, v in pcpu_kvm_stat.items():
tgid = k.value >> 32
pid = k.value & 0xffffffff
for i in range(0, len(exit_reasons)):
sum1 = 0
for inner_cpu in range(0, multiprocessing.cpu_count()):
cachePIDTGID = pcpu_cache[0][inner_cpu].cache_pid_tgid
# Take priority to check if it is in cache
if cachePIDTGID == k.value:
sum1 += pcpu_cache[0][inner_cpu].cache_exit_ct.exit_ct[i]
# If not in cache, find from kvm_stat
else:
sum1 += v[inner_cpu].exit_ct[i]
if sum1 == 0:
continue
if (args.pid and args.pid == tgid and need_collapse):
tgid_exit[i] += sum1
elif (args.tid and args.tid == pid):
ct_reason.append((sum1, i))
elif not need_collapse or args.tids:
print("%-8u %-35s %-8u" % (pid, exit_reasons[i], sum1))
else:
print("%-8u %-8u %-35s %-8u" % (tgid, pid, exit_reasons[i], sum1))
# Display only for the target tid in descending sort
if (args.tid and args.tid == pid):
ct_reason.sort(reverse=True)
for i in range(0, len(ct_reason)):
if ct_reason[i][0] == 0:
continue
print("%-35s %-8u" % (exit_reasons[ct_reason[i][1]], ct_reason[i][0]))
break
# Aggregate all tids' counts for this args.pid in descending sort
if args.pid and need_collapse:
for i in range(0, len(exit_reasons)):
ct_reason.append((tgid_exit[i], i))
ct_reason.sort(reverse=True)
for i in range(0, len(ct_reason)):
if ct_reason[i][0] == 0:
continue
print("%-35s %-8u" % (exit_reasons[ct_reason[i][1]], ct_reason[i][0]))
| brendangregg/bcc | tools/kvmexit.py | Python | apache-2.0 | 12,070 |
#!/usr/bin/python
import argparse
import sys
import os
import subprocess
import signal
import getpass
import simplejson
from termcolor import colored
import ConfigParser
import StringIO
import functools
import time
import random
import string
from configobj import ConfigObj
import tempfile
import pwd, grp
import traceback
import uuid
import yaml
import re
from zstacklib import *
import jinja2
import socket
import struct
import fcntl
import commands
import threading
import itertools
import platform
from datetime import datetime, timedelta
import multiprocessing
mysql_db_config_script='''
echo "modify my.cnf"
if [ -f /etc/mysql/mariadb.conf.d/50-server.cnf ]; then
#ubuntu 16.04
mysql_conf=/etc/mysql/mariadb.conf.d/50-server.cnf
elif [ -f /etc/mysql/my.cnf ]; then
# Ubuntu 14.04
mysql_conf=/etc/mysql/my.cnf
elif [ -f /etc/my.cnf ]; then
# centos
mysql_conf=/etc/my.cnf
fi
sed -i 's/^bind-address/#bind-address/' $mysql_conf
sed -i 's/^skip-networking/#skip-networking/' $mysql_conf
sed -i 's/^bind-address/#bind-address/' $mysql_conf
grep 'binlog_format=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a binlog_format=mixed\' $mysql_conf
fi
grep 'log_bin_trust_function_creators=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log_bin_trust_function_creators=1"
sed -i '/\[mysqld\]/a log_bin_trust_function_creators=1\' $mysql_conf
fi
grep 'expire_logs=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "expire_logs=30"
sed -i '/\[mysqld\]/a expire_logs=30\' $mysql_conf
fi
grep 'max_binlog_size=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_binlog_size=500m"
sed -i '/\[mysqld\]/a max_binlog_size=500m\' $mysql_conf
fi
grep 'log-bin=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log-bin=mysql-binlog"
sed -i '/\[mysqld\]/a log-bin=mysql-binlog\' $mysql_conf
fi
grep 'max_connections' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_connections=1024"
sed -i '/\[mysqld\]/a max_connections=1024\' $mysql_conf
else
echo "max_connections=1024"
sed -i 's/max_connections.*/max_connections=1024/g' $mysql_conf
fi
grep '^character-set-server' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a character-set-server=utf8\' $mysql_conf
fi
grep '^skip-name-resolve' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
sed -i '/\[mysqld\]/a skip-name-resolve\' $mysql_conf
fi
grep 'tmpdir' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
mysql_tmp_path="/var/lib/mysql/tmp"
if [ ! -x "$mysql_tmp_path" ]; then
mkdir "$mysql_tmp_path"
chown mysql:mysql "$mysql_tmp_path"
chmod 1777 "$mysql_tmp_path"
fi
echo "tmpdir=/var/lib/mysql/tmp"
sed -i '/\[mysqld\]/a tmpdir=/var/lib/mysql/tmp\' $mysql_conf
fi
'''
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def loop_until_timeout(timeout, interval=1):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
current_time = time.time()
expired = current_time + timeout
while current_time < expired:
if f(*args, **kwargs):
return True
time.sleep(interval)
current_time = time.time()
return False
return inner
return wrap
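# Illustrative sketch (hypothetical usage, not from the original source): the decorator
# polls the wrapped predicate every `interval` seconds and gives up after `timeout`
# seconds, returning True on the first truthy result and False otherwise.
#
#   @loop_until_timeout(timeout=30, interval=2)
#   def wait_for_api_port():
#       return check_ip_port('127.0.0.1', 8080)
#
#   # wait_for_api_port() returns True once the port opens, False on timeout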
def find_process_by_cmdline(cmdlines):
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
cmdline = fd.read()
is_find = True
for c in cmdlines:
if c not in cmdline:
is_find = False
break
if not is_find:
continue
return pid
except IOError:
continue
return None
def ssh_run_full(ip, cmd, params=[], pipe=True):
remote_path = '/tmp/%s.sh' % uuid.uuid4()
script = '''/bin/bash << EOF
cat << EOF1 > %s
%s
EOF1
/bin/bash %s %s
ret=$?
rm -f %s
exit $ret
EOF''' % (remote_path, cmd, remote_path, ' '.join(params), remote_path)
scmd = ShellCmd('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "%s"' % (ip, script), pipe=pipe)
scmd(False)
return scmd
def ssh_run(ip, cmd, params=[]):
scmd = ssh_run_full(ip, cmd, params)
if scmd.return_code != 0:
scmd.raise_error()
return scmd.stdout
def ssh_run_no_pipe(ip, cmd, params=[]):
scmd = ssh_run_full(ip, cmd, params, False)
if scmd.return_code != 0:
scmd.raise_error()
return scmd.stdout
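# Illustrative sketch (hypothetical host and command): ssh_run copies the command into
# a temporary script on the remote host, executes it with the given parameters, removes
# it, and returns stdout; ssh_run_full returns the ShellCmd so the caller can inspect
# return_code without raising.
#
#   out = ssh_run('192.168.0.10', 'echo hello $1', params=['world'])
#   scmd = ssh_run_full('192.168.0.10', 'exit 3')
#   if scmd.return_code != 0:
#       pass  # handle the failure explicitly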
class CtlError(Exception):
pass
def warn(msg):
sys.stdout.write(colored('WARNING: %s\n' % msg, 'yellow'))
def error(msg):
sys.stderr.write(colored('ERROR: %s\n' % msg, 'red'))
sys.exit(1)
def error_not_exit(msg):
sys.stderr.write(colored('ERROR: %s\n' % msg, 'red'))
def info(*msg):
if len(msg) == 1:
out = '%s\n' % ''.join(msg)
else:
out = ''.join(msg)
sys.stdout.write(out)
def get_detail_version():
detailed_version_file = os.path.join(ctl.zstack_home, "VERSION")
if os.path.exists(detailed_version_file):
with open(detailed_version_file, 'r') as fd:
detailed_version = fd.read()
return detailed_version
else:
return None
def check_ip_port(host, port):
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, int(port)))
return result == 0
def compare_version(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version2), normalize(version1))
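# Illustrative sketch: compare_version normalizes versions by stripping trailing ".0"
# groups and compares them component-wise; because the arguments are swapped inside,
# it sorts versions in descending order when used with list.sort(cmp=...).
#
#   compare_version('1.10.0', '1.9')     # negative: '1.10.0' sorts before '1.9'
#   versions = ['1.9', '1.10.0', '1.8.6']
#   versions.sort(cmp=compare_version)   # ['1.10.0', '1.9', '1.8.6']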
def get_zstack_version(db_hostname, db_port, db_user, db_password):
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select version from schema_version order by version desc"
ret = query.query()
versions = [r['version'] for r in ret]
versions.sort(cmp=compare_version)
version = versions[0]
return version
def get_default_gateway_ip():
'''This function will return default route gateway ip address'''
with open("/proc/net/route") as gateway:
try:
for item in gateway:
fields = item.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
if fields[7] == '00000000':
return socket.inet_ntoa(struct.pack("=L", int(fields[2], 16)))
except ValueError:
return None
def get_default_ip():
cmd = ShellCmd("""dev=`ip route|grep default|head -n 1|awk -F "dev" '{print $2}' | awk -F " " '{print $1}'`; ip addr show $dev |grep "inet "|awk '{print $2}'|head -n 1 |awk -F '/' '{print $1}'""")
cmd(False)
return cmd.stdout.strip()
def get_yum_repo_from_property():
yum_repo = ctl.read_property('Ansible.var.zstack_repo')
if not yum_repo:
return yum_repo
# avoid http server didn't start when install package
if 'zstack-mn' in yum_repo:
yum_repo = yum_repo.replace("zstack-mn","zstack-local")
if 'qemu-kvm-ev-mn' in yum_repo:
yum_repo = yum_repo.replace("qemu-kvm-ev-mn","qemu-kvm-ev")
return yum_repo
def get_host_list(table_name):
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select * from %s" % table_name
host_vo = query.query()
return host_vo
def get_vrouter_list():
ip_list = []
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select ip from VmNicVO where deviceId = 0 and vmInstanceUuid in (select uuid from VirtualRouterVmVO)"
vrouter_ip_list = query.query()
for ip in vrouter_ip_list:
ip_list.append(ip['ip'])
return ip_list
def get_ha_mn_list(conf_file):
with open(conf_file, 'r') as fd:
ha_conf_content = yaml.load(fd.read())
mn_list = ha_conf_content['host_list'].split(',')
return mn_list
def stop_mevoco(host_post_info):
command = "zstack-ctl stop_node && zstack-ctl stop_ui"
logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(host_post_info.private_key, host_post_info.host, command))
if status != 0:
logger.error("[ HOST: %s ] INFO: shell command: '%s' failed" % (host_post_info.host, command))
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
else:
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def start_mevoco(host_post_info):
command = "zstack-ctl start_node && zstack-ctl start_ui"
logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(host_post_info.private_key, host_post_info.host, command))
if status != 0:
logger.error("[ HOST: %s ] FAIL: shell command: '%s' failed" % (host_post_info.host, command))
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
else:
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
class ExceptionWrapper(object):
def __init__(self, msg):
self.msg = msg
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if globals().get('verbose', False) and exc_type and exc_val and exc_tb:
error_not_exit(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
if exc_type == CtlError:
return
if exc_val:
error('%s\n%s' % (str(exc_val), self.msg))
def on_error(msg):
return ExceptionWrapper(msg)
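# Illustrative sketch (hypothetical message and call): on_error() wraps a block so that
# a CtlError propagates unchanged, while any other exception is printed together with
# the contextual message via error(), which exits the process.
#
#   with on_error('errors on reading the properties file'):
#       value = parse_properties_step()   # hypothetical call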
def error_if_tool_is_missing(tool):
if shell_return('which %s' % tool) != 0:
raise CtlError('cannot find tool "%s", please install it and re-run' % tool)
def expand_path(path):
if path.startswith('~'):
return os.path.expanduser(path)
else:
return os.path.abspath(path)
def check_host_info_format(host_info):
'''check install ha and install multi mn node info format'''
if '@' not in host_info:
error("Host connect information should follow format: 'root:password@host_ip', please check your input!")
else:
# get user and password
if ':' not in host_info.split('@')[0]:
error("Host connect information should follow format: 'root:password@host_ip', please check your input!")
else:
user = host_info.split('@')[0].split(':')[0]
password = host_info.split('@')[0].split(':')[1]
if user != "" and user != "root":
print "Only root user can be supported, please change user to root"
if user == "":
user = "root"
# get ip and port
if ':' not in host_info.split('@')[1]:
ip = host_info.split('@')[1]
port = '22'
else:
ip = host_info.split('@')[1].split(':')[0]
port = host_info.split('@')[1].split(':')[1]
return (user, password, ip, port)
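# Illustrative sketch (made-up credentials): the expected host string is
# 'root:password@ip[:port]'; the ssh port defaults to '22' when omitted.
#
#   user, password, ip, port = check_host_info_format('root:[email protected]')
#   # -> ('root', 'secret', '10.0.0.5', '22')
#   user, password, ip, port = check_host_info_format('root:[email protected]:2222')
#   # -> ('root', 'secret', '10.0.0.5', '2222')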
def check_host_password(password, ip):
command ='timeout 10 sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no -o ' \
'StrictHostKeyChecking=no root@%s echo ""' % (password, ip)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Connect to host: '%s' with password '%s' failed! Please check password firstly and make sure you have "
"disabled UseDNS in '/etc/ssh/sshd_config' on %s" % (ip, password, ip))
def get_ip_by_interface(device_name):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915,
struct.pack('256s', device_name[:15])
)[20:24])
def start_remote_mn( host_post_info):
command = "zstack-ctl start_node && zstack-ctl start_ui"
(status, output) = commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(UpgradeHACmd.private_key_name, host_post_info.host, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
class SpinnerInfo(object):
spinner_status = {}
def __init__(self):
self.output = ""
self.name = ""
class ZstackSpinner(object):
def __init__(self, spinner_info):
self.output = spinner_info.output
self.name = spinner_info.name
self.spinner = itertools.cycle("|/~\\")
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
def run(self):
time.sleep(.2)
while SpinnerInfo.spinner_status[self.name]:
sys.stdout.write("\r %s: ... %s " % (self.output, next(self.spinner)))
sys.stdout.flush()
time.sleep(.1)
print "\r %s: ... %s" % (self.output, colored("PASS","green"))
class Ansible(object):
def __init__(self, yaml, host='localhost', debug=False, ssh_key='none'):
self.yaml = yaml
self.host = host
self.debug = debug
self.ssh_key = ssh_key
def __call__(self, *args, **kwargs):
error_if_tool_is_missing('ansible-playbook')
cmd = '''
yaml_file=`mktemp`
cat <<EOF >> $$yaml_file
$yaml
EOF
ansible_cmd="ansible-playbook $$yaml_file -i '$host,'"
if [ $debug -eq 1 ]; then
ansible_cmd="$$ansible_cmd -vvvv"
fi
if [ "$ssh_key" != "none" ]; then
ansible_cmd="$$ansible_cmd --private-key=$ssh_key"
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i $ssh_key $host 'echo hi > /dev/null'
else
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no $host 'echo hi > /dev/null'
fi
if [ $$? -ne 0 ]; then
ansible_cmd="$$ansible_cmd --ask-pass"
fi
eval $$ansible_cmd
ret=$$?
rm -f $$yaml_file
exit $$ret
'''
t = string.Template(cmd)
cmd = t.substitute({
'yaml': self.yaml,
'host': self.host,
'debug': int(self.debug),
'ssh_key': self.ssh_key
})
with on_error('Ansible failure'):
try:
shell_no_pipe(cmd)
except CtlError:
raise Exception('see prior Ansible log for detailed information')
def ansible(yaml, host='localhost', debug=False, ssh_key=None):
Ansible(yaml, host, debug, ssh_key or 'none')()
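# Illustrative sketch (hypothetical playbook and host): ansible() renders the playbook
# YAML into a temporary file and runs ansible-playbook against a single host, falling
# back to --ask-pass when key-based ssh login fails.
#
#   playbook = '''
#   - hosts: all
#     tasks:
#       - shell: echo hello
#   '''
#   ansible(playbook, host='192.168.0.10', ssh_key=ctl.ssh_private_key)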
def reset_dict_value(dict_name, value):
return dict.fromkeys(dict_name, value)
def check_zstack_user():
try:
pwd.getpwnam('zstack')
except KeyError:
raise CtlError('cannot find user account "zstack", your installation seems incomplete')
try:
grp.getgrnam('zstack')
except KeyError:
raise CtlError('cannot find user account "zstack", your installation seems incomplete')
class UseUserZstack(object):
def __init__(self):
self.root_uid = None
self.root_gid = None
check_zstack_user()
def __enter__(self):
self.root_uid = os.getuid()
self.root_gid = os.getgid()
self.root_home = os.environ['HOME']
os.setegid(grp.getgrnam('zstack').gr_gid)
os.seteuid(pwd.getpwnam('zstack').pw_uid)
os.environ['HOME'] = os.path.expanduser('~zstack')
def __exit__(self, exc_type, exc_val, exc_tb):
os.seteuid(self.root_uid)
os.setegid(self.root_gid)
os.environ['HOME'] = self.root_home
def use_user_zstack():
return UseUserZstack()
class PropertyFile(object):
def __init__(self, path, use_zstack=True):
self.path = path
self.use_zstack = use_zstack
if not os.path.isfile(self.path):
raise CtlError('cannot find property file at %s' % self.path)
with on_error("errors on reading %s" % self.path):
self.config = ConfigObj(self.path, write_empty_values=True)
def read_all_properties(self):
with on_error("errors on reading %s" % self.path):
return self.config.items()
def delete_properties(self, keys):
for k in keys:
if k in self.config:
del self.config[k]
with use_user_zstack():
self.config.write()
def read_property(self, key):
with on_error("errors on reading %s" % self.path):
return self.config.get(key, None)
def write_property(self, key, value):
with on_error("errors on writing (%s=%s) to %s" % (key, value, self.path)):
if self.use_zstack:
with use_user_zstack():
self.config[key] = value
self.config.write()
else:
self.config[key] = value
self.config.write()
def write_properties(self, lst):
with on_error("errors on writing list of key-value%s to %s" % (lst, self.path)):
if self.use_zstack:
with use_user_zstack():
for key, value in lst:
self.config[key] = value
self.config.write()
else:
for key, value in lst:
self.config[key] = value
self.config.write()
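# Illustrative sketch (hypothetical path and keys): PropertyFile is a thin ConfigObj
# wrapper around an existing key=value properties file; with use_zstack=True it writes
# while temporarily switching to the 'zstack' user so file ownership is preserved.
#
#   prop = PropertyFile('/tmp/example.properties', use_zstack=False)  # file must exist
#   prop.write_property('DB.url', 'jdbc:mysql://127.0.0.1:3306')
#   url = prop.read_property('DB.url')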
class CtlParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error:%s\n' % message)
self.print_help()
sys.exit(1)
class Ctl(object):
DEFAULT_ZSTACK_HOME = '/usr/local/zstack/apache-tomcat/webapps/zstack/'
USER_ZSTACK_HOME_DIR = os.path.expanduser('~zstack')
LAST_ALIVE_MYSQL_IP = "MYSQL_LATEST_IP"
LAST_ALIVE_MYSQL_PORT = "MYSQL_LATEST_PORT"
LOGGER_DIR = "/var/log/zstack/"
LOGGER_FILE = "zstack-ctl.log"
def __init__(self):
self.commands = {}
self.command_list = []
self.main_parser = CtlParser(prog='zstackctl', description="ZStack management tool", formatter_class=argparse.RawTextHelpFormatter)
self.main_parser.add_argument('-v', help="verbose, print execution details", dest="verbose", action="store_true", default=False)
self.zstack_home = None
self.properties_file_path = None
self.verbose = False
self.extra_arguments = None
def register_command(self, cmd):
assert cmd.name, "command name cannot be None"
assert cmd.description, "command description cannot be None"
self.commands[cmd.name] = cmd
self.command_list.append(cmd)
def _locate_zstack_home(self):
env_path = os.path.expanduser(SetEnvironmentVariableCmd.PATH)
if os.path.isfile(env_path):
env = PropertyFile(env_path)
self.zstack_home = env.read_property('ZSTACK_HOME')
if not self.zstack_home:
self.zstack_home = os.environ.get('ZSTACK_HOME', None)
if not self.zstack_home:
warn('ZSTACK_HOME is not set, default to %s' % self.DEFAULT_ZSTACK_HOME)
self.zstack_home = self.DEFAULT_ZSTACK_HOME
if not os.path.isdir(self.zstack_home):
raise CtlError('cannot find ZSTACK_HOME at %s, please set it in .bashrc or use zstack-ctl setenv ZSTACK_HOME=path' % self.zstack_home)
os.environ['ZSTACK_HOME'] = self.zstack_home
self.properties_file_path = os.path.join(self.zstack_home, 'WEB-INF/classes/zstack.properties')
self.ssh_private_key = os.path.join(self.zstack_home, 'WEB-INF/classes/ansible/rsaKeys/id_rsa')
self.ssh_public_key = os.path.join(self.zstack_home, 'WEB-INF/classes/ansible/rsaKeys/id_rsa.pub')
if not os.path.isfile(self.properties_file_path):
warn('cannot find %s, your ZStack installation may have crashed' % self.properties_file_path)
def get_env(self, name):
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
return env.read_property(name)
def delete_env(self, name):
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
env.delete_properties([name])
def put_envs(self, vs):
if not os.path.exists(SetEnvironmentVariableCmd.PATH):
shell('su - zstack -c "mkdir -p %s"' % os.path.dirname(SetEnvironmentVariableCmd.PATH))
shell('su - zstack -c "touch %s"' % SetEnvironmentVariableCmd.PATH)
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
env.write_properties(vs)
def run(self):
create_log(Ctl.LOGGER_DIR, Ctl.LOGGER_FILE)
if os.getuid() != 0:
raise CtlError('zstack-ctl needs root privilege, please run with sudo')
metavar_list = []
for n,cmd in enumerate(self.command_list):
if cmd.hide is False:
metavar_list.append(cmd.name)
else:
self.command_list[n].description = None
metavar_string = '{' + ','.join(metavar_list) + '}'
subparsers = self.main_parser.add_subparsers(help="All sub-commands", dest="sub_command_name", metavar=metavar_string)
for cmd in self.command_list:
if cmd.description is not None:
cmd.install_argparse_arguments(subparsers.add_parser(cmd.name, help=cmd.description + '\n\n'))
else:
cmd.install_argparse_arguments(subparsers.add_parser(cmd.name))
args, self.extra_arguments = self.main_parser.parse_known_args(sys.argv[1:])
self.verbose = args.verbose
globals()['verbose'] = self.verbose
cmd = self.commands[args.sub_command_name]
if cmd.need_zstack_home():
self._locate_zstack_home()
if cmd.need_zstack_user():
check_zstack_user()
cmd(args)
def internal_run(self, cmd_name, args=''):
cmd = self.commands[cmd_name]
assert cmd, 'cannot find command %s' % cmd_name
params = [cmd_name]
params.extend(args.split())
args_obj, _ = self.main_parser.parse_known_args(params)
if cmd.need_zstack_home():
self._locate_zstack_home()
if cmd.need_zstack_user():
check_zstack_user()
cmd(args_obj)
def read_property_list(self, key):
prop = PropertyFile(self.properties_file_path)
ret = []
for name, value in prop.read_all_properties():
if name.startswith(key):
ret.append((name, value))
return ret
def read_all_properties(self):
prop = PropertyFile(self.properties_file_path)
return prop.read_all_properties()
def read_property(self, key):
prop = PropertyFile(self.properties_file_path)
val = prop.read_property(key)
# our code assumes all values are strings
if isinstance(val, list):
return ','.join(val)
else:
return val
def write_properties(self, properties):
prop = PropertyFile(self.properties_file_path)
with on_error('property must be in format of "key=value", no space before and after "="'):
prop.write_properties(properties)
def write_property(self, key, value):
prop = PropertyFile(self.properties_file_path)
with on_error('property must be in format of "key=value", no space before and after "="'):
prop.write_property(key, value)
def get_db_url(self):
db_url = self.read_property("DB.url")
if not db_url:
db_url = self.read_property('DbFacadeDataSource.jdbcUrl')
if not db_url:
raise CtlError("cannot find DB url in %s. please set DB.url" % self.properties_file_path)
return db_url
def get_live_mysql_portal(self):
hostname_ports, user, password = self.get_database_portal()
last_ip = ctl.get_env(self.LAST_ALIVE_MYSQL_IP)
last_port = ctl.get_env(self.LAST_ALIVE_MYSQL_PORT)
if last_ip and last_port and (last_ip, last_port) in hostname_ports:
first = (last_ip, last_port)
lst = [first]
for it in hostname_ports:
if it != first:
lst.append(it)
hostname_ports = lst
errors = []
for hostname, port in hostname_ports:
if password:
sql = 'mysql --host=%s --port=%s --user=%s --password=%s -e "select 1"' % (hostname, port, user, password)
else:
sql = 'mysql --host=%s --port=%s --user=%s -e "select 1"' % (hostname, port, user)
cmd = ShellCmd(sql)
cmd(False)
if cmd.return_code == 0:
# record the IP and port, so next time we will try them first
ctl.put_envs([
(self.LAST_ALIVE_MYSQL_IP, hostname),
(self.LAST_ALIVE_MYSQL_PORT, port)
])
return hostname, port, user, password
errors.append('failed to connect to the mysql server[hostname:%s, port:%s, user:%s, password:%s]: %s %s' % (
hostname, port, user, password, cmd.stderr, cmd.stdout
))
raise CtlError('\n'.join(errors))
def get_database_portal(self):
db_user = self.read_property("DB.user")
if not db_user:
db_user = self.read_property('DbFacadeDataSource.user')
if not db_user:
raise CtlError("cannot find DB user in %s. please set DB.user" % self.properties_file_path)
db_password = self.read_property("DB.password")
if db_password is None:
db_password = self.read_property('DbFacadeDataSource.password')
if db_password is None:
raise CtlError("cannot find DB password in %s. please set DB.password" % self.properties_file_path)
db_url = self.get_db_url()
host_name_ports = []
def parse_hostname_ports(prefix):
ips = db_url.lstrip(prefix).lstrip('/').split('/')[0]
ips = ips.split(',')
for ip in ips:
if ":" in ip:
hostname, port = ip.split(':')
host_name_ports.append((hostname, port))
else:
host_name_ports.append((ip, '3306'))
if db_url.startswith('jdbc:mysql:loadbalance:'):
parse_hostname_ports('jdbc:mysql:loadbalance:')
elif db_url.startswith('jdbc:mysql:'):
parse_hostname_ports('jdbc:mysql:')
return host_name_ports, db_user, db_password
def check_if_management_node_has_stopped(self, force=False):
db_hostname, db_port, db_user, db_password = self.get_live_mysql_portal()
def get_nodes():
query = MySqlCommandLineQuery()
query.user = db_user
query.password = db_password
query.host = db_hostname
query.port = db_port
query.table = 'zstack'
query.sql = 'select hostname,heartBeat from ManagementNodeVO'
return query.query()
def check():
nodes = get_nodes()
if nodes:
node_ips = [n['hostname'] for n in nodes]
raise CtlError('there are some management nodes%s still running. Please stop all of them before performing the database upgrade.\n'
'If you are sure they have stopped, use option --force and run this command again.\n'
'If you are upgrading with the all-in-one installer, use option -F and run the all-in-one installer again.\n'
'WARNING: the database may crash if you run this command with --force but without stopping management nodes' % node_ips)
def bypass_check():
nodes = get_nodes()
if nodes:
node_ips = [n['hostname'] for n in nodes]
info("it seems some nodes%s are still running. As you have specified option --force, let's wait for 10s to make sure those are stale records. Please be patient." % node_ips)
time.sleep(10)
new_nodes = get_nodes()
for n in new_nodes:
for o in nodes:
if o['hostname'] == n['hostname'] and o['heartBeat'] != n['heartBeat']:
raise CtlError("node[%s] is still Running! Its heart-beat changed from %s to %s in last 10s. Please make sure you really stop it" %
(n['hostname'], o['heartBeat'], n['heartBeat']))
if force:
bypass_check()
else:
check()
ctl = Ctl()
def script(cmd, args=None, no_pipe=False):
if args:
t = string.Template(cmd)
cmd = t.substitute(args)
fd, script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(cmd)
try:
if ctl.verbose:
info('execute script:\n%s\n' % cmd)
if no_pipe:
shell_no_pipe('bash %s' % script_path)
else:
shell('bash %s' % script_path)
finally:
os.remove(script_path)
class ShellCmd(object):
def __init__(self, cmd, workdir=None, pipe=True):
self.cmd = cmd
if pipe:
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=workdir)
else:
self.process = subprocess.Popen(cmd, shell=True, cwd=workdir)
self.return_code = None
self.stdout = None
self.stderr = None
def raise_error(self):
err = []
err.append('failed to execute shell command: %s' % self.cmd)
err.append('return code: %s' % self.process.returncode)
err.append('stdout: %s' % self.stdout)
err.append('stderr: %s' % self.stderr)
raise CtlError('\n'.join(err))
def __call__(self, is_exception=True):
if ctl.verbose:
info('executing shell command[%s]:' % self.cmd)
(self.stdout, self.stderr) = self.process.communicate()
if is_exception and self.process.returncode != 0:
self.raise_error()
self.return_code = self.process.returncode
if ctl.verbose:
info(simplejson.dumps({
"shell" : self.cmd,
"return_code" : self.return_code,
"stdout": self.stdout,
"stderr": self.stderr
}, ensure_ascii=True, sort_keys=True, indent=4))
return self.stdout
def shell(cmd, is_exception=True):
return ShellCmd(cmd)(is_exception)
def shell_no_pipe(cmd, is_exception=True):
return ShellCmd(cmd, pipe=False)(is_exception)
def shell_return(cmd):
scmd = ShellCmd(cmd)
scmd(False)
return scmd.return_code
class Command(object):
def __init__(self):
self.name = None
self.description = None
self.hide = False
self.cleanup_routines = []
self.quiet = False
def install_argparse_arguments(self, parser):
pass
def install_cleanup_routine(self, func):
self.cleanup_routines.append(func)
def need_zstack_home(self):
return True
def need_zstack_user(self):
return True
def __call__(self, *args, **kwargs):
try:
self.run(*args)
if not self.quiet:
logger.info('Start running command [ zstack-ctl %s ]' % ' '.join(sys.argv[1:]))
finally:
for c in self.cleanup_routines:
c()
def run(self, args):
raise CtlError('the command is not implemented')
def create_check_mgmt_node_command(timeout=10, mn_node='127.0.0.1'):
USE_CURL = 0
USE_WGET = 1
NO_TOOL = 2
def use_tool():
cmd = ShellCmd('which wget')
cmd(False)
if cmd.return_code == 0:
return USE_WGET
else:
cmd = ShellCmd('which curl')
cmd(False)
if cmd.return_code == 0:
return USE_CURL
else:
return NO_TOOL
what_tool = use_tool()
if what_tool == USE_CURL:
return ShellCmd('''curl --noproxy --connect-timeout 1 --retry %s --retry-delay 0 --retry-max-time %s --max-time %s -H "Content-Type: application/json" -d '{"org.zstack.header.apimediator.APIIsReadyToGoMsg": {}}' http://%s:8080/zstack/api''' % (timeout, timeout, timeout, mn_node))
elif what_tool == USE_WGET:
return ShellCmd('''wget --no-proxy -O- --tries=%s --timeout=1 --header=Content-Type:application/json --post-data='{"org.zstack.header.apimediator.APIIsReadyToGoMsg": {}}' http://%s:8080/zstack/api''' % (timeout, mn_node))
else:
return None
def find_process_by_cmdline(keyword):
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
cmdline = fd.read()
if keyword not in cmdline:
continue
return pid
except IOError:
continue
return None
class MySqlCommandLineQuery(object):
def __init__(self):
self.user = None
self.password = None
self.host = 'localhost'
self.port = 3306
self.sql = None
self.table = None
def query(self):
assert self.user, 'user cannot be None'
assert self.sql, 'sql cannot be None'
assert self.table, 'table cannot be None'
sql = "%s\G" % self.sql
if self.password:
cmd = '''mysql -u %s -p%s --host %s --port %s -t %s -e "%s"''' % (self.user, self.password, self.host,
self.port, self.table, sql)
else:
cmd = '''mysql -u %s --host %s --port %s -t %s -e "%s"''' % (self.user, self.host, self.port, self.table, sql)
output = shell(cmd)
output = output.strip(' \t\n\r')
ret = []
if not output:
return ret
current = None
for l in output.split('\n'):
if current is None and not l.startswith('*********'):
raise CtlError('cannot parse mysql output generated by the sql "%s", output:\n%s' % (self.sql, output))
if l.startswith('*********'):
if current:
ret.append(current)
current = {}
else:
l = l.strip()
key, value = l.split(':', 1)
current[key.strip()] = value[1:]
if current:
ret.append(current)
return ret
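# Illustrative sketch (hypothetical credentials): MySqlCommandLineQuery shells out to
# the mysql client with \G output and parses each '*****'-separated record into a dict.
#
#   query = MySqlCommandLineQuery()
#   query.host = '127.0.0.1'
#   query.port = 3306
#   query.user = 'zstack'
#   query.password = 'zstack.password'
#   query.table = 'zstack'
#   query.sql = 'select version from schema_version order by version desc'
#   rows = query.query()   # e.g. [{'version': '3.10.0'}, ...]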
class ShowStatusCmd(Command):
def __init__(self):
super(ShowStatusCmd, self).__init__()
self.name = 'status'
self.description = 'show ZStack status and information.'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to show the management node status on a remote machine')
parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)
def _stop_remote(self, args):
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl status"' % args.host)
def run(self, args):
self.quiet = args.quiet
if args.host:
self._stop_remote(args)
return
log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
log_path = os.path.normpath(log_path)
info_list = [
"ZSTACK_HOME: %s" % ctl.zstack_home,
"zstack.properties: %s" % ctl.properties_file_path,
"log4j2.xml: %s" % os.path.join(os.path.dirname(ctl.properties_file_path), 'log4j2.xml'),
"PID file: %s" % os.path.join(os.path.expanduser('~zstack'), "management-server.pid"),
"log file: %s" % log_path
]
def check_zstack_status():
cmd = create_check_mgmt_node_command()
def write_status(status):
info_list.append('MN status: %s' % status)
if not cmd:
write_status('cannot detect status, no wget and curl installed')
return
cmd(False)
pid = get_management_node_pid()
if cmd.return_code != 0:
if pid:
write_status('%s, the management node seems to have become a zombie: it has stopped responding to APIs but the '
'process (PID: %s) is still running. Please stop the node using zstack-ctl stop_node' %
(colored('Unknown', 'yellow'), pid))
else:
write_status(colored('Stopped', 'red'))
return
if 'false' in cmd.stdout:
write_status('Starting, should be ready in a few seconds')
elif 'true' in cmd.stdout:
write_status(colored('Running', 'green') + ' [PID:%s]' % pid)
else:
write_status('Unknown')
def show_version():
try:
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
except:
info('version: %s' % colored('unknown, MySQL is not running', 'yellow'))
return
if db_password:
cmd = ShellCmd('''mysql -u %s -p%s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_password, db_hostname, db_port))
else:
cmd = ShellCmd('''mysql -u %s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_hostname, db_port))
cmd(False)
if cmd.return_code != 0:
info('version: %s' % colored('unknown, MySQL is not running', 'yellow'))
return
out = cmd.stdout
if 'schema_version' not in out:
version = '0.6'
else:
version = get_zstack_version(db_hostname, db_port, db_user, db_password)
detailed_version = get_detail_version()
if detailed_version is not None:
info('version: %s (%s)' % (version, detailed_version))
else:
info('version: %s' % version)
check_zstack_status()
info('\n'.join(info_list))
ctl.internal_run('ui_status', args='-q')
show_version()
class DeployDBCmd(Command):
DEPLOY_DB_SCRIPT_PATH = "WEB-INF/classes/deploydb.sh"
ZSTACK_PROPERTY_FILE = "WEB-INF/classes/zstack.properties"
def __init__(self):
super(DeployDBCmd, self).__init__()
self.name = "deploydb"
self.description = (
"deploy a new ZStack database, create a user 'zstack' with password specified in '--zstack-password',\n"
"and update zstack.properties if --no-update is not set.\n"
"\nDANGER: this will erase the existing ZStack database.\n"
"NOTE: If the database is running on a remote host, please make sure you have granted privileges to the root user by:\n"
"\n\tGRANT ALL PRIVILEGES ON *.* TO 'root'@'%%' IDENTIFIED BY 'your_root_password' WITH GRANT OPTION;\n"
"\tFLUSH PRIVILEGES;\n"
)
ctl.register_command(self)
def update_db_config(self):
update_db_config_script = mysql_db_config_script
fd, update_db_config_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(update_db_config_script)
info('update_db_config_script_path is: %s' % update_db_config_script_path)
ShellCmd('bash %s' % update_db_config_script_path)()
os.remove(update_db_config_script_path)
def install_argparse_arguments(self, parser):
parser.add_argument('--root-password', help='root user password of MySQL. [DEFAULT] empty password')
parser.add_argument('--zstack-password', help='password of user "zstack". [DEFAULT] empty password')
parser.add_argument('--host', help='IP or DNS name of MySQL host; default is localhost', default='localhost')
parser.add_argument('--port', help='port of MySQL host; default is 3306', type=int, default=3306)
parser.add_argument('--no-update', help='do NOT update database information to zstack.properties; if you do not know what this means, do not use it', action='store_true', default=False)
parser.add_argument('--drop', help='drop existing zstack database', action='store_true', default=False)
parser.add_argument('--keep-db', help='keep existing zstack database and not raise error.', action='store_true', default=False)
def run(self, args):
error_if_tool_is_missing('mysql')
script_path = os.path.join(ctl.zstack_home, self.DEPLOY_DB_SCRIPT_PATH)
if not os.path.exists(script_path):
error('cannot find %s, your ZStack installation may have been corrupted, please reinstall it' % script_path)
property_file_path = os.path.join(ctl.zstack_home, self.ZSTACK_PROPERTY_FILE)
if not os.path.exists(property_file_path):
error('cannot find %s, your ZStack installation may have been corrupted, please reinstall it' % property_file_path)
if args.root_password:
check_existing_db = 'mysql --user=root --password=%s --host=%s --port=%s -e "use zstack"' % (args.root_password, args.host, args.port)
else:
check_existing_db = 'mysql --user=root --host=%s --port=%s -e "use zstack"' % (args.host, args.port)
self.update_db_config()
cmd = ShellCmd(check_existing_db)
cmd(False)
if not args.root_password:
args.root_password = "''"
if not args.zstack_password:
args.zstack_password = "''"
if cmd.return_code == 0 and not args.drop:
if args.keep_db:
info('detected an existing zstack database and kept it; if you want to drop it, please append parameter --drop instead of --keep-db\n')
else:
raise CtlError('detected an existing zstack database; if you are sure you want to drop it, please append parameter --drop, or use --keep-db to keep the database')
else:
cmd = ShellCmd('bash %s root %s %s %s %s' % (script_path, args.root_password, args.host, args.port, args.zstack_password))
cmd(False)
if cmd.return_code != 0:
if ('ERROR 1044' in cmd.stdout or 'ERROR 1044' in cmd.stderr) or ('Access denied' in cmd.stdout or 'Access denied' in cmd.stderr):
raise CtlError(
"failed to deploy database, access denied; if your root password is correct and you use IP rather than localhost,"
"it's probably caused by the privileges are not granted to root user for remote access; please see instructions in 'zstack-ctl -h'."
"error details: %s, %s\n" % (cmd.stdout, cmd.stderr)
)
else:
cmd.raise_error()
if not args.no_update:
if args.zstack_password == "''":
args.zstack_password = ''
properties = [
("DB.user", "zstack"),
("DB.password", args.zstack_password),
("DB.url", 'jdbc:mysql://%s:%s' % (args.host, args.port)),
]
ctl.write_properties(properties)
info('Successfully deployed ZStack database and updated corresponding DB information in %s' % property_file_path)
class TailLogCmd(Command):
def __init__(self):
super(TailLogCmd, self).__init__()
self.name = 'taillog'
self.description = "shortcut to print management node log to stdout"
ctl.register_command(self)
def run(self, args):
log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
log_path = os.path.normpath(log_path)
if not os.path.isfile(log_path):
raise CtlError('cannot find %s' % log_path)
script = ShellCmd('tail -f %s' % log_path, pipe=False)
script()
class ConfigureCmd(Command):
def __init__(self):
super(ConfigureCmd, self).__init__()
self.name = 'configure'
self.description = "configure zstack.properties"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to set properties in zstack.properties on the remote machine')
parser.add_argument('--duplicate-to-remote', help='SSH URL, for example, [email protected], to copy zstack.properties on this machine to the remote machine')
parser.add_argument('--use-file', help='path to a file that will be used as zstack.properties')
def _configure_remote_node(self, args):
shell_no_pipe('ssh %s "/usr/bin/zstack-ctl configure %s"' % (args.host, ' '.join(ctl.extra_arguments)))
def _duplicate_remote_node(self, args):
tmp_file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
tmp_file_name = os.path.join('/tmp/', tmp_file_name)
with open(ctl.properties_file_path, 'r') as fd:
txt = fd.read()
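# Push the local properties file to the remote host through an ssh heredoc: the remote
# side writes the content to a temporary file, feeds it to 'zstack-ctl configure --use-file',
# and removes the temporary file before returning.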
cmd = '''ssh -T %s << EOF
cat <<EOT > %s
%s
EOT
if [ $? != 0 ]; then
print "cannot create temporary properties file"
exit 1
fi
/usr/bin/zstack-ctl configure --use-file %s
ret=$?
rm -f %s
exit $ret
EOF
'''
shell_no_pipe(cmd % (args.duplicate_to_remote, tmp_file_name, txt, tmp_file_name, tmp_file_name))
info("successfully copied %s to remote machine %s" % (ctl.properties_file_path, args.duplicate_to_remote))
def _use_file(self, args):
path = os.path.expanduser(args.use_file)
if not os.path.isfile(path):
raise CtlError('cannot find file %s' % path)
shell('cp -f %s %s' % (path, ctl.properties_file_path))
def run(self, args):
if args.use_file:
self._use_file(args)
return
if args.duplicate_to_remote:
self._duplicate_remote_node(args)
return
if not ctl.extra_arguments:
raise CtlError('please input properties in the format of "key=value", separated by spaces')
if args.host:
self._configure_remote_node(args)
return
properties = [l.split('=', 1) for l in ctl.extra_arguments]
ctl.write_properties(properties)
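# Locate the management node process: first scan process command lines for the
# 'appName=zstack' marker, then fall back to the pid file, verifying that the recorded
# pid still belongs to a zstack process before trusting it.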
def get_management_node_pid():
DEFAULT_PID_FILE_PATH = os.path.join(os.path.expanduser('~zstack'), "management-server.pid")
pid = find_process_by_cmdline('appName=zstack')
if pid:
return pid
pid_file_path = ctl.read_property('pidFilePath')
if not pid_file_path:
pid_file_path = DEFAULT_PID_FILE_PATH
if not os.path.exists(pid_file_path):
return None
def is_zstack_process(pid):
cmdline = os.path.join('/proc/%s/cmdline' % pid)
with open(cmdline, 'r') as fd:
content = fd.read()
return 'appName=zstack' in content
with open(pid_file_path, 'r') as fd:
pid = fd.read()
try:
pid = int(pid)
proc_pid = '/proc/%s' % pid
if os.path.exists(proc_pid):
if is_zstack_process(pid):
return pid
else:
return None
except Exception:
return None
return None
class StopAllCmd(Command):
def __init__(self):
super(StopAllCmd, self).__init__()
self.name = 'stop'
self.description = 'stop all ZStack related services, including the ZStack management node and web UI,' \
' if those services are installed'
ctl.register_command(self)
def run(self, args):
def stop_mgmt_node():
info(colored('Stopping ZStack management node, it may take a few minutes...', 'blue'))
ctl.internal_run('stop_node')
def stop_ui():
virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
if not os.path.exists(virtualenv):
info('skip stopping web UI, it is not installed')
return
info(colored('Stopping ZStack web UI, it may take a few minutes...', 'blue'))
ctl.internal_run('stop_ui')
stop_ui()
stop_mgmt_node()
class StartAllCmd(Command):
def __init__(self):
super(StartAllCmd, self).__init__()
self.name = 'start'
self.description = 'start all ZStack related services, including the ZStack management node and web UI,' \
' if those services are installed'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--daemon', help='Start ZStack in daemon mode. Only used with systemd.', action='store_true', default=True)
def run(self, args):
def start_mgmt_node():
info(colored('Starting ZStack management node, it may take a few minutes...', 'blue'))
if args.daemon:
ctl.internal_run('start_node', '--daemon')
else:
ctl.internal_run('start_node')
def start_ui():
virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
if not os.path.exists(virtualenv):
info('skip starting web UI, it is not installed')
return
info(colored('Starting ZStack web UI, it may take a few minutes...', 'blue'))
ctl.internal_run('start_ui')
start_mgmt_node()
start_ui()
class StartCmd(Command):
START_SCRIPT = '../../bin/startup.sh'
SET_ENV_SCRIPT = '../../bin/setenv.sh'
MINIMAL_CPU_NUMBER = 4
# MINIMAL_MEM_SIZE is in KB; on Linux, 6GB reads as 5946428 KB in /proc/meminfo.
# Some memory is reserved for kdump etc., so the actual limit enforced here is 5000000 KB.
MINIMAL_MEM_SIZE = 5000000
def __init__(self):
super(StartCmd, self).__init__()
self.name = 'start_node'
self.description = 'start the ZStack management node on this machine'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to start the management node on a remote machine')
parser.add_argument('--timeout', help='timeout in seconds to wait for the ZStack server to start up; default is 300 seconds.', default=300)
parser.add_argument('--daemon', help='Start ZStack in daemon mode. Only used with systemd.', action='store_true', default=False)
def _start_remote(self, args):
info('it may take a while because zstack-ctl will wait for management node ready to serve API')
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl start_node --timeout=%s"' % (args.host, args.timeout))
def check_cpu_mem(self):
if multiprocessing.cpu_count() < StartCmd.MINIMAL_CPU_NUMBER:
error("CPU number should not less than %d" % StartCmd.MINIMAL_CPU_NUMBER)
status, output = commands.getstatusoutput("cat /proc/meminfo | grep MemTotal | awk -F \":\" '{print $2}' | awk -F \" \" '{print $1}'")
if status == 0:
if int(output) < StartCmd.MINIMAL_MEM_SIZE:
error("Memory size should not less than %d KB" % StartCmd.MINIMAL_MEM_SIZE)
else:
warn("Can't get system memory size from /proc/meminfo")
def check_hostname(self):
hn = shell('hostname').strip()
if '.' in hn:
error("The hostname cannot contain '.', current hostname is '%s'.\n"
"Please use the following commands to modify hostname and reset rabbitmq:\n"
" # hostnamectl set-hostname $NEW_HOSTNAME\n"
" # zstack-ctl reset_rabbitmq" % hn)
def run(self, args):
self.check_cpu_mem()
self.check_hostname()
if args.host:
self._start_remote(args)
return
# clean the error log before booting
boot_error_log = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'bootError.log')
shell('rm -f %s' % boot_error_log)
pid = get_management_node_pid()
if pid:
info('the management node[pid:%s] is already running' % pid)
return
else:
shell('rm -f %s' % os.path.join(os.path.expanduser('~zstack'), "management-server.pid"))
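# The nested helpers below are pre-flight checks run before booting Tomcat: Java version,
# port conflicts on 8080/9090, MySQL and RabbitMQ connectivity, and CATALINA_OPTS setup.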
def check_java_version():
ver = shell('java -version 2>&1 | grep -w version')
if '1.8' not in ver:
raise CtlError('ZStack requires Java8, your current version is %s\n'
'please run "update-alternatives --config java" to set Java to Java8' % ver)
def check_8080():
if shell_return('netstat -nap | grep :8080[[:space:]] | grep LISTEN > /dev/null') == 0:
raise CtlError('8080 is occupied by some process. Please use netstat to find out and stop it')
def check_9090():
if shell_return('netstat -nap | grep :9090[[:space:]] | grep LISTEN | grep -v prometheus > /dev/null') == 0:
raise CtlError('9090 is occupied by some process. Please use netstat to find out and stop it')
def check_mysql():
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
if not check_ip_port(db_hostname, db_port):
raise CtlError('unable to connect to %s:%s, please check if the MySQL is running and the firewall rules' % (db_hostname, db_port))
with on_error('unable to connect to MySQL'):
shell('mysql --host=%s --user=%s --password=%s --port=%s -e "select 1"' % (db_hostname, db_user, db_password, db_port))
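# open_iptables_port inserts an ACCEPT rule for each given port unless an identical rule
# already exists, and persists it with the distro-specific save mechanism.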
def open_iptables_port(protocol, port_list):
distro = platform.dist()[0]
if type(port_list) is not list:
error("port list should be list")
for port in port_list:
if distro == 'centos':
shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
'(iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT && service iptables save)' % (protocol, protocol, port, protocol, protocol, port))
elif distro == 'Ubuntu':
shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
'(iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (protocol, protocol, port, protocol, protocol, port))
else:
shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
'iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT ' % (protocol, protocol, port, protocol, protocol, port))
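# check_rabbitmq verifies that at least one RabbitMQ server listed in CloudBus.serverIp.*
# is reachable and, if credentials are configured, validates them through the management
# plugin on port 15672.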
def check_rabbitmq():
RABBIT_PORT = 5672
def check_username_password_if_need(ip, username, password):
if not username or not password:
return
cmd = ShellCmd('curl -u %s:%s http://%s:15672/api/whoami' % (username, password, ip))
cmd(False)
if cmd.return_code == 7:
warn('unable to connect to the rabbitmq management plugin at %s:15672. The possible reasons are:\n'
' 1) the plugin is not installed, you can install it by "rabbitmq-plugins enable rabbitmq_management",\n'
' then restart the rabbitmq by "service rabbitmq-server restart"\n'
' 2) the port 15672 is blocked by the firewall\n'
'without the plugin, we cannot check the validity of the rabbitmq username/password configured in zstack.properties' % ip)
elif cmd.return_code != 0:
cmd.raise_error()
else:
if 'error' in cmd.stdout:
raise CtlError('unable to connect to the rabbitmq server[ip:%s] with username/password configured in zstack.properties.\n'
'If you have reset the rabbitmq server, get the username/password from zstack.properties and do the following on the rabbitmq server:\n'
'1) rabbitmqctl add_user $username $password\n'
'2) rabbitmqctl set_user_tags $username administrator\n'
'3) rabbitmqctl set_permissions -p / $username ".*" ".*" ".*"\n' % ip)
with on_error('unable to get RabbitMQ server IPs from %s, please check CloudBus.serverIp.0' % ctl.properties_file_path):
ips = ctl.read_property_list('CloudBus.serverIp.')
if not ips:
raise CtlError('no RabbitMQ IPs defined in %s, please specify one using CloudBus.serverIp.0=the_ip' % ctl.properties_file_path)
rabbit_username = ctl.read_property('CloudBus.rabbitmqUsername')
rabbit_password = ctl.read_property('CloudBus.rabbitmqPassword')
if rabbit_password and not rabbit_username:
raise CtlError('CloudBus.rabbitmqPassword is set but CloudBus.rabbitmqUsername is missing in zstack.properties')
elif not rabbit_password and rabbit_username:
raise CtlError('CloudBus.rabbitmqUsername is set but CloudBus.rabbitmqPassword is missing in zstack.properties')
success = False
workable_ip = None
for key, ip in ips:
if ":" in ip:
ip, port = ip.split(':')
else:
port = RABBIT_PORT
if check_ip_port(ip, port):
workable_ip = ip
success = True
else:
warn('cannot connect to the RabbitMQ server[ip:%s, port:%s]' % (ip, port))
if not success:
raise CtlError('cannot connect to all RabbitMQ servers[ip:%s, port:%s] defined in %s, please reset rabbitmq by: "zstack-ctl reset_rabbitmq"' %
(ips, RABBIT_PORT, ctl.properties_file_path))
else:
check_username_password_if_need(workable_ip, rabbit_username, rabbit_password)
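# prepare_setenv assembles CATALINA_OPTS from the defaults, extra CLI arguments and the
# ZSTACK_UPGRADE_PARAMS/CATALINA_OPTS environment variables, then writes them to setenv.sh.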
def prepare_setenv():
setenv_path = os.path.join(ctl.zstack_home, self.SET_ENV_SCRIPT)
catalina_opts = [
'-Djava.net.preferIPv4Stack=true',
'-Dcom.sun.management.jmxremote=true',
'-Djava.security.egd=file:/dev/./urandom',
]
if ctl.extra_arguments:
catalina_opts.extend(ctl.extra_arguments)
upgrade_params = ctl.get_env('ZSTACK_UPGRADE_PARAMS')
if upgrade_params:
catalina_opts.extend(upgrade_params.split(' '))
co = ctl.get_env('CATALINA_OPTS')
if co:
info('use CATALINA_OPTS[%s] set in the ZStack environment variables; check them with "zstack-ctl getenv"' % co)
catalina_opts.extend(co.split(' '))
def has_opt(prefix):
for opt in catalina_opts:
if opt.startswith(prefix):
return True
return False
if not has_opt('-Xms'):
catalina_opts.append('-Xms512M')
if not has_opt('-Xmx'):
catalina_opts.append('-Xmx4096M')
with open(setenv_path, 'w') as fd:
fd.write('export CATALINA_OPTS=" %s"' % ' '.join(catalina_opts))
def start_mgmt_node():
shell('sudo -u zstack sh %s -DappName=zstack' % os.path.join(ctl.zstack_home, self.START_SCRIPT))
info("successfully started Tomcat container; now it's waiting for the management node ready for serving APIs, which may take a few seconds")
def wait_mgmt_node_start():
log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
timeout = int(args.timeout)
@loop_until_timeout(timeout)
def check():
if os.path.exists(boot_error_log):
with open(boot_error_log, 'r') as fd:
raise CtlError('the management server failed to boot; details can be found in the log[%s], '
'here is a brief of the error:\n%s' % (log_path, fd.read()))
cmd = create_check_mgmt_node_command(1)
cmd(False)
return cmd.return_code == 0
if not check():
raise CtlError('no management-node-ready message received within %s seconds, please check error in log file %s' % (timeout, log_path))
user = getpass.getuser()
if user != 'root':
raise CtlError('please use sudo or root user')
check_java_version()
check_8080()
check_9090()
check_mysql()
check_rabbitmq()
prepare_setenv()
open_iptables_port('udp',['123'])
start_mgmt_node()
#sleep a while, since zstack won't start up so quickly
time.sleep(5)
try:
wait_mgmt_node_start()
except CtlError as e:
try:
info("the management node failed to start, stop it now ...")
ctl.internal_run('stop_node')
except:
pass
raise e
if not args.daemon:
shell('which systemctl >/dev/null 2>&1; [ $? -eq 0 ] && systemctl start zstack', is_exception = False)
info('successfully started management node')
ctl.delete_env('ZSTACK_UPGRADE_PARAMS')
class StopCmd(Command):
STOP_SCRIPT = "../../bin/shutdown.sh"
def __init__(self):
super(StopCmd, self).__init__()
self.name = 'stop_node'
self.description = 'stop the ZStack management node on this machine'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to stop the management node on a remote machine')
parser.add_argument('--force', '-f', help='force kill the java process, without waiting.', action="store_true", default=False)
def _stop_remote(self, args):
if args.force:
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl stop_node --force"' % args.host)
else:
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl stop_node"' % args.host)
def run(self, args):
if args.host:
self._stop_remote(args)
return
pid = get_management_node_pid()
if not pid:
info('the management node has been stopped')
return
timeout = 30
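# Try a graceful shutdown first and poll until the pid disappears; if the process is still
# alive after the timeout, or --force is given, fall back to kill -9.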
if not args.force:
@loop_until_timeout(timeout)
def wait_stop():
return get_management_node_pid() is None
shell('bash %s' % os.path.join(ctl.zstack_home, self.STOP_SCRIPT))
if wait_stop():
info('successfully stopped management node')
return
pid = get_management_node_pid()
if pid:
if not args.force:
info('unable to stop the management node within %s seconds, killing it' % timeout)
with on_error('unable to kill -9 %s' % pid):
shell('kill -9 %s' % pid)
class RestartNodeCmd(Command):
def __init__(self):
super(RestartNodeCmd, self).__init__()
self.name = 'restart_node'
self.description = 'restart the management node'
ctl.register_command(self)
def run(self, args):
ctl.internal_run('stop_node')
ctl.internal_run('start_node')
class SaveConfigCmd(Command):
DEFAULT_PATH = '~/.zstack/'
def __init__(self):
super(SaveConfigCmd, self).__init__()
self.name = 'save_config'
self.description = 'save ZStack configuration from ZSTACK_HOME to specified folder'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--save-to', help='the folder where ZStack configurations should be saved')
def run(self, args):
path = args.save_to
if not path:
path = self.DEFAULT_PATH
path = os.path.expanduser(path)
if not os.path.exists(path):
os.makedirs(path)
properties_file_path = os.path.join(path, 'zstack.properties')
shell('yes | cp %s %s' % (ctl.properties_file_path, properties_file_path))
ssh_private_key_path = os.path.join(path, 'id_rsa')
ssh_public_key_path = os.path.join(path, 'id_rsa.pub')
shell('yes | cp %s %s' % (ctl.ssh_private_key, ssh_private_key_path))
shell('yes | cp %s %s' % (ctl.ssh_public_key, ssh_public_key_path))
info('successfully saved %s to %s' % (ctl.properties_file_path, properties_file_path))
class RestoreConfigCmd(Command):
DEFAULT_PATH = '~/.zstack/'
def __init__(self):
super(RestoreConfigCmd, self).__init__()
self.name = "restore_config"
self.description = 'restore ZStack configuration from specified folder to ZSTACK_HOME'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--restore-from', help='the folder where ZStack configurations should be found')
def run(self, args):
path = args.restore_from
if not path:
path = self.DEFAULT_PATH
path = os.path.expanduser(path)
if os.path.isdir(path):
properties_file_path = os.path.join(path, 'zstack.properties')
elif os.path.isfile(path):
properties_file_path = path
else:
raise CtlError('cannot find zstack.properties at %s' % path)
shell('yes | cp %s %s' % (properties_file_path, ctl.properties_file_path))
ssh_private_key_path = os.path.join(path, 'id_rsa')
ssh_public_key_path = os.path.join(path, 'id_rsa.pub')
shell('yes | cp %s %s' % (ssh_private_key_path, ctl.ssh_private_key))
shell('yes | cp %s %s' % (ssh_public_key_path, ctl.ssh_public_key))
info('successfully restored zstack.properties and ssh identity keys from %s to %s' % (properties_file_path, ctl.properties_file_path))
class InstallDbCmd(Command):
def __init__(self):
super(InstallDbCmd, self).__init__()
self.name = "install_db"
self.description = (
"install MySQL database on a target machine which can be a remote machine or the local machine."
"\nNOTE: you may need to set --login-password to password of previous MySQL root user, if the machine used to have MySQL installed and removed."
"\nNOTE: if you hasn't setup public key for ROOT user on the remote machine, this command will prompt you for password of SSH ROOT user for the remote machine."
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='host IP, for example, 192.168.0.212, please specify the real IP rather than "localhost" or "127.0.0.1" when installing on local machine; otherwise management nodes on other machines cannot access the DB.', required=True)
parser.add_argument('--root-password', help="new password of MySQL root user; an empty password is used if both this option and --login-password option are omitted")
parser.add_argument('--login-password', help="login password of MySQL root user; an empty password is used if this option is omitted."
"\n[NOTE] this option is needed only when the machine has MySQL previously installed and removed; the old MySQL root password will be left in the system,"
"you need to input it in order to reset root password for the new installed MySQL.", default=None)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
parser.add_argument('--no-backup', help='do NOT back up the database. If the database is very large and you have manually backed it up, using this option will speed up the upgrade process. [DEFAULT] false', default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
if not args.yum:
args.yum = get_yum_repo_from_property()
script = ShellCmd("ip addr |grep 'inet '|grep -v '127.0.0.1'|awk '{print $2}'|awk -F '/' '{print $1}'")
script(True)
current_host_ips = script.stdout.split('\n')
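# The Ansible playbook below is rendered through string.Template (the $-prefixed
# placeholders) and installs MySQL/MariaDB per distro, opens port 3306, resets the root
# password and grants remote access; on failure it rolls the installation back.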
yaml = '''---
- hosts: $host
remote_user: root
vars:
root_password: $root_password
login_password: $login_password
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
script: $pre_install_script
- name: install MySQL for RedHat 6 through user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mysql mysql-server
register: install_result
- name: install MySQL for RedHat 6 through system defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo == 'false'
shell: "yum clean metadata; yum --nogpgcheck install -y mysql mysql-server "
register: install_result
- name: install MySQL for RedHat 7 from local
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mariadb mariadb-server iptables-services
register: install_result
- name: install MySQL for RedHat 7 from local
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y mariadb mariadb-server iptables-services
register: install_result
- name: install MySQL for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- mariadb-client
- mariadb-server
- iptables-persistent
register: install_result
- name: open 3306 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT && service iptables save)
- name: open 3306 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT && /etc/init.d/iptables-persistent save)
- name: run post-install script
script: $post_install_script
- name: enable MySQL daemon on RedHat 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7'
service: name=mysqld state=restarted enabled=yes
- name: enable MySQL daemon on RedHat 7
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7'
service: name=mariadb state=restarted enabled=yes
- name: enable MySQL on Ubuntu
when: ansible_os_family == 'Debian'
service: name=mysql state=restarted enabled=yes
- name: change root password
shell: $change_password_cmd
register: change_root_result
ignore_errors: yes
- name: grant remote access
when: change_root_result.rc == 0
shell: $grant_access_cmd
- name: rollback MySQL installation on RedHat 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and change_root_result.rc != 0 and install_result.changed == True
shell: rpm -ev mysql mysql-server
- name: rollback MySQL installation on RedHat 7
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and change_root_result.rc != 0 and install_result.changed == True
shell: rpm -ev mariadb mariadb-server
- name: rollback MySql installation on Ubuntu
when: ansible_os_family == 'Debian' and change_root_result.rc != 0 and install_result.changed == True
apt: pkg={{item}} state=absent update_cache=yes
with_items:
- mysql-client
- mysql-server
- name: failure
fail: >
msg="failed to change root password of MySQL, see prior error in task 'change root password'; the possible cause
is the machine used to have MySQL installed and removed, the previous password of root user is remaining on the
machine; try using --login-password. We have rolled back the MySQL installation so you can safely run install_db
again with --login-password set."
when: change_root_result.rc != 0 and install_result.changed == False
'''
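# Build the GRANT statements for localhost, the target host and every local IP so that
# management nodes on any of this machine's addresses can reach the database.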
if not args.root_password and not args.login_password:
args.root_password = '''"''"'''
more_cmd = ' '
for ip in current_host_ips:
if not ip:
continue
more_cmd += "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '' WITH GRANT OPTION;" % ip
grant_access_cmd = '''/usr/bin/mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '' WITH GRANT OPTION; GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '' WITH GRANT OPTION; %s FLUSH PRIVILEGES;"''' % (args.host, more_cmd)
else:
if not args.root_password:
args.root_password = args.login_password
more_cmd = ' '
for ip in current_host_ips:
if not ip:
continue
more_cmd += "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION;" % (ip, args.root_password)
grant_access_cmd = '''/usr/bin/mysql -u root -p%s -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION; GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION; %s FLUSH PRIVILEGES;"''' % (args.root_password, args.root_password, args.host, args.root_password, more_cmd)
if args.login_password is not None:
change_root_password_cmd = '/usr/bin/mysqladmin -u root -p{{login_password}} password {{root_password}}'
else:
change_root_password_cmd = '/usr/bin/mysqladmin -u root password {{root_password}}'
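# The pre-install script configures fallback yum repositories on RedHat systems and aborts
# early if the hostname resolves to an address that does not belong to this machine, since
# such DNS hijacking would break MySQL and RabbitMQ.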
pre_install_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
###################
#Check DNS hijacking
###################
hostname=`hostname`
pingret=`ping -c 1 -W 2 $hostname 2>/dev/null | head -n1`
echo $pingret | grep 'PING' > /dev/null
[ $? -ne 0 ] && exit 0
ip=`echo $pingret | cut -d' ' -f 3 | cut -d'(' -f 2 | cut -d')' -f 1`
ip_1=`echo $ip | cut -d'.' -f 1`
[ "127" = "$ip_1" ] && exit 0
ip addr | grep $ip > /dev/null
[ $? -eq 0 ] && exit 0
echo "The hostname($hostname) of your machine is resolved to IP($ip) which is none of IPs of your machine.
It's likely your DNS server has been hijacking, please try fixing it or add \"ip_of_your_host $hostname\" to /etc/hosts.
DNS hijacking will cause MySQL and RabbitMQ not working."
exit 1
'''
fd, pre_install_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_install_script)
def cleanup_pre_install_script():
os.remove(pre_install_script_path)
self.install_cleanup_routine(cleanup_pre_install_script)
post_install_script = mysql_db_config_script
fd, post_install_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(post_install_script)
def cleanup_post_install_script():
os.remove(post_install_script_path)
self.install_cleanup_routine(cleanup_post_install_script)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'change_password_cmd': change_root_password_cmd,
'root_password': args.root_password,
'login_password': args.login_password,
'grant_access_cmd': grant_access_cmd,
'pre_install_script': pre_install_script_path,
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'post_install_script': post_install_script_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
class UpgradeHACmd(Command):
'''This feature only supports the ZStack offline image currently'''
host_post_info_list = []
current_dir = os.path.dirname(os.path.realpath(__file__))
conf_dir = "/var/lib/zstack/ha/"
private_key_name = conf_dir + "ha_key"
conf_file = conf_dir + "ha.yaml"
logger_dir = "/var/log/zstack/"
logger_file = "ha.log"
community_iso = "/opt/ZStack-Community-x86_64-DVD-1.4.0.iso"
bridge = ""
SpinnerInfo.spinner_status = {'upgrade_repo':False,'stop_mevoco':False, 'upgrade_mevoco':False,'upgrade_db':False,
'backup_db':False, 'check_init':False, 'start_mevoco':False}
ha_config_content = None
def __init__(self):
super(UpgradeHACmd, self).__init__()
self.name = "upgrade_ha"
self.description = "upgrade high availability environment for ZStack-Enterprise."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
# dest keeps the args.mevoco_installer attribute that run() below relies on
parser.add_argument('--zstack-enterprise-installer', '--enterprise', dest='mevoco_installer',
help="The new zstack-enterprise installer package, get it from http://cdn.zstack.io/product_downloads/zstack-enterprise/",
required=True)
parser.add_argument('--iso',
help="get it from http://cdn.zstack.io/product_downloads/iso/",
required=True)
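# upgrade_repo refreshes the zstack-local repo metadata, reinstalls any packages reported as
# not installed, mounts the given ISO on the remote host, rsyncs its content into
# /opt/zstack-dvd/, then unmounts and cleans up the temporary mount point.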
def upgrade_repo(self, iso, tmp_iso, host_post_info):
command = (
"yum clean --enablerepo=zstack-local metadata && pkg_list=`rsync | grep \"not installed\" | awk"
" '{ print $2 }'` && for pkg in $pkg_list; do yum --disablerepo=* --enablerepo=zstack-local install "
"-y $pkg; done;")
run_remote_command(command, host_post_info)
command = "mkdir -p %s" % tmp_iso
run_remote_command(command, host_post_info)
command = "mount -o loop %s %s" % (iso, tmp_iso)
run_remote_command(command, host_post_info)
command = "rsync -au --delete %s /opt/zstack-dvd/" % tmp_iso
run_remote_command(command, host_post_info)
command = "umount %s" % tmp_iso
run_remote_command(command, host_post_info)
command = "rm -rf %s" % tmp_iso
run_remote_command(command, host_post_info)
def check_file_exist(self, file, host_post_info_list):
if os.path.isabs(file) is False:
error("Make sure you pass file name with absolute path")
else:
if os.path.isfile(file) is False:
error("Didn't find file %s" % file)
else:
for host_post_info in host_post_info_list:
if file_dir_exist("path=%s" % file, host_post_info) is False:
copy_arg = CopyArg()
copy_arg.src = file
copy_arg.dest = file
copy(copy_arg, host_post_info)
# do not enable due to lot of customer version
def check_file_md5sum(self):
pass
def check_mn_running(self,host_post_info):
cmd = create_check_mgmt_node_command(timeout=4, mn_node=host_post_info.host)
cmd(False)
if cmd.return_code != 0:
error("Check management node %s status failed, make sure the status is running before upgrade" % host_post_info.host)
else:
if 'false' in cmd.stdout:
error('The management node %s is still starting, please wait a few seconds before upgrading' % host_post_info.host)
elif 'true' in cmd.stdout:
return 0
else:
error('The management node %s status is: Unknown, please start the management node before upgrade' % host_post_info.host)
def upgrade_mevoco(self, mevoco_installer, host_post_info):
mevoco_dir = os.path.dirname(mevoco_installer)
mevoco_bin = os.path.basename(mevoco_installer)
command = "rm -rf /tmp/zstack_upgrade.lock && cd %s && bash %s -u -i " % (mevoco_dir, mevoco_bin)
logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(UpgradeHACmd.private_key_name, host_post_info.host, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def run(self, args):
# create log
create_log(UpgradeHACmd.logger_dir, UpgradeHACmd.logger_file)
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
if os.path.isfile(UpgradeHACmd.conf_file) is not True:
error("Didn't find HA config file %s, please contact support for upgrade" % UpgradeHACmd.conf_file)
host_inventory = UpgradeHACmd.conf_dir + 'host'
yum_repo = get_yum_repo_from_property()
private_key_name = UpgradeHACmd.conf_dir+ "ha_key"
if args.iso is None:
community_iso = UpgradeHACmd.community_iso
else:
community_iso = args.iso
mn_list = get_ha_mn_list(UpgradeHACmd.conf_file)
host1_ip = mn_list[0]
host2_ip = mn_list[1]
if len(mn_list) > 2:
host3_ip = mn_list[2]
# init host1 parameter
self.host1_post_info = HostPostInfo()
self.host1_post_info.host = host1_ip
self.host1_post_info.host_inventory = host_inventory
self.host1_post_info.private_key = private_key_name
self.host1_post_info.yum_repo = yum_repo
self.host1_post_info.post_url = ""
# init host2 parameter
self.host2_post_info = HostPostInfo()
self.host2_post_info.host = host2_ip
self.host2_post_info.host_inventory = host_inventory
self.host2_post_info.private_key = private_key_name
self.host2_post_info.yum_repo = yum_repo
self.host2_post_info.post_url = ""
if len(mn_list) > 2:
# init host3 parameter
self.host3_post_info = HostPostInfo()
self.host3_post_info.host = host3_ip
self.host3_post_info.host_inventory = host_inventory
self.host3_post_info.private_key = private_key_name
self.host3_post_info.yum_repo = yum_repo
self.host3_post_info.post_url = ""
UpgradeHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info]
if len(mn_list) > 2:
UpgradeHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info, self.host3_post_info]
for host in UpgradeHACmd.host_post_info_list:
# to do check mn all running
self.check_mn_running(host)
for file in [args.mevoco_installer, community_iso]:
self.check_file_exist(file, UpgradeHACmd.host_post_info_list)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to upgrade repo"
spinner_info.name = "upgrade_repo"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_repo'] = True
ZstackSpinner(spinner_info)
rand_dir_name = uuid.uuid4()
tmp_iso = "/tmp/%s/iso/" % rand_dir_name
for host_post_info in UpgradeHACmd.host_post_info_list:
self.upgrade_repo(community_iso, tmp_iso, host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Stopping mevoco"
spinner_info.name = "stop_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['stop_mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in UpgradeHACmd.host_post_info_list:
stop_mevoco(host_post_info)
# backup db before upgrade
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to backup database"
spinner_info.name = "backup_db"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['backup_db'] = True
ZstackSpinner(spinner_info)
(status, output) = commands.getstatusoutput("zstack-ctl dump_mysql >> /dev/null 2>&1")
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to upgrade mevoco"
spinner_info.name = "upgrade_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in UpgradeHACmd.host_post_info_list:
self.upgrade_mevoco(args.mevoco_installer, host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to upgrade database"
spinner_info.name = "upgrade_db"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_db'] = True
ZstackSpinner(spinner_info)
(status, output) = commands.getstatusoutput("zstack-ctl upgrade_db")
if status != 0:
error("Upgrade mysql failed: %s" % output)
else:
logger.debug("SUCC: shell command: 'zstack-ctl upgrade_db' successfully" )
spinner_info = SpinnerInfo()
spinner_info.output = "Starting mevoco"
spinner_info.name = "start_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start_mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in UpgradeHACmd.host_post_info_list:
start_remote_mn(host_post_info)
SpinnerInfo.spinner_status['start_mevoco'] = False
time.sleep(.2)
info(colored("\nUpgrade HA successfully!","blue"))
class AddManagementNodeCmd(Command):
SpinnerInfo.spinner_status = {'check_init':False,'add_key':False,'deploy':False,'config':False,'start':False,'install_ui':False}
install_pkgs = ['openssl']
logger_dir = '/var/log/zstack/'
logger_file = 'zstack-ctl.log'
def __init__(self):
super(AddManagementNodeCmd, self).__init__()
self.name = "add_multi_management"
self.description = "add multi management node."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host-list', '--hosts', nargs='+',
help="Connection info for all hosts, in the following format: 'root:passwd1@host1_ip root:passwd2@host2_ip ...' ",
required=True)
parser.add_argument('--force-reinstall','-f',action="store_true", default=False)
parser.add_argument('--ssh-key',
help="the path of private key for SSH login $host; if provided, Ansible will use the "
"specified key as private key to SSH login the $host, default will use zstack private key",
default=None)
def add_public_key_to_host(self, key_path, host_info):
command ='timeout 10 sshpass -p %s ssh-copy-id -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no' \
' -o StrictHostKeyChecking=no -i %s root@%s' % (host_info.remote_pass, key_path, host_info.host)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Copy public key '%s' to host: '%s' failed:\n %s" % (key_path, host_info.host, output))
def deploy_mn_on_host(self,args, host_info, key):
if args.force_reinstall is True:
command = 'zstack-ctl install_management_node --host=%s --ssh-key="%s" --force-reinstall' % (host_info.host, key)
else:
command = 'zstack-ctl install_management_node --host=%s --ssh-key="%s"' % (host_info.host, key)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("deploy mn on host %s failed:\n %s" % (host_info.host, output))
def install_ui_on_host(self, key, host_info):
command = 'zstack-ctl install_ui --host=%s --ssh-key=%s' % (host_info.host, key)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("deploy ui on host %s failed:\n %s" % (host_info.host, output))
def config_mn_on_host(self, key, host_info):
command = "scp -i %s %s root@%s:%s" % (key, ctl.properties_file_path, host_info.host, ctl.properties_file_path)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("copy config to host %s failed:\n %s" % (host_info.host, output))
command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl configure " \
"management.server.ip=%s && zstack-ctl save_config" % (key, host_info.host, host_info.host)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("config management server %s failed:\n %s" % (host_info.host, output))
def start_mn_on_host(self, host_info, key):
command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl " \
"start_node " % (key, host_info.host)
(status, output) = commands.getstatusoutput(command)
command = "ln -s /opt/zstack-dvd/ /usr/local/zstack/apache-tomcat/webapps/zstack/static/zstack-dvd"
run_remote_command(command, host_info, True, True)
if status != 0:
error("start node on host %s failed:\n %s" % (host_info.host, output))
command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl " \
"start_ui" % (key, host_info.host)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("start ui on host %s failed:\n %s" % (host_info.host, output))
def install_packages(self, pkg_list, host_info):
distro = platform.dist()[0]
if distro == "centos":
for pkg in pkg_list:
yum_install_package(pkg, host_info)
elif distro == "Ubuntu":
apt_install_packages(pkg_list, host_info)
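# For every host in --host-list the workflow is: copy the public key, install the management
# node, install required packages, copy and adjust zstack.properties, install the UI, and
# finally start the node and UI over SSH.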
def run(self, args):
create_log(AddManagementNodeCmd.logger_dir, AddManagementNodeCmd.logger_file)
host_info_list = []
if args.ssh_key is None:
args.ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
private_key = args.ssh_key.split('.')[0]
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
for host in args.host_list:
inventory_file = ctl.zstack_home + "/../../../ansible/hosts"
host_info = HostPostInfo()
host_info.private_key = private_key
host_info.host_inventory = inventory_file
(host_info.remote_user, host_info.remote_pass, host_info.host, host_info.remote_port) = check_host_info_format(host)
check_host_password(host_info.remote_pass, host_info.host)
command = "cat %s | grep %s || echo %s >> %s" % (inventory_file, host_info.host, host_info.host, inventory_file)
(status, output) = commands.getstatusoutput(command)
if status != 0 :
error(output)
host_info_list.append(host_info)
for host_info in host_info_list:
spinner_info = SpinnerInfo()
spinner_info.output = "Add public key to host %s" % host_info.host
spinner_info.name = 'add_key'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['add_key'] = True
ZstackSpinner(spinner_info)
self.add_public_key_to_host(args.ssh_key, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Deploy management node to host %s" % host_info.host
spinner_info.name = 'deploy'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['deploy'] = True
ZstackSpinner(spinner_info)
self.deploy_mn_on_host(args, host_info, private_key)
self.install_packages(AddManagementNodeCmd.install_pkgs, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Config management node on host %s" % host_info.host
spinner_info.name = 'config'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['config'] = True
ZstackSpinner(spinner_info)
self.config_mn_on_host(private_key, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Install UI on host %s" % host_info.host
spinner_info.name = 'install_ui'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['install_ui'] = True
ZstackSpinner(spinner_info)
self.install_ui_on_host(private_key, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Start management node on host %s" % host_info.host
spinner_info.name = 'start'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start'] = True
ZstackSpinner(spinner_info)
self.start_mn_on_host(host_info,private_key)
SpinnerInfo.spinner_status['start'] = False
time.sleep(0.2)
info(colored("\nAll management nodes add successfully",'blue'))
class RecoverHACmd(Command):
'''This feature only supports the ZStack offline image currently'''
host_post_info_list = []
current_dir = os.path.dirname(os.path.realpath(__file__))
conf_dir = "/var/lib/zstack/ha/"
conf_file = conf_dir + "ha.yaml"
host_inventory = conf_dir + 'host'
private_key = conf_dir + 'ha_key'
logger_dir = "/var/log/zstack/"
logger_file = "ha.log"
bridge = ""
SpinnerInfo.spinner_status = {'cluster':False, 'mysql':False, 'mevoco':False, 'check_init':False, 'prometheus':False}
ha_config_content = None
def __init__(self):
super(RecoverHACmd, self).__init__()
self.name = "recover_ha"
self.description = "Recover high availability environment for Mevoco."
ctl.register_command(self)
def stop_mysql_service(self, host_post_info):
command = "service mysql stop"
run_remote_command(command, host_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", host_post_info,
return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", host_post_info)
def reboot_cluster_service(self, host_post_info):
service_status("haproxy", "state=started", host_post_info)
service_status("keepalived", "state=started", host_post_info)
service_status("rabbitmq-server", "state=started", host_post_info)
def recover_mysql(self, host_post_info, host_post_info_list):
for host_info in host_post_info_list:
self.stop_mysql_service(host_info)
command = "service mysql bootstrap"
status, output = run_remote_command(command,host_post_info,True,True)
if status is False:
return False
for host_info in host_post_info_list:
if host_info.host != host_post_info.host:
command = "service mysql start"
status, output = run_remote_command(command,host_info,True,True)
if status is False:
return False
command = "service mysql restart"
status, output = run_remote_command(command,host_post_info,True,True)
return status
def sync_prometheus(self, host_post_info):
# sync prometheus data
sync_arg = SyncArg()
sync_arg.src = '/var/lib/zstack/prometheus/'
sync_arg.dest = '/var/lib/zstack/prometheus/'
sync(sync_arg, host_post_info)
def run(self, args):
create_log(RecoverHACmd.logger_dir, RecoverHACmd.logger_file)
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
host3_exist = False
if os.path.isfile(RecoverHACmd.conf_file) is not True:
error("Didn't find HA config file %s, please use traditional 'zstack-ctl install_ha' to recover your cluster" % RecoverHACmd.conf_file)
if os.path.exists(RecoverHACmd.conf_file):
with open(RecoverHACmd.conf_file, 'r') as f:
RecoverHACmd.ha_config_content = yaml.load(f)
if RecoverHACmd.ha_config_content['host_list'] is None:
error("Didn't find host_list in config file %s" % RecoverHACmd.conf_file)
host_list = RecoverHACmd.ha_config_content['host_list'].split(',')
# the first two entries are always host1 and host2; a third entry is optional
host1_ip = host_list[0]
host2_ip = host_list[1]
if len(host_list) == 3:
host3_exist = True
host3_ip = host_list[2]
if os.path.exists(RecoverHACmd.conf_file) and RecoverHACmd.ha_config_content is not None :
if "bridge_name" in RecoverHACmd.ha_config_content:
RecoverHACmd.bridge = RecoverHACmd.ha_config_content['bridge_name']
else:
error("Didn't find 'bridge_name' in config file %s" % RecoverHACmd.conf_file)
local_ip = get_ip_by_interface(RecoverHACmd.bridge)
host_post_info_list = []
# init host1 parameter
host1_post_info = HostPostInfo()
host1_post_info.host = host1_ip
host1_post_info.host_inventory = RecoverHACmd.host_inventory
host1_post_info.private_key = RecoverHACmd.private_key
host_post_info_list.append(host1_post_info)
host2_post_info = HostPostInfo()
host2_post_info.host = host2_ip
host2_post_info.host_inventory = RecoverHACmd.host_inventory
host2_post_info.private_key = RecoverHACmd.private_key
host_post_info_list.append(host2_post_info)
if host3_exist is True:
host3_post_info = HostPostInfo()
host3_post_info.host = host3_ip
host3_post_info.host_inventory = RecoverHACmd.host_inventory
host3_post_info.private_key = RecoverHACmd.private_key
host_post_info_list.append(host3_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery mysql"
spinner_info.name = "mysql"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['mysql'] = True
ZstackSpinner(spinner_info)
mysql_recover_status = False
for host_post_info in host_post_info_list:
recover_status = self.recover_mysql(host_post_info, host_post_info_list)
if recover_status is True:
mysql_recover_status = True
break
if mysql_recover_status is False:
error("Recover mysql failed! Please check log /var/log/zstack/ha.log")
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery cluster"
spinner_info.name = "cluster"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['cluster'] = True
ZstackSpinner(spinner_info)
for host_post_info in host_post_info_list:
self.reboot_cluster_service(host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to sync monitor data"
spinner_info.name = "prometheus"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['prometheus'] = True
ZstackSpinner(spinner_info)
for host_post_info in host_post_info_list:
if host_post_info.host != local_ip:
self.sync_prometheus(host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in host_post_info_list:
start_remote_mn(host_post_info)
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
time.sleep(.3)
info(colored("The cluster has been recovered successfully!", "blue"))
class InstallHACmd(Command):
'''This feature only supports the ZStack offline image currently'''
host_post_info_list = []
current_dir = os.path.dirname(os.path.realpath(__file__))
conf_dir = "/var/lib/zstack/ha/"
conf_file = conf_dir + "ha.yaml"
logger_dir = "/var/log/zstack/"
logger_file = "ha.log"
bridge = ""
SpinnerInfo.spinner_status = {'mysql':False,'rabbitmq':False, 'haproxy_keepalived':False,
'Mevoco':False, 'stop_mevoco':False, 'check_init':False, 'recovery_cluster':False}
ha_config_content = None
def __init__(self):
super(InstallHACmd, self).__init__()
self.name = "install_ha"
self.description = "install high availability environment for Mevoco."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host1-info', '--h1',
help="Connection info of the first host, in the following format: 'root:password@ip_address' ",
required=True)
parser.add_argument('--host2-info', '--h2',
help="Connection info of the second host, in the following format: 'root:password@ip_address' ",
required=True)
parser.add_argument('--host3-info', '--h3',
help="Connection info of the third host, in the following format: 'root:password@ip_address' ",
default=False)
parser.add_argument('--vip',
help="The virtual IP address for HA setup",
default=None)
parser.add_argument('--gateway',
help="The gateway IP address for HA setup",
default=None)
parser.add_argument('--bridge',
help="The bridge device name, default is br_eth0",
)
parser.add_argument('--mysql-root-password','--root-pass',
help="Password of MySQL root user", default="zstack123")
parser.add_argument('--mysql-user-password','--user-pass',
help="Password of MySQL user zstack", default="zstack123")
parser.add_argument('--rabbit-password','--rabbit-pass',
help="RabbitMQ password; if set, the password will be created on RabbitMQ for username "
"specified by --rabbit-username. [DEFAULT] rabbitmq default password",
default="zstack123")
parser.add_argument('--drop', action='store_true', default=False,
help="Force delete mysql data for re-deploy HA")
parser.add_argument('--keep-db', action='store_true', default=False,
help='keep existing zstack database and not raise error')
parser.add_argument('--recovery-from-this-host', '--recover',
action='store_true', default=False,
help="for the admin to recover mysql from the last mysql server that was shut down")
parser.add_argument('--perfect-mode', action='store_true', default=False,
help="this mode reconnects mysql faster")
def get_formatted_netmask(self, device_name):
'''This function will return formatted netmask. eg. 172.20.12.16/24 will return 24'''
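# 35099 is the SIOCGIFNETMASK ioctl (0x891b) on Linux; the dotted netmask it returns is
# converted to a CIDR prefix length by counting the set bits of each octet.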
netmask = socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
35099, struct.pack('256s', device_name))[20:24])
formatted_netmask = sum([bin(int(x)).count('1') for x in netmask.split('.')])
return formatted_netmask
def run(self, args):
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
if args.bridge is None:
InstallHACmd.bridge = 'br_eth0'
else:
InstallHACmd.bridge = args.bridge
if os.path.exists(InstallHACmd.conf_file):
with open(InstallHACmd.conf_file, 'r') as f:
InstallHACmd.ha_config_content = yaml.load(f)
if args.vip is None and args.recovery_from_this_host is False:
error("Install HA must assign a vip")
# check gw ip is available
if args.gateway is None:
if get_default_gateway_ip() is None:
error("Can't get the gateway IP address from system, please check your route table or pass specific " \
"gateway through \"--gateway\" argument")
else:
gateway_ip = get_default_gateway_ip()
else:
gateway_ip = args.gateway
(status, output) = commands.getstatusoutput('ping -c 1 %s' % gateway_ip)
if status != 0:
error("The gateway %s unreachable!" % gateway_ip)
# check input host info
host1_info = args.host1_info
host1_connect_info_list = check_host_info_format(host1_info)
args.host1 = host1_connect_info_list[2]
args.host1_password = host1_connect_info_list[1]
host2_info = args.host2_info
host2_connect_info_list = check_host_info_format(host2_info)
args.host2 = host2_connect_info_list[2]
args.host2_password = host2_connect_info_list[1]
if args.host3_info is not False:
host3_info = args.host3_info
host3_connect_info_list = check_host_info_format(host3_info)
args.host3 = host3_connect_info_list[2]
args.host3_password = host3_connect_info_list[1]
# check root password is available
if args.host1_password != args.host2_password:
error("Host1 password and Host2 password must be the same, Please change one of them!")
elif args.host3_info is not False:
if not args.host1_password == args.host2_password == args.host3_password:
error("All hosts root password must be the same. Please check your host password!")
check_host_password(args.host1_password, args.host1)
check_host_password(args.host2_password, args.host2)
if args.host3_info is not False:
check_host_password(args.host3_password, args.host3)
# check image type
zstack_local_repo = os.path.isfile("/etc/yum.repos.d/zstack-local.repo")
galera_repo = os.path.isfile("/etc/yum.repos.d/galera.repo")
if zstack_local_repo is False or galera_repo is False:
error("This feature only support ZStack community CentOS 7 image")
# check network configuration
interface_list = os.listdir('/sys/class/net/')
if InstallHACmd.bridge not in interface_list and args.recovery_from_this_host is False:
error("Make sure you have already run the 'zs-network-setting' to setup the network environment, or set the"
" bridge name with --bridge, default bridge name is br_eth0 ")
if InstallHACmd.bridge.split('br_')[1] not in interface_list:
error("bridge %s should add the interface %s, make sure you have setup the interface or specify the right"
" bridge name" % (InstallHACmd.bridge, InstallHACmd.bridge.split('br_')[1]))
        # the keepalived virtual IP label is '<bridge>:0' and interface labels are limited to 15 characters,
        # so the bridge name is kept below 13 characters here
        if len(InstallHACmd.bridge) >= 13:
            error("The bridge name must be shorter than 13 characters")
# check user start this command on host1
if args.recovery_from_this_host is False:
local_ip = get_ip_by_interface(InstallHACmd.bridge)
if args.host1 != local_ip:
error("Please run this command at host1 %s, or change your host1 ip to local host ip" % args.host1)
# check user input wrong host2 ip
if args.host2 == args.host1:
error("The host1 and host2 should not be the same ip address!")
elif args.host3_info is not False:
if args.host2 == args.host3 or args.host1 == args.host3:
error("The host1, host2 and host3 should not be the same ip address!")
# create log
create_log(InstallHACmd.logger_dir, InstallHACmd.logger_file)
# create config
if not os.path.exists(InstallHACmd.conf_dir):
os.makedirs(InstallHACmd.conf_dir)
yum_repo = get_yum_repo_from_property()
private_key_name = InstallHACmd.conf_dir+ "ha_key"
public_key_name = InstallHACmd.conf_dir+ "ha_key.pub"
if os.path.isfile(public_key_name) is not True:
command = "echo -e 'y\n'|ssh-keygen -q -t rsa -N \"\" -f %s" % private_key_name
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Generate private key %s failed! Generate manually or rerun the process!" % private_key_name)
with open(public_key_name) as public_key_file:
public_key = public_key_file.read()
# create inventory file
        with open('%s/host' % InstallHACmd.conf_dir, 'w') as f:
            if args.host3_info is not False:
                f.writelines([args.host1 + '\n', args.host2 + '\n', args.host3 + '\n'])
            else:
                f.writelines([args.host1 + '\n', args.host2 + '\n'])
#host_inventory = '%s,%s' % (args.host1, args.host2)
host_inventory = InstallHACmd.conf_dir + 'host'
# init host1 parameter
self.host1_post_info = HostPostInfo()
self.host1_post_info.host = args.host1
self.host1_post_info.host_inventory = host_inventory
self.host1_post_info.private_key = private_key_name
self.host1_post_info.yum_repo = yum_repo
self.host1_post_info.vip = args.vip
self.host1_post_info.gateway_ip = gateway_ip
self.host1_post_info.rabbit_password = args.rabbit_password
self.host1_post_info.mysql_password = args.mysql_root_password
self.host1_post_info.mysql_userpassword = args.mysql_user_password
self.host1_post_info.post_url = ""
self.host_post_info_list.append(self.host1_post_info)
# init host2 parameter
self.host2_post_info = HostPostInfo()
self.host2_post_info.host = args.host2
self.host2_post_info.host_inventory = host_inventory
self.host2_post_info.private_key = private_key_name
self.host2_post_info.yum_repo = yum_repo
self.host2_post_info.vip = args.vip
self.host2_post_info.gateway_ip = gateway_ip
self.host2_post_info.rabbit_password = args.rabbit_password
self.host2_post_info.mysql_password = args.mysql_root_password
self.host2_post_info.mysql_userpassword = args.mysql_user_password
self.host2_post_info.post_url = ""
self.host_post_info_list.append(self.host2_post_info)
if args.host3_info is not False:
# init host3 parameter
self.host3_post_info = HostPostInfo()
self.host3_post_info.host = args.host3
self.host3_post_info.host_inventory = host_inventory
self.host3_post_info.private_key = private_key_name
self.host3_post_info.yum_repo = yum_repo
self.host3_post_info.vip = args.vip
self.host3_post_info.gateway_ip = gateway_ip
self.host3_post_info.rabbit_password = args.rabbit_password
self.host3_post_info.mysql_password = args.mysql_root_password
self.host3_post_info.mysql_userpassword = args.mysql_user_password
self.host3_post_info.post_url = ""
self.host_post_info_list.append(self.host3_post_info)
# init all variables in map
local_map = {
"mysql_connect_timeout" : 60000,
"mysql_socket_timeout" : 60000
}
if args.perfect_mode is True:
local_map['mysql_connect_timeout'] = 2000
local_map['mysql_socket_timeout'] = 2000
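        # shell snippet that appends the freshly generated HA public key to ~/.ssh/authorized_keys on a host,
        # skipping the append when the key is already present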
add_public_key_command = 'if [ ! -d ~/.ssh ]; then mkdir -p ~/.ssh; chmod 700 ~/.ssh; fi && if [ ! -f ~/.ssh/authorized_keys ]; ' \
'then touch ~/.ssh/authorized_keys; chmod 600 ~/.ssh/authorized_keys; fi && pub_key="%s";grep ' \
'"%s" ~/.ssh/authorized_keys > /dev/null; if [ $? -eq 1 ]; ' \
'then echo "%s" >> ~/.ssh/authorized_keys; fi && exit 0;'\
% (public_key.strip('\n'), public_key.strip('\n'), public_key.strip('\n'))
# add ha public key to host1
ssh_add_public_key_command = "sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s'" % \
(args.host1_password, args.host1, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# add ha public key to host2
ssh_add_public_key_command = "sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s' " % \
(args.host2_password, args.host2, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# add ha public key to host3
if args.host3_info is not False:
ssh_add_public_key_command = "sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s' " % \
(args.host3_password, args.host3, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# sync ansible key in two host
copy_arg = CopyArg()
copy_arg.src = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/"
copy_arg.dest = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/"
copy(copy_arg,self.host2_post_info)
command = "chmod 600 %s" % copy_arg.src + "id_rsa"
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg,self.host3_post_info)
run_remote_command(command, self.host3_post_info)
# check whether to recovery the HA cluster
if args.recovery_from_this_host is True:
if os.path.exists(InstallHACmd.conf_file) and InstallHACmd.ha_config_content is not None and args.bridge is None:
if "bridge_name" in InstallHACmd.ha_config_content:
InstallHACmd.bridge = InstallHACmd.ha_config_content['bridge_name']
local_ip = get_ip_by_interface(InstallHACmd.bridge)
if local_ip != args.host1 and local_ip != args.host2:
if args.host3_info is not False:
if local_ip != args.host3:
error("Make sure you are running the 'zs-network-setting' command on host1 or host2 or host3")
else:
error("Make sure you are running the 'zs-network-setting' command on host1 or host2")
# stop mevoco
spinner_info = SpinnerInfo()
spinner_info.output = "Stop Mevoco on all management nodes"
spinner_info.name = "stop_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['stop_mevoco'] = True
ZstackSpinner(spinner_info)
for host_info in self.host_post_info_list:
stop_mevoco(host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery mysql from this host"
spinner_info.name = "recovery_cluster"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['recovery_cluster'] = True
ZstackSpinner(spinner_info)
# kill mysql process to make sure mysql bootstrap can work
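            # port 4567 is the Galera replication port; any process still listening on it would prevent the bootstrap below from working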
service_status("mysql", "state=stopped", self.host1_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", self.host1_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host1_post_info)
service_status("mysql", "state=stopped", self.host2_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]] ", self.host2_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host2_post_info)
if args.host3_info is not False:
service_status("mysql", "state=stopped", self.host3_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", self.host3_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host3_post_info)
command = "service mysql bootstrap"
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
else:
#command = "service mysql start"
if local_ip == self.host1_post_info.host:
# make sure vip will be on this host, so start haproxy firstly
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=started", self.host2_post_info)
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
if args.host3_info is not False:
#run_remote_command(command, self.host3_post_info)
service_status("mysql","state=started", self.host3_post_info)
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=restarted", self.host1_post_info)
elif local_ip == self.host2_post_info.host:
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=started", self.host1_post_info)
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
if args.host3_info is not False:
#run_remote_command(command, self.host3_post_info)
service_status("mysql","state=started", self.host3_post_info)
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=restarted", self.host2_post_info)
else:
# localhost must be host3
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=started", self.host1_post_info)
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
service_status("mysql","state=started", self.host2_post_info)
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=restarted", self.host3_post_info)
# sync prometheus data
sync_arg = SyncArg()
sync_arg.src = '/var/lib/zstack/prometheus/'
sync_arg.dest = '/var/lib/zstack/prometheus/'
sync(sync_arg, self.host2_post_info)
if args.host3_info is not False:
sync(sync_arg, self.host3_post_info)
# start mevoco
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
for host_info in self.host_post_info_list:
start_mevoco(host_info)
SpinnerInfo.spinner_status['mevoco'] = False
time.sleep(.2)
info("The cluster has been recovered!")
sys.exit(0)
# generate ha config
host_list = "%s,%s" % (self.host1_post_info.host, self.host2_post_info.host)
if args.host3_info is not False:
host_list = "%s,%s,%s" % (self.host1_post_info.host, self.host2_post_info.host, self.host3_post_info.host)
        ha_info = {'vip': args.vip, 'gateway': self.host1_post_info.gateway_ip, 'bridge_name': InstallHACmd.bridge,
                   'mevoco_url': 'http://' + args.vip + ':8888', 'cluster_url': 'http://' + args.vip + ':9132/zstack',
                   'host_list': host_list}
        with open(InstallHACmd.conf_file, 'w') as ha_conf_file:
            yaml.dump(ha_info, ha_conf_file, default_flow_style=False)
command = "mkdir -p %s" % InstallHACmd.conf_dir
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
copy_arg = CopyArg()
copy_arg.src = InstallHACmd.conf_dir
copy_arg.dest = InstallHACmd.conf_dir
copy(copy_arg,self.host2_post_info)
command = "chmod 600 %s" % InstallHACmd.conf_dir + "ha_key"
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
run_remote_command(command, self.host3_post_info)
# get iptables from system config
service_status("iptables","state=restarted",self.host1_post_info)
service_status("iptables","state=restarted",self.host2_post_info)
if args.host3_info is not False:
service_status("iptables","state=restarted",self.host3_post_info)
        # remove mariadb to avoid conflicts with the mevoco install process
command = "rpm -q mariadb | grep 'not installed' || yum remove -y mariadb"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "hostnamectl set-hostname zstack-1"
run_remote_command(command, self.host1_post_info)
command = "hostnamectl set-hostname zstack-2"
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = "hostnamectl set-hostname zstack-3"
run_remote_command(command, self.host3_post_info)
# remove old zstack-1 and zstack-2 in hosts file
update_file("/etc/hosts", "regexp='\.*zstack\.*' state=absent", self.host1_post_info)
update_file("/etc/hosts", "regexp='\.*zstack\.*' state=absent", self.host2_post_info)
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host1_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host1_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host1_post_info)
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host2_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host2_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host2_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host3_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host3_post_info)
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host3_post_info)
        # open iptables between the HA nodes; the rules are saved later with 'service iptables save'
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host2_post_info.host, self.host2_post_info.host)
run_remote_command(command, self.host1_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host3_post_info.host, self.host3_post_info.host)
run_remote_command(command, self.host1_post_info)
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host1_post_info.host, self.host1_post_info.host)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host3_post_info.host, self.host3_post_info.host)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host1_post_info.host, self.host1_post_info.host)
run_remote_command(command, self.host3_post_info)
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host2_post_info.host, self.host2_post_info.host)
run_remote_command(command, self.host3_post_info)
        # stop the haproxy and keepalived services to avoid disturbing the terminal status output
command = "service keepalived stop && service haproxy stop || echo True"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
#pass all the variables to other HA deploy process
InstallHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info]
if args.host3_info is not False:
InstallHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info, self.host3_post_info]
# setup mysql ha
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to deploy Mysql HA"
spinner_info.name = 'mysql'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mysql'] = True
ZstackSpinner(spinner_info)
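        # each deployment step below is a callable helper class: Foo()() constructs the object and immediately runs its __call__()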
MysqlHA()()
# setup rabbitmq ha
spinner_info = SpinnerInfo()
spinner_info.output ="Starting to deploy Rabbitmq HA"
spinner_info.name = 'rabbitmq'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['rabbitmq'] = True
ZstackSpinner(spinner_info)
RabbitmqHA()()
# setup haproxy and keepalived
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to deploy Haproxy and Keepalived"
spinner_info.name = 'haproxy_keepalived'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['haproxy_keepalived'] = True
ZstackSpinner(spinner_info)
HaproxyKeepalived()()
#install database on local management node
command = "zstack-ctl stop"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
if args.keep_db is True:
command = "zstack-ctl deploydb --keep-db --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
elif args.drop is True:
command = "zstack-ctl deploydb --drop --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
else:
command = "zstack-ctl deploydb --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
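        # point the management nodes at the VIP on port 53306, haproxy's MySQL front end, instead of at a single database server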
command = "zstack-ctl configure DB.url=jdbc:mysql://%s:53306/{database}?connectTimeout=%d\&socketTimeout=%d"\
% (args.vip, local_map['mysql_connect_timeout'], local_map['mysql_socket_timeout'])
run_remote_command(command, self.host1_post_info)
command = "zstack-ctl configure CloudBus.rabbitmqPassword=%s" % args.mysql_user_password
run_remote_command(command, self.host1_post_info)
# copy zstack-1 property to zstack-2 and update the management.server.ip
# update zstack-1 firstly
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.serverIp\.0' line='CloudBus.serverIp.0=%s'" % args.vip, self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.serverIp\.1' state=absent" , self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqUsername' line='CloudBus.rabbitmqUsername=zstack'",
self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqPassword' line='CloudBus.rabbitmqPassword=%s'"
% args.rabbit_password, self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqHeartbeatTimeout' line='CloudBus.rabbitmqHeartbeatTimeout=10'",
self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'" %
args.host1, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = ctl.properties_file_path
copy_arg.dest = ctl.properties_file_path
copy(copy_arg, self.host2_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'"
% args.host2, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg, self.host3_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'"
% args.host3, self.host3_post_info)
#finally, start zstack-1 and zstack-2
spinner_info = SpinnerInfo()
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
        # save the iptables rules so they persist across reboots
command = "service iptables save"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "zstack-ctl install_ui"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "zstack-ctl start"
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host1, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host1, output))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host2, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host2, output))
if args.host3_info is not False:
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host3, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host3, output))
SpinnerInfo.spinner_status['mevoco'] = False
time.sleep(0.2)
#sync imagestore key
copy_arg = CopyArg()
copy_arg.src = ctl.zstack_home + "/../../../imagestore/bin/certs/"
copy_arg.dest = ctl.zstack_home + "/../../../imagestore/bin/certs/"
copy(copy_arg, self.host2_post_info)
if args.host3_info is not False:
            copy(copy_arg, self.host3_post_info)
print '''HA deploy finished!
MySQL user 'root' password: %s
MySQL user 'zstack' password: %s
RabbitMQ user 'zstack' password: %s
Mevoco is running, visit %s in Chrome or Firefox with the default user/password: %s
You can check the cluster status at %s with user/password: %s
''' % (args.mysql_root_password, args.mysql_user_password, args.rabbit_password,
colored('http://%s:8888' % args.vip, 'blue'), colored('admin/password', 'yellow'),
colored('http://%s:9132/zstack' % args.vip, 'blue'), colored('zstack/zstack123', 'yellow'))
class HaproxyKeepalived(InstallHACmd):
def __init__(self):
super(HaproxyKeepalived, self).__init__()
self.name = "haproxy and keepalived init"
self.description = "haproxy and keepalived setup"
self.host_post_info_list = InstallHACmd.host_post_info_list
self.host1_post_info = self.host_post_info_list[0]
self.host2_post_info = self.host_post_info_list[1]
if len(self.host_post_info_list) == 3:
self.host3_post_info = self.host_post_info_list[2]
self.yum_repo = self.host1_post_info.yum_repo
self.vip = self.host1_post_info.vip
self.gateway = self.host1_post_info.gateway_ip
def __call__(self):
command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q haproxy keepalived"
" | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
"--disablerepo=* --enablerepo=%s install -y $pkg; done;") % self.yum_repo
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
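        # haproxy logs only through syslog, so enable rsyslog's UDP input and route facility local2 to /var/log/haproxy.log on every node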
update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host1_post_info)
update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host2_post_info)
if len(self.host_post_info_list) == 3:
update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host3_post_info)
update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host1_post_info)
update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host1_post_info)
update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host1_post_info)
update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host2_post_info)
update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host2_post_info)
update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host2_post_info)
if len(self.host_post_info_list) == 3:
update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host3_post_info)
update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host3_post_info)
update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host3_post_info)
command = "touch /var/log/haproxy.log"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host1_post_info)
file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host2_post_info)
if len(self.host_post_info_list) == 3:
file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host3_post_info)
service_status("rsyslog","state=restarted enabled=yes", self.host1_post_info)
service_status("rsyslog","state=restarted enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("rsyslog","state=restarted enabled=yes", self.host3_post_info)
haproxy_raw_conf = '''
global
log 127.0.0.1 local2 emerg alert crit err warning notice info debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 1m
timeout check 1m
timeout tunnel 60m
maxconn 6000
listen admin_stats 0.0.0.0:9132
mode http
stats uri /zstack
stats realm Global\ statistics
stats auth zstack:zstack123
listen proxy-mysql 0.0.0.0:53306
mode tcp
option tcplog
balance source
option httpchk OPTIONS * HTTP/1.1\\r\\nHost:\ www
server zstack-1 {{ host1 }}:3306 weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
option tcpka
listen proxy-rabbitmq 0.0.0.0:55672
mode tcp
balance source
timeout client 3h
timeout server 3h
server zstack-1 {{ host1 }}:5672 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
option tcpka
# dashboard not installed, so haproxy will report error
listen proxy-ui 0.0.0.0:8888
mode http
option http-server-close
balance source
server zstack-1 {{ host1 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5000 weight 10 check inter 3s rise 2 fall 2
option tcpka
'''
if len(self.host_post_info_list) == 3:
haproxy_raw_conf = '''
global
log 127.0.0.1 local2 emerg alert crit err warning notice info debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 1m
timeout check 1m
timeout tunnel 60m
maxconn 6000
listen admin_stats 0.0.0.0:9132
mode http
stats uri /zstack
stats realm Global\ statistics
stats auth zstack:zstack123
listen proxy-mysql 0.0.0.0:53306
mode tcp
option tcplog
balance source
option httpchk OPTIONS * HTTP/1.1\\r\\nHost:\ www
server zstack-1 {{ host1 }}:3306 weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
option tcpka
listen proxy-rabbitmq 0.0.0.0:55672
mode tcp
balance source
timeout client 3h
timeout server 3h
server zstack-1 {{ host1 }}:5672 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
option tcpka
# dashboard not installed, so haproxy will report error
listen proxy-ui 0.0.0.0:8888
mode http
option http-server-close
balance source
server zstack-1 {{ host1 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:5000 weight 10 check inter 3s rise 2 fall 2
option tcpka
'''
haproxy_conf_template = jinja2.Template(haproxy_raw_conf)
haproxy_host1_conf = haproxy_conf_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host
})
if len(self.host_post_info_list) == 3:
haproxy_host1_conf = haproxy_conf_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host,
'host3' : self.host3_post_info.host
})
        # host1, host2 and host3 share the same haproxy config file
host1_config, haproxy_host1_conf_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(haproxy_host1_conf)
f1.close()
def cleanup_haproxy_config_file():
os.remove(haproxy_host1_conf_file)
self.install_cleanup_routine(cleanup_haproxy_config_file)
copy_arg = CopyArg()
copy_arg.src = haproxy_host1_conf_file
copy_arg.dest = "/etc/haproxy/haproxy.cfg"
copy(copy_arg,self.host1_post_info)
copy(copy_arg,self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
#config haproxy firewall
command = "iptables -C INPUT -p tcp -m tcp --dport 53306 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 53306 -j ACCEPT; " \
"iptables -C INPUT -p tcp -m tcp --dport 58080 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 58080 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 55672 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 55672 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 80 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 80 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 9132 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 9132 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 8888 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 8888 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 6033 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 6033 -j ACCEPT; service iptables save "
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
#config keepalived
keepalived_raw_config = '''
! Configuration File for keepalived
global_defs {
router_id HAPROXY_LOAD
}
vrrp_script Monitor_Haproxy {
script "/usr/local/bin/keepalived-kill.sh"
interval 2
weight 5
}
vrrp_instance VI_1 {
    # all nodes use the BACKUP state, so there is no fixed master and a recovered node will not race to take back the VIP
state BACKUP
interface {{ bridge }}
virtual_router_id {{ vrouter_id }}
priority {{ priority }}
nopreempt
advert_int 1
authentication {
auth_type PASS
auth_pass {{ auth_passwd }}
}
track_script {
Monitor_Haproxy
}
virtual_ipaddress {
{{ vip }}/{{ netmask }} label {{ bridge }}:0
}
}
'''
virtual_router_id = random.randint(1, 255)
auth_pass = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(15))
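        # a random virtual_router_id and auth password make it unlikely that this cluster clashes with another keepalived setup on the same L2 network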
master_priority = 92
slave_priority = 91
second_slave_priority = 90
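        # every node is configured as BACKUP with nopreempt, so these priorities only decide the initial election and a recovered node will not take the VIP back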
keepalived_template = jinja2.Template(keepalived_raw_config)
keepalived_host1_config = keepalived_template.render({
'bridge' : InstallHACmd.bridge,
'vrouter_id': virtual_router_id,
'priority': master_priority,
'auth_passwd': auth_pass,
'vip': self.vip,
'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
})
keepalived_host2_config = keepalived_template.render({
'bridge' : InstallHACmd.bridge,
'vrouter_id': virtual_router_id,
'priority': slave_priority,
'auth_passwd': auth_pass,
'vip': self.vip,
'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
})
if len(self.host_post_info_list) == 3:
            keepalived_host3_config = keepalived_template.render({
                'bridge' : InstallHACmd.bridge,
                'vrouter_id': virtual_router_id,
                'priority': second_slave_priority,
                'auth_passwd': auth_pass,
                'vip': self.vip,
                'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
            })
host1_config, keepalived_host1_config_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(keepalived_host1_config)
f1.close()
host2_config, keepalived_host2_config_file = tempfile.mkstemp()
        f2 = os.fdopen(host2_config, 'w')
f2.write(keepalived_host2_config)
f2.close()
if len(self.host_post_info_list) == 3:
host3_config, keepalived_host3_config_file = tempfile.mkstemp()
f3 = os.fdopen(host3_config, 'w')
f3.write(keepalived_host3_config)
f3.close()
def cleanup_keepalived_config_file():
os.remove(keepalived_host1_config_file)
os.remove(keepalived_host2_config_file)
if len(self.host_post_info_list) == 3:
os.remove(keepalived_host3_config_file)
self.install_cleanup_routine(cleanup_keepalived_config_file)
copy_arg = CopyArg()
copy_arg.src = keepalived_host1_config_file
copy_arg.dest = "/etc/keepalived/keepalived.conf"
copy(copy_arg, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = keepalived_host2_config_file
copy_arg.dest = "/etc/keepalived/keepalived.conf"
copy(copy_arg, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy_arg = CopyArg()
copy_arg.src = keepalived_host3_config_file
copy_arg.dest = "/etc/keepalived/keepalived.conf"
copy(copy_arg, self.host3_post_info)
# copy keepalived-kill.sh to host
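        # keepalived tracks this script via the vrrp_script/track_script blocks above to decide whether the node may keep the VIP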
copy_arg = CopyArg()
copy_arg.src = "%s/conf/keepalived-kill.sh" % InstallHACmd.current_dir
copy_arg.dest = "/usr/local/bin/keepalived-kill.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg, self.host1_post_info)
copy(copy_arg, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg, self.host3_post_info)
# restart haproxy and keepalived
service_status("keepalived", "state=restarted enabled=yes", self.host1_post_info)
service_status("keepalived", "state=restarted enabled=yes", self.host2_post_info)
service_status("haproxy", "state=restarted enabled=yes", self.host1_post_info)
service_status("haproxy", "state=restarted enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("keepalived", "state=restarted enabled=yes", self.host3_post_info)
service_status("haproxy", "state=restarted enabled=yes", self.host3_post_info)
class MysqlHA(InstallHACmd):
def __init__(self):
super(MysqlHA, self).__init__()
self.host_post_info_list = InstallHACmd.host_post_info_list
self.host1_post_info = self.host_post_info_list[0]
self.host2_post_info = self.host_post_info_list[1]
if len(self.host_post_info_list) == 3:
self.host3_post_info = self.host_post_info_list[2]
self.yum_repo = self.host1_post_info.yum_repo
self.mysql_password = self.host1_post_info.mysql_password
def __call__(self):
command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q MariaDB-Galera-server xinetd rsync openssl-libs "
" | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
"--disablerepo=* --enablerepo=%s,mariadb install -y $pkg; done;") % self.yum_repo
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
# Generate galera config file and copy to host1 host2
galera_raw_config= '''[mysqld]
skip-name-resolve=1
character-set-server=utf8
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=2048
query_cache_size=0
query_cache_type=0
bind_address= {{ host1 }}
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://{{ host2 }},{{ host1 }}"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
'''
if len(self.host_post_info_list) == 3:
# Generate galera config file and copy to host1 host2 host3
galera_raw_config= '''[mysqld]
skip-name-resolve=1
character-set-server=utf8
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=2048
query_cache_size=0
query_cache_type=0
bind_address= {{ host1 }}
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://{{ host3 }},{{ host2 }},{{ host1 }}"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
'''
galera_config_template = jinja2.Template(galera_raw_config)
galera_config_host1 = galera_config_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host
})
if len(self.host_post_info_list) == 3:
galera_config_host1 = galera_config_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host,
'host3' : self.host3_post_info.host
})
galera_config_host2 = galera_config_template.render({
'host1' : self.host2_post_info.host,
'host2' : self.host1_post_info.host
})
if len(self.host_post_info_list) == 3:
galera_config_host2 = galera_config_template.render({
'host1' : self.host2_post_info.host,
'host2' : self.host3_post_info.host,
'host3' : self.host1_post_info.host
})
if len(self.host_post_info_list) == 3:
galera_config_host3 = galera_config_template.render({
'host1' : self.host3_post_info.host,
'host2' : self.host1_post_info.host,
'host3' : self.host2_post_info.host
})
host1_config, galera_config_host1_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(galera_config_host1)
f1.close()
host2_config, galera_config_host2_file = tempfile.mkstemp()
f2 = os.fdopen(host2_config, 'w')
f2.write(galera_config_host2)
f2.close()
if len(self.host_post_info_list) == 3:
host3_config, galera_config_host3_file = tempfile.mkstemp()
f3 = os.fdopen(host3_config, 'w')
f3.write(galera_config_host3)
f3.close()
def cleanup_galera_config_file():
os.remove(galera_config_host1_file)
os.remove(galera_config_host2_file)
if len(self.host_post_info_list) == 3:
os.remove(galera_config_host3_file)
self.install_cleanup_routine(cleanup_galera_config_file)
copy_arg = CopyArg()
copy_arg.src = galera_config_host1_file
copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
copy(copy_arg, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = galera_config_host2_file
copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
copy(copy_arg, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy_arg = CopyArg()
copy_arg.src = galera_config_host3_file
copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
copy(copy_arg, self.host3_post_info)
# restart mysql service to enable galera config
command = "service mysql stop || true"
#service_status("mysql", "state=stopped", self.host1_post_info)
run_remote_command(command, self.host2_post_info)
        # the last node to stop should be the first node to bootstrap
run_remote_command(command, self.host1_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "service mysql bootstrap"
run_remote_command(command, self.host1_post_info)
run_remote_command("service mysql start && chkconfig mysql on", self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command("service mysql start && chkconfig mysql on", self.host3_post_info)
run_remote_command("service mysql restart && chkconfig mysql on", self.host1_post_info)
init_install = run_remote_command("mysql -u root --password='' -e 'exit' ", self.host1_post_info, return_status=True)
if init_install is True:
#command = "mysql -u root --password='' -Bse \"show status like 'wsrep_%%';\""
#galera_status = run_remote_command(command, self.host2_post_info)
#create zstack user
command =" mysql -u root --password='' -Bse 'grant ALL PRIVILEGES on *.* to zstack@\"localhost\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES on *.* to zstack@\"zstack-1\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES on *.* to zstack@\"%%\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES on *.* to root@\"%%\" Identified by \"%s\";" \
"grant ALL PRIVILEGES on *.* to root@\"localhost\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES ON *.* TO root@\"%%\" IDENTIFIED BY \"%s\" WITH GRANT OPTION; " \
"flush privileges;'" % (self.host1_post_info.mysql_userpassword, self.host1_post_info.mysql_userpassword,
self.host1_post_info.mysql_userpassword,self.host1_post_info.mysql_password,
self.host1_post_info.mysql_password, self.host1_post_info.mysql_password)
(status, output) = run_remote_command(command, self.host1_post_info, True, True)
if status is False:
time.sleep(5)
(status, output) = run_remote_command(command, self.host1_post_info, True, True)
if status is False:
error("Failed to set mysql 'zstack' and 'root' password, the reason is %s" % output)
# config mysqlchk_status.sh on zstack-1 and zstack-2
mysqlchk_raw_script = '''#!/bin/sh
MYSQL_HOST="{{ host1 }}"
MYSQL_PORT="3306"
MYSQL_USERNAME="{{ mysql_username }}"
MYSQL_PASSWORD="{{ mysql_password }}"
/usr/bin/mysql -h$MYSQL_HOST -u$MYSQL_USERNAME -p$MYSQL_PASSWORD -e "show databases;" > /dev/null
if [ "$?" -eq 0 ]
then
# mysql is fine, return http 200
/bin/echo -e "HTTP/1.1 200 OK"
/bin/echo -e "Content-Type: Content-Type: text/plain"
/bin/echo -e "MySQL is running."
else
    # mysql is down, return http 503
    /bin/echo -e "HTTP/1.1 503 Service Unavailable"
    /bin/echo -e "Content-Type: text/plain"
/bin/echo -e "MySQL is *down*."
fi
'''
mysqlchk_template = jinja2.Template(mysqlchk_raw_script)
mysqlchk_script_host1 = mysqlchk_template.render({
'host1' : self.host1_post_info.host,
'mysql_username' : "zstack",
'mysql_password' : self.host1_post_info.mysql_userpassword
})
mysqlchk_script_host2 = mysqlchk_template.render({
'host1' : self.host2_post_info.host,
'mysql_username' : "zstack",
'mysql_password' : self.host2_post_info.mysql_userpassword
})
if len(self.host_post_info_list) == 3:
mysqlchk_script_host3 = mysqlchk_template.render({
'host1' : self.host3_post_info.host,
'mysql_username' : "zstack",
'mysql_password' : self.host3_post_info.mysql_userpassword
})
host1_config, mysqlchk_script_host1_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(mysqlchk_script_host1)
f1.close()
host2_config, mysqlchk_script_host2_file = tempfile.mkstemp()
f2 = os.fdopen(host2_config, 'w')
f2.write(mysqlchk_script_host2)
f2.close()
if len(self.host_post_info_list) == 3:
host3_config, mysqlchk_script_host3_file = tempfile.mkstemp()
f3 = os.fdopen(host3_config, 'w')
f3.write(mysqlchk_script_host3)
f3.close()
def cleanup_mysqlchk_script():
os.remove(mysqlchk_script_host1_file)
os.remove(mysqlchk_script_host2_file)
if len(self.host_post_info_list) == 3:
os.remove(mysqlchk_script_host3_file)
self.install_cleanup_routine(cleanup_mysqlchk_script)
copy_arg = CopyArg()
copy_arg.src = mysqlchk_script_host1_file
copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = mysqlchk_script_host2_file
copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy_arg = CopyArg()
copy_arg.src = mysqlchk_script_host3_file
copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host3_post_info)
# check network
check_network_raw_script='''#!/bin/bash
MYSQL_HOST="{{ host }}"
MYSQL_PORT="3306"
MYSQL_USERNAME="root"
MYSQL_PASSWORD="{{ mysql_root_password }}"
# Checking partner ...
ping -c 4 -w 4 $1 > /dev/null 2>&1
if [ $? -ne 0 ]; then
# Checking gateway ...
ping -c 4 -w 4 $2 > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Network ERROR! Kill MySQL NOW!" >> /var/log/check-network.log
pgrep -f mysql | xargs kill -9
else
echo "Setting the primary of Galera." >> /var/log/check-network.log
/usr/bin/mysql -h$MYSQL_HOST -u$MYSQL_USERNAME -p$MYSQL_PASSWORD -e "SET GLOBAL wsrep_provider_options='pc.bootstrap=YES';" > /dev/null
fi
fi
TIMEST=`date`
echo $TIMEST >> /var/log/check-network.log
'''
galera_check_network = jinja2.Template(check_network_raw_script)
galera_check_network_host1 = galera_check_network.render({
'host' : self.host1_post_info.host,
'mysql_root_password' : self.host1_post_info.mysql_password
})
galera_check_network_host2 = galera_check_network.render({
'host' : self.host2_post_info.host,
'mysql_root_password' : self.host1_post_info.mysql_password
})
host1_config, galera_check_network_host1_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(galera_check_network_host1)
f1.close()
host2_config, galera_check_network_host2_file = tempfile.mkstemp()
f2 = os.fdopen(host2_config, 'w')
f2.write(galera_check_network_host2)
f2.close()
def cleanup_gelerachk_script():
os.remove(galera_check_network_host1_file)
os.remove(galera_check_network_host2_file)
self.install_cleanup_routine(cleanup_gelerachk_script)
copy_arg = CopyArg()
copy_arg.src = galera_check_network_host1_file
copy_arg.dest = "/usr/local/zstack/check-network.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = galera_check_network_host2_file
copy_arg.dest = "/usr/local/zstack/check-network.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host2_post_info)
# set cron task for network status
cron("check_node_2_status1","job=\"/usr/local/zstack/check-network.sh %s %s\"" % (self.host2_post_info.host,
self.host2_post_info.gateway_ip),
self.host1_post_info)
cron("check_node_2_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\"" % (self.host2_post_info.host,
self.host2_post_info.gateway_ip),
self.host1_post_info)
cron("check_node_1_status1","job=\"/usr/local/zstack/check-network.sh %s %s\"" % (self.host1_post_info.host,
self.host1_post_info.gateway_ip),
self.host2_post_info)
cron("check_node_1_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\"" % (self.host1_post_info.host,
self.host1_post_info.gateway_ip),
self.host2_post_info)
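        # with three nodes the cluster can keep quorum on its own, so the pairwise network-check cron jobs added above are presumably unnecessary and are removed below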
if len(self.host_post_info_list) == 3:
cron("check_node_1_status1","job=\"/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host1_post_info.host, self.host1_post_info.gateway_ip), self.host2_post_info)
cron("check_node_1_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host1_post_info.host, self.host1_post_info.gateway_ip), self.host2_post_info)
cron("check_node_2_status1","job=\"/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host2_post_info.host, self.host2_post_info.gateway_ip), self.host1_post_info)
cron("check_node_2_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host2_post_info.host, self.host2_post_info.gateway_ip), self.host1_post_info)
#config xinetd for service check
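        # xinetd serves mysqlchk_status.sh on tcp/6033 (the 'mysqlcheck' service registered below); haproxy's 'check port 6033' health probes hit this endpoint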
copy_arg = CopyArg()
copy_arg.src = "%s/conf/mysql-check" % InstallHACmd.current_dir
copy_arg.dest = "/etc/xinetd.d/mysql-check"
copy(copy_arg,self.host1_post_info)
copy(copy_arg,self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
# add service name
update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host1_post_info)
update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host2_post_info)
if len(self.host_post_info_list) == 3:
update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host3_post_info)
# start service
command = "systemctl daemon-reload"
run_remote_command(command,self.host1_post_info)
run_remote_command(command,self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command,self.host3_post_info)
service_status("xinetd","state=restarted enabled=yes",self.host1_post_info)
service_status("xinetd","state=restarted enabled=yes",self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("xinetd","state=restarted enabled=yes",self.host3_post_info)
# add crontab for backup mysql
cron("backup_zstack_db","minute='0' hour='1,13' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host1_post_info)
cron("backup_zstack_db","minute='0' hour='7,19' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host2_post_info)
if len(self.host_post_info_list) == 3:
cron("backup_zstack_db","minute='0' hour='1' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host1_post_info)
cron("backup_zstack_db","minute='0' hour='9' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host2_post_info)
cron("backup_zstack_db","minute='0' hour='17' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host3_post_info)
service_status("crond","state=started enabled=yes",self.host1_post_info)
service_status("crond","state=started enabled=yes",self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("crond","state=started enabled=yes",self.host3_post_info)
class RabbitmqHA(InstallHACmd):
def __init__(self):
super(RabbitmqHA, self).__init__()
self.name = "rabbitmq ha"
self.description = "rabbitmq HA setup"
self.host_post_info_list = InstallHACmd.host_post_info_list
self.host1_post_info = self.host_post_info_list[0]
self.host2_post_info = self.host_post_info_list[1]
if len(self.host_post_info_list) == 3:
self.host3_post_info = self.host_post_info_list[2]
self.yum_repo = self.host1_post_info.yum_repo
self.rabbit_password= self.host1_post_info.rabbit_password
def __call__(self):
command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q rabbitmq-server"
" | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
"--disablerepo=* --enablerepo=%s,mariadb install -y $pkg; done;") % self.yum_repo
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
# clear erlang process for new deploy
command = "echo True || pkill -f .*erlang.* > /dev/null 2>&1 && rm -rf /var/lib/rabbitmq/* "
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
# to stop rabbitmq-server for new installation
service_status("rabbitmq-server","state=stopped", self.host1_post_info, True)
service_status("rabbitmq-server", "state=stopped", self.host2_post_info, True)
if len(self.host_post_info_list) == 3:
service_status("rabbitmq-server", "state=stopped", self.host3_post_info, True)
# to start rabbitmq-server
service_status("rabbitmq-server","state=started enabled=yes", self.host1_post_info)
service_status("rabbitmq-server", "state=started enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("rabbitmq-server", "state=started enabled=yes", self.host3_post_info)
# add zstack user in this cluster
command = "rabbitmqctl add_user zstack %s" % self.rabbit_password
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "rabbitmqctl set_user_tags zstack administrator"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "rabbitmqctl change_password zstack %s" % self.rabbit_password
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = 'rabbitmqctl set_permissions -p \/ zstack ".*" ".*" ".*"'
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "rabbitmq-plugins enable rabbitmq_management"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
service_status("rabbitmq-server","state=restarted enabled=yes", self.host1_post_info)
service_status("rabbitmq-server", "state=restarted enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("rabbitmq-server", "state=restarted enabled=yes", self.host3_post_info)
class ResetRabbitCmd(Command):
def __init__(self):
super(ResetRabbitCmd, self).__init__()
self.name = "reset_rabbitmq"
self.description = "Reinstall RabbitMQ message broker on local machine based on current configuration in zstack.properties."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
pass
def run(self, args):
rabbitmq_ip = ctl.read_property('CloudBus.serverIp.0')
rabbitmq_user = ctl.read_property('CloudBus.rabbitmqUsername')
rabbitmq_passwd = ctl.read_property('CloudBus.rabbitmqPassword')
shell("service rabbitmq-server stop; rpm -ev rabbitmq-server; rm -rf /var/lib/rabbitmq")
if args.yum is not None:
ctl.internal_run('install_rabbitmq', "--host=%s --rabbit-username=%s --rabbit-password=%s --yum=%s" % (rabbitmq_ip, rabbitmq_user, rabbitmq_passwd, args.yum))
else:
ctl.internal_run('install_rabbitmq', "--host=%s --rabbit-username=%s --rabbit-password=%s" % (rabbitmq_ip, rabbitmq_user, rabbitmq_passwd))
class InstallRabbitCmd(Command):
def __init__(self):
super(InstallRabbitCmd, self).__init__()
self.name = "install_rabbitmq"
self.description = "install RabbitMQ message broker on local or remote machine."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='host IP, for example, 192.168.0.212, please specify the real IP rather than "localhost" or "127.0.0.1" when installing on local machine; otherwise management nodes on other machines cannot access the RabbitMQ.', required=True)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--no-update', help="don't update the IP address to 'CloudBus.serverIp.0' in zstack.properties", action="store_true", default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
parser.add_argument('--rabbit-username', help="RabbitMQ username; if set, the username will be created on RabbitMQ. [DEFAULT] rabbitmq default username", default=None)
parser.add_argument('--rabbit-password', help="RabbitMQ password; if set, the password will be created on RabbitMQ for username specified by --rabbit-username. [DEFAULT] rabbitmq default password", default=None)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
def run(self, args):
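# --rabbit-username and --rabbit-password are only valid as a pair; fall back to the yum repo
# configured in zstack.properties when --yum is omitted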
if (args.rabbit_password is None and args.rabbit_username) or (args.rabbit_username is None and args.rabbit_password):
raise CtlError('--rabbit-username and --rabbit-password must be both set or not set')
if not args.yum:
args.yum = get_yum_repo_from_property()
yaml = '''---
- hosts: $host
remote_user: root
vars:
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
script: $pre_install_script
- name: install RabbitMQ on RedHat OS from user defined yum repo
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y rabbitmq-server libselinux-python iptables-services
- name: install RabbitMQ on RedHat OS from online
when: ansible_os_family == 'RedHat' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y rabbitmq-server libselinux-python iptables-services
- name: install iptables-persistent for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- iptables-persistent
- name: install RabbitMQ on Ubuntu OS
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- rabbitmq-server
- name: open 5672 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5672 -j ACCEPT
- name: open 5673 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5673 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5673 -j ACCEPT
- name: open 15672 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 15672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 15672 -j ACCEPT
- name: save iptables
when: ansible_os_family != 'RedHat'
shell: /etc/init.d/iptables-persistent save
- name: open 5672 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5672 -j ACCEPT
- name: open 5673 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5673 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5673 -j ACCEPT
- name: open 15672 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 15672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 15672 -j ACCEPT
- name: save iptables
when: ansible_os_family == 'RedHat'
shell: service iptables save
- name: install rabbitmq management plugin
shell: rabbitmq-plugins enable rabbitmq_management
- name: enable RabbitMQ
service: name=rabbitmq-server state=restarted enabled=yes
- name: post-install script
script: $post_install_script
'''
pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
###################
#Check DNS hijacking
###################
hostname=`hostname`
pintret=`ping -c 1 -W 2 $hostname 2>/dev/null | head -n1`
echo $pintret | grep 'PING' > /dev/null
[ $? -ne 0 ] && exit 0
ip=`echo $pintret | cut -d' ' -f 3 | cut -d'(' -f 2 | cut -d')' -f 1`
ip_1=`echo $ip | cut -d'.' -f 1`
[ "127" = "$ip_1" ] && exit 0
ip addr | grep $ip > /dev/null
[ $? -eq 0 ] && exit 0
echo "The hostname($hostname) of your machine is resolved to IP($ip) which is none of IPs of your machine.
It's likely your DNS server has been hijacking, please try fixing it or add \"ip_of_your_host $hostname\" to /etc/hosts.
DNS hijacking will cause MySQL and RabbitMQ not working."
exit 1
'''
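# write the pre-install script to a temporary file so the Ansible playbook can push and run it on the target host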
fd, pre_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_script)
def cleanup_prescript():
os.remove(pre_script_path)
self.install_cleanup_routine(cleanup_prescript)
if args.rabbit_username and args.rabbit_password:
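# post-install script: add the RabbitMQ user and permissions only when no 'zstack' user exists yet;
# '$$' escapes a literal '$' for string.Template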
post_script = '''set -x
rabbitmqctl list_users|grep 'zstack'
if [ $$? -ne 0 ]; then
set -e
rabbitmqctl add_user $username $password
rabbitmqctl set_user_tags $username administrator
rabbitmqctl set_permissions -p / $username ".*" ".*" ".*"
fi
'''
t = string.Template(post_script)
post_script = t.substitute({
'username': args.rabbit_username,
'password': args.rabbit_password
})
else:
post_script = ''
fd, post_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(post_script)
def cleanup_postscript():
os.remove(post_script_path)
self.install_cleanup_routine(cleanup_postscript)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'pre_install_script': pre_script_path,
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'post_install_script': post_script_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
if not args.no_update:
ctl.write_property('CloudBus.serverIp.0', args.host)
info('updated CloudBus.serverIp.0=%s in %s' % (args.host, ctl.properties_file_path))
if args.rabbit_username and args.rabbit_password:
ctl.write_property('CloudBus.rabbitmqUsername', args.rabbit_username)
info('updated CloudBus.rabbitmqUsername=%s in %s' % (args.rabbit_username, ctl.properties_file_path))
ctl.write_property('CloudBus.rabbitmqPassword', args.rabbit_password)
info('updated CloudBus.rabbitmqPassword=%s in %s' % (args.rabbit_password, ctl.properties_file_path))
class ChangeMysqlPasswordCmd(Command):
def __init__(self):
super(ChangeMysqlPasswordCmd, self).__init__()
self.name = "change_mysql_password"
self.description = (
"Change mysql password for root or normal user"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--root-password','-root',
help="Current mysql root password",
required=True)
parser.add_argument('--user-name','-user',
help="The user you want to change password",
required=True)
parser.add_argument('--new-password','-new',
help="New mysql password of root or normal user",
required=True)
parser.add_argument('--remote-ip','-ip',
help="Mysql ip address if didn't install on localhost",
)
def check_username_password(self,args):
if args.remote_ip is not None:
status, output = commands.getstatusoutput("mysql -u root -p%s -h '%s' -e 'show databases;'" % (args.root_password, args.remote_ip))
else:
status, output = commands.getstatusoutput("mysql -u root -p%s -e 'show databases;'" % args.root_password)
if status != 0:
error(output)
def run(self, args):
self.check_username_password(args)
if args.user_name == 'zstack':
if args.remote_ip is not None:
sql = "mysql -u root -p'%s' -h '%s' -e \"UPDATE mysql.user SET Password=PASSWORD(\'%s\') , Host = \'%s\' WHERE USER=\'%s\';FLUSH PRIVILEGES;\"" % (args.root_password, args.remote_ip, args.new_password,args.remote_ip, args.user_name)
else:
sql = "mysql -u root -p'%s' -e \"UPDATE mysql.user SET Password=PASSWORD(\'%s\') WHERE USER=\'%s\';FLUSH PRIVILEGES;\"" % (args.root_password, args.new_password, args.user_name)
status, output = commands.getstatusoutput(sql)
if status != 0:
error(output)
info("Change mysql password for user '%s' successfully! " % args.user_name)
info(colored("Please change 'DB.password' in 'zstack.properties' then restart zstack to make the changes effective" , 'yellow'))
elif args.user_name == 'root':
if args.remote_ip is not None:
status, output = commands.getstatusoutput("mysqladmin -u %s -p'%s' password %s -h %s" % (args.user_name, args.root_password, args.new_password, args.remote_ip))
else:
status, output = commands.getstatusoutput("mysqladmin -u %s -p'%s' password %s" % (args.user_name, args.root_password, args.new_password))
if status != 0:
error(output)
info("Change mysql password for user '%s' successfully!" % args.user_name)
else:
error("Only support change 'zstack' and 'root' password")
class DumpMysqlCmd(Command):
def __init__(self):
super(DumpMysqlCmd, self).__init__()
self.name = "dump_mysql"
self.description = (
"Dump mysql database for backup"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--file-name',
help="The filename you want to save the database, default is 'zstack-backup-db'",
default="zstack-backup-db")
parser.add_argument('--keep-amount',type=int,
help="The amount of backup files you want to keep, older backup files will be deleted, default number is 60",
default=60)
def run(self, args):
(db_hostname, db_port, db_user, db_password) = ctl.get_live_mysql_portal()
file_name = args.file_name
keep_amount = args.keep_amount
backup_timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
db_backup_dir = "/var/lib/zstack/mysql-backup/"
if os.path.exists(db_backup_dir) is False:
os.mkdir(db_backup_dir)
db_backup_name = db_backup_dir + file_name + "-" + backup_timestamp
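# a local DB portal can use a plain mysqldump; a remote portal needs the --host flag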
if db_hostname == "localhost" or db_hostname == "127.0.0.1":
if db_password is None or db_password == "":
db_connect_password = ""
else:
db_connect_password = "-p" + db_password
command = "mysqldump --add-drop-database --databases -u %s %s -P %s zstack zstack_rest | gzip > %s "\
% (db_user, db_connect_password, db_port, db_backup_name + ".gz")
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
else:
if db_password is None or db_password == "":
db_connect_password = ""
else:
db_connect_password = "-p" + db_password
command = "mysqldump --add-drop-database --databases -u %s %s --host %s -P %s zstack zstack_rest | gzip > %s " \
% (db_user, db_connect_password, db_hostname, db_port, db_backup_name + ".gz")
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
print "Backup mysql successful! You can check the file at %s.gz" % db_backup_name
# prune old backups: keep only the newest keep_amount files, sorted by mtime
if len(os.listdir(db_backup_dir)) > keep_amount:
backup_files_list = [s for s in os.listdir(db_backup_dir) if os.path.isfile(os.path.join(db_backup_dir, s))]
backup_files_list.sort(key=lambda s: os.path.getmtime(os.path.join(db_backup_dir, s)))
for expired_file in backup_files_list:
if expired_file not in backup_files_list[-keep_amount:]:
os.remove(db_backup_dir + expired_file)
class RestoreMysqlCmd(Command):
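# capture the local IP addresses once; used later to decide whether the DB portal is actually this machine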
status, all_local_ip = commands.getstatusoutput("ip a")
def __init__(self):
super(RestoreMysqlCmd, self).__init__()
self.name = "restore_mysql"
self.description = (
"Restore mysql data from backup file"
)
self.hide = True
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--from-file', '-f',
help="The backup filename under /var/lib/zstack/mysql-backup/ ",
required=True)
parser.add_argument('--mysql-root-password',
help="mysql root password",
default=None)
def test_mysql_connection(self, db_connect_password, db_port, db_hostname):
command = "mysql -uroot %s -P %s %s -e 'show databases' >> /dev/null 2>&1" \
% (db_connect_password, db_port, db_hostname)
try:
shell_no_pipe(command)
except:
error("Can't connect mysql with root password '%s', please specify databse root password with --mysql-root-password" % db_connect_password.split('-p')[1])
def run(self, args):
(db_hostname, db_port, db_user, db_password) = ctl.get_live_mysql_portal()
# only root user can restore database
db_password = args.mysql_root_password
db_backup_name = args.from_file
if os.path.exists(db_backup_name) is False:
error("Didn't find file: %s ! Stop recover database! " % db_backup_name)
error_if_tool_is_missing('gunzip')
info("Backup mysql before restore data ...")
shell_no_pipe('zstack-ctl dump_mysql')
shell_no_pipe('zstack-ctl stop_node')
info("Starting recover data ...")
if db_password is None or db_password == "":
db_connect_password = ""
else:
db_connect_password = "-p" + db_password
if db_hostname == "localhost" or db_hostname == "127.0.0.1" or (db_hostname in RestoreMysqlCmd.all_local_ip):
db_hostname = ""
else:
db_hostname = "--host %s" % db_hostname
self.test_mysql_connection(db_connect_password, db_port, db_hostname)
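# drop and recreate each database, then load the gzipped dump into it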
for database in ['zstack','zstack_rest']:
command = "mysql -uroot %s -P %s %s -e 'drop database if exists %s; create database %s' >> /dev/null 2>&1" \
% (db_connect_password, db_port, db_hostname, database, database)
shell_no_pipe(command)
command = "gunzip < %s | mysql -uroot %s %s -P %s %s" \
% (db_backup_name, db_connect_password, db_hostname, db_port, database)
shell_no_pipe(command)
#shell_no_pipe('zstack-ctl start_node')
info("Recover data successfully! You can start node by: zstack-ctl start")
class CollectLogCmd(Command):
zstack_log_dir = "/var/log/zstack/"
vrouter_log_dir = "/home/vyos/zvr/"
host_log_list = ['zstack.log','zstack-kvmagent.log','zstack-iscsi-filesystem-agent.log',
'zstack-agent/collectd.log','zstack-agent/server.log']
bs_log_list = ['zstack-sftpbackupstorage.log','ceph-backupstorage.log','zstack-store/zstore.log',
'fusionstor-backupstorage.log']
ps_log_list = ['ceph-primarystorage.log','fusionstor-primarystorage.log']
# management-server.log is not in the same dir, will collect separately
mn_log_list = ['deploy.log', 'ha.log', 'zstack-console-proxy.log', 'zstack.log', 'zstack-cli', 'zstack-ui.log',
'zstack-dashboard.log', 'zstack-ctl.log']
collect_lines = 100000
logger_dir = '/var/log/zstack/'
logger_file = 'zstack-ctl.log'
failed_flag = False
def __init__(self):
super(CollectLogCmd, self).__init__()
self.name = "collect_log"
self.description = (
"Collect log for diagnose"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--db', help='collect the database for diagnosis', action="store_true", default=False)
parser.add_argument('--mn-only', help='only collect management log', action="store_true", default=False)
parser.add_argument('--full', help='collect full management logs and host logs', action="store_true", default=False)
parser.add_argument('--host', help='only collect management log and specific host log')
def get_db(self, collect_dir):
command = "cp `zstack-ctl dump_mysql | awk '{ print $10 }'` %s" % collect_dir
shell(command, False)
def compress_and_fetch_log(self, local_collect_dir, tmp_log_dir, host_post_info):
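# tar up the remote tmp log dir, fetch the tarball into the local collect dir, clean up the remote side, then unpack locally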
command = "cd %s && tar zcf ../collect-log.tar.gz ." % tmp_log_dir
run_remote_command(command, host_post_info)
fetch_arg = FetchArg()
fetch_arg.src = "%s/../collect-log.tar.gz " % tmp_log_dir
fetch_arg.dest = local_collect_dir
fetch_arg.args = "fail_on_missing=yes flat=yes"
fetch(fetch_arg, host_post_info)
command = "rm -rf %s %s/../collect-log.tar.gz" % (tmp_log_dir, tmp_log_dir)
run_remote_command(command, host_post_info)
(status, output) = commands.getstatusoutput("cd %s && tar zxf collect-log.tar.gz" % local_collect_dir)
if status != 0:
warn("Uncompress %s/collect-log.tar.gz meet problem: %s" % (local_collect_dir, output))
(status, output) = commands.getstatusoutput("rm -f %s/collect-log.tar.gz" % local_collect_dir)
def get_system_log(self, host_post_info, tmp_log_dir):
# collect uptime and last reboot log and dmesg
host_info_log = tmp_log_dir + "host_info"
command = "uptime > %s && last reboot >> %s && free -h >> %s && cat /proc/cpuinfo >> %s && ip addr >> %s && df -h >> %s" % \
(host_info_log, host_info_log, host_info_log, host_info_log, host_info_log, host_info_log)
run_remote_command(command, host_post_info, True, True)
command = "cp /var/log/dmesg* /var/log/messages %s" % tmp_log_dir
run_remote_command(command, host_post_info)
def get_pkg_list(self, host_post_info, tmp_log_dir):
command = "rpm -qa | sort > %s/pkg_list" % tmp_log_dir
run_remote_command(command, host_post_info)
def get_vrouter_log(self, host_post_info, collect_dir):
# the current vrouter logs are small, so collect all of them for debugging
if check_host_reachable(host_post_info) is True:
info("Collecting log from vrouter: %s ..." % host_post_info.host)
local_collect_dir = collect_dir + 'vrouter-%s/' % host_post_info.host
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.vrouter_log_dir
command = "mkdir -p %s " % tmp_log_dir
run_remote_command(command, host_post_info)
command = "/opt/vyatta/sbin/vyatta-save-config.pl && cp /config/config.boot %s" % tmp_log_dir
run_remote_command(command, host_post_info)
command = "cp %s/*.log %s/*.json %s" % (CollectLogCmd.vrouter_log_dir, CollectLogCmd.vrouter_log_dir,tmp_log_dir)
run_remote_command(command, host_post_info)
self.compress_and_fetch_log(local_collect_dir, tmp_log_dir, host_post_info)
else:
warn("Vrouter %s is unreachable!" % host_post_info.host)
def get_host_log(self, host_post_info, collect_dir, collect_full_log=False):
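# collect agent logs from a KVM host: the last collect_lines lines of each log, or the rotated .gz archives too when collect_full_log is set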
if check_host_reachable(host_post_info) is True:
info("Collecting log from host: %s ..." % host_post_info.host)
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.zstack_log_dir
local_collect_dir = collect_dir + 'host-%s/' % host_post_info.host
try:
# a broken file system shouldn't block the log collection process
if not os.path.exists(local_collect_dir):
os.makedirs(local_collect_dir)
command = "mkdir -p %s " % tmp_log_dir
run_remote_command(command, host_post_info)
for log in CollectLogCmd.host_log_list:
if 'zstack-agent' in log:
command = "mkdir -p %s" % tmp_log_dir + '/zstack-agent/'
run_remote_command(command, host_post_info)
host_log = CollectLogCmd.zstack_log_dir + '/' + log
collect_log = tmp_log_dir + '/' + log
if file_dir_exist("path=%s" % host_log, host_post_info):
if collect_full_log:
for num in range(1, 16):
log_name = "%s.%s.gz" % (host_log, num)
command = "/bin/cp -rf %s %s/" % (log_name, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
command = "/bin/cp -rf %s %s/" % (host_log, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
else:
command = "tail -n %d %s > %s " % (CollectLogCmd.collect_lines, host_log, collect_log)
run_remote_command(command, host_post_info)
except SystemExit:
warn("collect log on host %s failed" % host_post_info.host)
logger.warn("collect log on host %s failed" % host_post_info.host)
command = 'rm -rf %s' % tmp_log_dir
CollectLogCmd.failed_flag = True
run_remote_command(command, host_post_info)
return 1
command = 'test "$(ls -A "%s" 2>/dev/null)" || echo The directory is empty' % tmp_log_dir
(status, output) = run_remote_command(command, host_post_info, return_status=True, return_output=True)
if "The directory is empty" in output:
warn("Didn't find log on host: %s " % (host_post_info.host))
command = 'rm -rf %s' % tmp_log_dir
run_remote_command(command, host_post_info)
return 0
self.get_system_log(host_post_info, tmp_log_dir)
self.get_pkg_list(host_post_info, tmp_log_dir)
self.compress_and_fetch_log(local_collect_dir,tmp_log_dir,host_post_info)
else:
warn("Host %s is unreachable!" % host_post_info.host)
def get_storage_log(self, host_post_info, collect_dir, storage_type, collect_full_log=False):
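# collect primary/backup storage agent logs; the log list is chosen by the '_ps'/'_bs' suffix in storage_type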
collect_log_list = []
if check_host_reachable(host_post_info) is True:
info("Collecting log from %s storage: %s ..." % (storage_type, host_post_info.host))
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.zstack_log_dir
local_collect_dir = collect_dir + storage_type + '-' + host_post_info.host+ '/'
try:
# a broken file system shouldn't block the log collection process
if not os.path.exists(local_collect_dir):
os.makedirs(local_collect_dir)
command = "rm -rf %s && mkdir -p %s " % (tmp_log_dir, tmp_log_dir)
run_remote_command(command, host_post_info)
if '_ps' in storage_type:
collect_log_list = CollectLogCmd.ps_log_list
elif '_bs' in storage_type:
collect_log_list = CollectLogCmd.bs_log_list
else:
warn("unknown storage type: %s" % storage_type)
for log in collect_log_list:
if 'zstack-store' in log:
command = "mkdir -p %s" % tmp_log_dir + '/zstack-store/'
run_remote_command(command, host_post_info)
storage_agent_log = CollectLogCmd.zstack_log_dir + '/' + log
collect_log = tmp_log_dir + '/' + log
if file_dir_exist("path=%s" % storage_agent_log, host_post_info):
if collect_full_log:
for num in range(1, 16):
log_name = "%s.%s.gz" % (storage_agent_log, num)
command = "/bin/cp -rf %s %s/" % (log_name, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
command = "/bin/cp -rf %s %s/" % (storage_agent_log, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
else:
command = "tail -n %d %s > %s " % (CollectLogCmd.collect_lines, storage_agent_log, collect_log)
run_remote_command(command, host_post_info)
except SystemExit:
logger.warn("collect log on storage: %s failed" % host_post_info.host)
command = 'rm -rf %s' % tmp_log_dir
CollectLogCmd.failed_flag = True
run_remote_command(command, host_post_info)
command = 'test "$(ls -A "%s" 2>/dev/null)" || echo The directory is empty' % tmp_log_dir
(status, output) = run_remote_command(command, host_post_info, return_status=True, return_output=True)
if "The directory is empty" in output:
warn("Didn't find log on storage host: %s " % host_post_info.host)
command = 'rm -rf %s' % tmp_log_dir
run_remote_command(command, host_post_info)
return 0
self.get_system_log(host_post_info, tmp_log_dir)
self.get_pkg_list(host_post_info, tmp_log_dir)
self.compress_and_fetch_log(local_collect_dir,tmp_log_dir, host_post_info)
else:
warn("%s storage %s is unreachable!" % (storage_type, host_post_info.host))
def get_host_ssh_info(self, host_ip, type):
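# look up SSH credentials for the target in the zstack database; the query depends on the resource type
# (KVM host, backup storage, primary storage, or vrouter)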
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
if type == 'host':
query.sql = "select * from HostVO where managementIp='%s'" % host_ip
host_uuid = query.query()[0]['uuid']
query.sql = "select * from KVMHostVO where uuid='%s'" % host_uuid
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['port']
return (username, password, ssh_port)
elif type == "sftp_bs":
query.sql = "select * from SftpBackupStorageVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "ceph_bs":
query.sql = "select * from CephBackupStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "fusionStor_bs":
query.sql = "select * from FusionstorPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "imageStore_bs":
query.sql = "select * from ImageStoreBackupStorageVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "ceph_ps":
query.sql = "select * from CephPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "fusionStor_ps":
query.sql = "select * from FusionstorPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "vrouter":
query.sql = "select value from GlobalConfigVO where name='vrouter.password'"
password = query.query()
username = "vyos"
ssh_port = 22
return (username, password, ssh_port)
else:
warn("unknown target type: %s" % type)
def get_management_node_log(self, collect_dir, host_post_info, collect_full_log=False):
'''management.log may not exist, so collect the latest file, which may be a tarball'''
if check_host_reachable(host_post_info) is True:
mn_ip = host_post_info.host
info("Collecting log from management node %s ..." % mn_ip)
local_collect_dir = collect_dir + "/management-node-%s/" % mn_ip + '/'
if not os.path.exists(local_collect_dir):
os.makedirs(local_collect_dir)
tmp_log_dir = "%s/../../logs/tmp-log/" % ctl.zstack_home
command = 'rm -rf %s && mkdir -p %s' % (tmp_log_dir, tmp_log_dir)
run_remote_command(command, host_post_info)
command = "mn_log=`find %s/../../logs/management-serve* -maxdepth 1 -type f -printf" \
" '%%T+\\t%%p\\n' | sort -r | awk '{print $2; exit}'`; /bin/cp -rf $mn_log %s" % (ctl.zstack_home, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
if status is not True:
warn("get management-server log failed: %s" % output)
if collect_full_log:
for item in range(0, 15):
log_name = "management-server-" + (datetime.today() - timedelta(days=item)).strftime("%Y-%m-%d")
command = "/bin/cp -rf %s/../../logs/%s* %s/" % (ctl.zstack_home, log_name, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
for log in CollectLogCmd.mn_log_list:
if file_dir_exist("path=%s/%s" % (CollectLogCmd.zstack_log_dir, log), host_post_info):
command = "tail -n %d %s/%s > %s/%s " \
% (CollectLogCmd.collect_lines, CollectLogCmd.zstack_log_dir, log, tmp_log_dir, log)
run_remote_command(command, host_post_info)
self.get_system_log(host_post_info, tmp_log_dir)
self.get_pkg_list(host_post_info, tmp_log_dir)
self.compress_and_fetch_log(local_collect_dir, tmp_log_dir, host_post_info)
else:
warn("Management node %s is unreachable!" % host_post_info.host)
def get_local_mn_log(self, collect_dir, collect_full_log=False):
info("Collecting log from this management node ...")
mn_log_dir = collect_dir + 'management-node-%s' % get_default_ip()
if not os.path.exists(mn_log_dir):
os.makedirs(mn_log_dir)
command = "mn_log=`find %s/../..//logs/management-serve* -maxdepth 1 -type f -printf '%%T+\\t%%p\\n' | sort -r | " \
"awk '{print $2; exit}'`; /bin/cp -rf $mn_log %s/" % (ctl.zstack_home, mn_log_dir)
(status, output) = commands.getstatusoutput(command)
if status !=0:
warn("get management-server log failed: %s" % output)
if collect_full_log:
for item in range(0, 15):
log_name = "management-server-" + (datetime.today() - timedelta(days=item)).strftime("%Y-%m-%d")
command = "/bin/cp -rf %s/../../logs/%s* %s/" % (ctl.zstack_home, log_name, mn_log_dir)
(status, output) = commands.getstatusoutput(command)
for log in CollectLogCmd.mn_log_list:
if os.path.exists(CollectLogCmd.zstack_log_dir + log):
command = ( "tail -n %d %s/%s > %s/%s " % (CollectLogCmd.collect_lines, CollectLogCmd.zstack_log_dir, log, mn_log_dir, log))
(status, output) = commands.getstatusoutput(command)
if status != 0:
warn("get %s failed: %s" % (log, output))
host_info_log = mn_log_dir + "/host_info"
command = "uptime > %s && last reboot >> %s && free -h >> %s && cat /proc/cpuinfo >> %s && ip addr >> %s && df -h >> %s" % \
(host_info_log, host_info_log, host_info_log, host_info_log, host_info_log, host_info_log)
commands.getstatusoutput(command)
command = "cp /var/log/dmesg* /var/log/messages %s/" % mn_log_dir
commands.getstatusoutput(command)
command = "cp %s/*git-commit %s/" % (ctl.zstack_home, mn_log_dir)
commands.getstatusoutput(command)
command = " rpm -qa | sort > %s/pkg_list" % mn_log_dir
commands.getstatusoutput(command)
command = " rpm -qa | sort > %s/pkg_list" % mn_log_dir
commands.getstatusoutput(command)
def generate_tar_ball(self, run_command_dir, detail_version, time_stamp):
(status, output) = commands.getstatusoutput("cd %s && tar zcf collect-log-%s-%s.tar.gz collect-log-%s-%s"
% (run_command_dir, detail_version, time_stamp, detail_version, time_stamp))
if status != 0:
error("Generate tarball failed: %s " % output)
def generate_host_post_info(self, host_ip, type):
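# build the HostPostInfo Ansible needs: ensure the host is listed in the inventory file and attach the SSH credentials fetched from the DB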
host_post_info = HostPostInfo()
# update inventory
with open(ctl.zstack_home + "/../../../ansible/hosts") as f:
old_hosts = f.read()
if host_ip not in old_hosts:
with open(ctl.zstack_home + "/../../../ansible/hosts", "w") as f:
new_hosts = host_ip + "\n" + old_hosts
f.write(new_hosts)
(host_user, host_password, host_port) = self.get_host_ssh_info(host_ip, type)
if host_user != 'root' and host_password is not None:
host_post_info.become = True
host_post_info.remote_user = host_user
host_post_info.remote_pass = host_password
host_post_info.remote_port = host_port
host_post_info.host = host_ip
host_post_info.host_inventory = ctl.zstack_home + "/../../../ansible/hosts"
host_post_info.private_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa"
host_post_info.post_url = ""
return host_post_info
def run(self, args):
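# overall flow: collect management node logs (the local node, or every node listed in the HA config),
# optionally the DB dump, then host/vrouter/storage logs, and finally pack everything into one tarball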
run_command_dir = os.getcwd()
time_stamp = datetime.now().strftime("%Y-%m-%d_%H-%M")
# create log
create_log(CollectLogCmd.logger_dir, CollectLogCmd.logger_file)
if get_detail_version() is not None:
detail_version = get_detail_version().replace(' ','_')
else:
hostname, port, user, password = ctl.get_live_mysql_portal()
detail_version = get_zstack_version(hostname, port, user, password)
# collect_dir used to store the collect-log
collect_dir = run_command_dir + '/collect-log-%s-%s/' % (detail_version, time_stamp)
if not os.path.exists(collect_dir):
os.makedirs(collect_dir)
if os.path.exists(InstallHACmd.conf_file) is not True:
self.get_local_mn_log(collect_dir, args.full)
else:
# HA only: the DB may lose MN info if an MN is offline, so read the MN list from the HA config file
mn_list = get_ha_mn_list(InstallHACmd.conf_file)
for mn_ip in mn_list:
host_post_info = HostPostInfo()
host_post_info.remote_user = 'root'
# this will be changed in the future
host_post_info.remote_port = '22'
host_post_info.host = mn_ip
host_post_info.host_inventory = InstallHACmd.conf_dir + 'host'
host_post_info.post_url = ""
host_post_info.private_key = InstallHACmd.conf_dir + 'ha_key'
self.get_management_node_log(collect_dir, host_post_info, args.full)
if args.db is True:
self.get_db(collect_dir)
if args.mn_only is not True:
host_vo = get_host_list("HostVO")
#collect host log
for host in host_vo:
if args.host is not None:
host_ip = args.host
else:
host_ip = host['managementIp']
host_type = host['hypervisorType']
if host_type == "KVM":
self.get_host_log(self.generate_host_post_info(host_ip, "host"), collect_dir, args.full)
else:
warn("host %s is not a KVM host, skip..." % host_ip)
if args.host is not None:
break
#collect vrouter log
vrouter_ip_list = get_vrouter_list()
for vrouter_ip in vrouter_ip_list:
self.get_vrouter_log(self.generate_host_post_info(vrouter_ip, "vrouter"),collect_dir)
#collect bs log
sftp_bs_vo = get_host_list("SftpBackupStorageVO")
for bs in sftp_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "sftp_bs"), collect_dir, "sftp_bs")
ceph_bs_vo = get_host_list("CephBackupStorageMonVO")
for bs in ceph_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "ceph_bs"), collect_dir, "ceph_bs")
fusionStor_bs_vo = get_host_list("FusionstorBackupStorageMonVO")
for bs in fusionStor_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "fusionStor_bs"), collect_dir, "fusionStor_bs")
imageStore_bs_vo = get_host_list("ImageStoreBackupStorageVO")
for bs in imageStore_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "imageStore_bs"), collect_dir, "imageStore_bs")
#collect ps log
ceph_ps_vo = get_host_list("CephPrimaryStorageMonVO")
for ps in ceph_ps_vo:
ps_ip = ps['hostname']
self.get_storage_log(self.generate_host_post_info(ps_ip,"ceph_ps"), collect_dir, "ceph_ps")
fusionStor_ps_vo = get_host_list("FusionstorPrimaryStorageMonVO")
for ps in fusionStor_ps_vo:
ps_ip = ps['hostname']
self.get_storage_log(self.generate_host_post_info(ps_ip,"fusionStor_ps"), collect_dir, "fusionStor_ps")
self.generate_tar_ball(run_command_dir, detail_version, time_stamp)
if CollectLogCmd.failed_flag is True:
info("The collect log generate at: %s/collect-log-%s-%s.tar.gz" % (run_command_dir, detail_version, time_stamp))
info(colored("Please check the reason of failed task in log: %s\n" % (CollectLogCmd.logger_dir + CollectLogCmd.logger_file), 'yellow'))
else:
info("The collect log generate at: %s/collect-log-%s-%s.tar.gz" % (run_command_dir, detail_version, time_stamp))
class ChangeIpCmd(Command):
def __init__(self):
super(ChangeIpCmd, self).__init__()
self.name = "change_ip"
self.description = (
"update new management ip address to zstack property file"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--ip', help='The new IP address of the management node. '
'This operation will update the new IP address in the '
'zstack config file', required=True)
parser.add_argument('--cloudbus_server_ip', help='The new IP address of CloudBus.serverIp.0, default will use value from --ip', required=False)
parser.add_argument('--mysql_ip', help='The new IP address of DB.url, default will use value from --ip', required=False)
parser.add_argument('--yum',
help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.",
default=None)
def run(self, args):
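# validate the new IPs, update /etc/hosts and the hostname, rewrite the related properties, then reset RabbitMQ with the new address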
if args.ip == '0.0.0.0':
raise CtlError('for your data safety, please do NOT use 0.0.0.0 as the listen address')
if args.cloudbus_server_ip is not None:
cloudbus_server_ip = args.cloudbus_server_ip
else:
cloudbus_server_ip = args.ip
if args.mysql_ip is not None:
mysql_ip = args.mysql_ip
else:
mysql_ip = args.ip
zstack_conf_file = ctl.properties_file_path
ip_check = re.compile('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
for input_ip in [cloudbus_server_ip, mysql_ip]:
if not ip_check.match(input_ip):
info("The ip address you input: %s seems not a valid ip" % input_ip)
return 1
# Update /etc/hosts
if os.path.isfile(zstack_conf_file):
old_ip = ctl.read_property('management.server.ip')
if old_ip is not None:
if not ip_check.match(old_ip):
info("The ip address[%s] read from [%s] seems not a valid ip" % (old_ip, zstack_conf_file))
return 1
# read the hostname from the environment rather than /etc/hostname, to avoid interference from the DHCP server
old_hostname = shell("hostname").replace("\n","")
new_hostname = args.ip.replace(".","-")
if old_hostname != "localhost" and old_hostname != "localhost.localdomain":
new_hostname = old_hostname
if old_ip != None:
shell('sed -i "/^%s .*$/d" /etc/hosts' % old_ip)
else:
shell('sed -i "/^.* %s$/d" /etc/hosts' % new_hostname)
shell('echo "%s %s" >> /etc/hosts' % (args.ip, new_hostname))
shell('hostnamectl set-hostname %s' % new_hostname)
shell('export HOSTNAME=%s' % new_hostname)
if old_ip != None:
info("Update /etc/hosts, old_ip:%s, new_ip:%s" % (old_ip, args.ip))
else:
info("Update /etc/hosts, new_ip:%s" % args.ip)
else:
info("Didn't find %s, skip update new ip" % zstack_conf_file )
return 1
# Update zstack config file
if os.path.isfile(zstack_conf_file):
shell("yes | cp %s %s.bak" % (zstack_conf_file, zstack_conf_file))
ctl.write_properties([
('CloudBus.serverIp.0', cloudbus_server_ip),
])
info("Update cloudbus server ip %s in %s " % (cloudbus_server_ip, zstack_conf_file))
ctl.write_properties([
('management.server.ip', args.ip),
])
info("Update management server ip %s in %s " % (args.ip, zstack_conf_file))
db_url = ctl.read_property('DB.url')
db_old_ip = re.findall(r'[0-9]+(?:\.[0-9]{1,3}){3}', db_url)
db_new_url = db_url.split(db_old_ip[0])[0] + mysql_ip + db_url.split(db_old_ip[0])[1]
ctl.write_properties([
('DB.url', db_new_url),
])
info("Update mysql new url %s in %s " % (db_new_url, zstack_conf_file))
else:
info("Didn't find %s, skip update new ip" % zstack_conf_file )
return 1
# Reset RabbitMQ
info("Starting reset rabbitmq...")
if args.yum is not None:
ret = shell_return("zstack-ctl reset_rabbitmq --yum=%s" % args.yum)
else:
ret = shell_return("zstack-ctl reset_rabbitmq")
if ret == 0:
info("Reset rabbitMQ successfully")
info("Change ip successfully")
else:
error("Change ip failed")
class InstallManagementNodeCmd(Command):
def __init__(self):
super(InstallManagementNodeCmd, self).__init__()
self.name = "install_management_node"
self.description = (
"install ZStack management node from current machine to a remote machine with zstack.properties."
"\nNOTE: please configure current node before installing node on other machines"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='target host IP, for example, 192.168.0.212, to install ZStack management node to a remote machine', required=True)
parser.add_argument('--install-path', help='the path on remote machine where Apache Tomcat will be installed, which must be an absolute path; [DEFAULT]: /usr/local/zstack', default='/usr/local/zstack')
parser.add_argument('--source-dir', help='the source folder containing Apache Tomcat package and zstack.war, if omitted, it will default to a path related to $ZSTACK_HOME')
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--force-reinstall', help="delete existing Apache Tomcat and resinstall ZStack", action="store_true", default=False)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
if not os.path.isabs(args.install_path):
raise CtlError('%s is not an absolute path' % args.install_path)
if not args.source_dir:
args.source_dir = os.path.join(ctl.zstack_home, "../../../")
if not os.path.isdir(args.source_dir):
raise CtlError('%s is not a directory' % args.source_dir)
if not args.yum:
args.yum = get_yum_repo_from_property()
apache_tomcat = None
zstack = None
apache_tomcat_zip_name = None
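# locate the Apache Tomcat ZIP and zstack.war in the source directory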
for file in os.listdir(args.source_dir):
full_path = os.path.join(args.source_dir, file)
if file.startswith('apache-tomcat') and file.endswith('zip') and os.path.isfile(full_path):
apache_tomcat = full_path
apache_tomcat_zip_name = file
if file == 'zstack.war':
zstack = full_path
if not apache_tomcat:
raise CtlError('cannot find Apache Tomcat ZIP in %s, please use --source-dir to specify the directory containing the ZIP' % args.source_dir)
if not zstack:
raise CtlError('cannot find zstack.war in %s, please use --source-dir to specify the directory containing the WAR file' % args.source_dir)
pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
if not os.path.isdir(pypi_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
pypi_tar_path = os.path.join(ctl.zstack_home, "static/pypi.tar.bz")
static_path = os.path.join(ctl.zstack_home, "static")
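# pack the local pypi mirror so the playbook can copy it to the remote host for offline pip installs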
shell('cd %s; tar jcf pypi.tar.bz pypi' % static_path)
yaml = '''---
- hosts: $host
remote_user: root
vars:
root: $install_path
yum_repo: "$yum_repo"
tasks:
- name: check remote env on RedHat OS 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7'
script: $pre_script_on_rh6
- name: prepare remote environment
script: $pre_script
- name: install dependencies on RedHat OS from user defined repo
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y dmidecode java-1.8.0-openjdk wget python-devel gcc autoconf tar gzip unzip python-pip openssh-clients sshpass bzip2 ntp ntpdate sudo libselinux-python python-setuptools iptables-services
- name: install dependencies on RedHat OS from system repos
when: ansible_os_family == 'RedHat' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y dmidecode java-1.8.0-openjdk wget python-devel gcc autoconf tar gzip unzip python-pip openssh-clients sshpass bzip2 ntp ntpdate sudo libselinux-python python-setuptools iptables-services
- name: set java 8 as default runtime
when: ansible_os_family == 'RedHat'
shell: update-alternatives --install /usr/bin/java java /usr/lib/jvm/jre-1.8.0/bin/java 0; update-alternatives --set java /usr/lib/jvm/jre-1.8.0/bin/java
- name: add ppa source for openjdk-8 on Ubuntu 14.04
when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
shell: add-apt-repository ppa:openjdk-r/ppa -y; apt-get update
- name: install openjdk on Ubuntu 14.04
when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
apt: pkg={{item}} update_cache=yes
with_items:
- openjdk-8-jdk
- name: install openjdk on Ubuntu 16.04
when: ansible_os_family == 'Debian' and ansible_distribution_version == '16.04'
apt: pkg={{item}} update_cache=yes
with_items:
- openjdk-8-jdk
- name: set java 8 as default runtime
when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
shell: update-alternatives --install /usr/bin/java java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java 0; update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/javac 0; update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java; update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac
- name: install dependencies Debian OS
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- wget
- python-dev
- gcc
- autoconf
- tar
- gzip
- unzip
- python-pip
- sshpass
- bzip2
- ntp
- ntpdate
- sudo
- python-setuptools
- stat: path=/usr/bin/mysql
register: mysql_path
- name: install MySQL client for RedHat 6 from user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo != 'false' and (mysql_path.stat.exists == False)
shell: yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mysql
- name: install MySQL client for RedHat 6 from system repo
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo == 'false' and (mysql_path.stat.exists == False)
shell: yum --nogpgcheck install -y mysql
- name: install MySQL client for RedHat 7 from user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo != 'false' and (mysql_path.stat.exists == False)
shell: yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mariadb
- name: install MySQL client for RedHat 7 from system repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo == 'false' and (mysql_path.stat.exists == False)
shell: yum --nogpgcheck install -y mariadb
- name: install MySQL client for Ubuntu
when: ansible_os_family == 'Debian' and (mysql_path.stat.exists == False)
apt: pkg={{item}}
with_items:
- mysql-client
- name: copy pypi tar file
copy: src=$pypi_tar_path dest=$pypi_tar_path_dest
- name: untar pypi
shell: "cd /tmp/; tar jxf $pypi_tar_path_dest"
- name: install pip from local source
shell: "easy_install -i file://$pypi_path/simple --upgrade pip"
- name: install ansible from local source
pip: name="ansible" extra_args="-i file://$pypi_path/simple --ignore-installed --trusted-host localhost"
- name: install virtualenv
pip: name="virtualenv" extra_args="-i file://$pypi_path/simple --ignore-installed --trusted-host localhost"
- name: copy Apache Tomcat
copy: src=$apache_path dest={{root}}/$apache_tomcat_zip_name
- name: copy zstack.war
copy: src=$zstack_path dest={{root}}/zstack.war
- name: install ZStack
script: $post_script
- name: copy zstack.properties
copy: src=$properties_file dest={{root}}/apache-tomcat/webapps/zstack/WEB-INF/classes/zstack.properties
- name: setup zstack account
script: $setup_account
'''
pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $$? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/os/\$$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/updates/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/extras/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$$releasever/\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/os/\$$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/updates/\$$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/extras/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$$releasever/\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
whereis zstack-ctl
if [ $$? -eq 0 ]; then
zstack-ctl stop_node
fi
apache_path=$install_path/apache-tomcat
if [[ -d $$apache_path ]] && [[ $force_resinstall -eq 0 ]]; then
echo "found existing Apache Tomcat directory $$apache_path; please use --force-reinstall to delete it and re-install"
exit 1
fi
rm -rf $install_path
mkdir -p $install_path
'''
t = string.Template(pre_script)
pre_script = t.substitute({
'force_resinstall': int(args.force_reinstall),
'install_path': args.install_path
})
fd, pre_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(pre_script)
pre_script_on_rh6 = '''
ZSTACK_INSTALL_LOG='/tmp/zstack_installation.log'
rpm -qi python-crypto >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Management node remote installation failed. You need to manually remove python-crypto by \n\n \`rpm -ev python-crypto\` \n\n in remote management node; otherwise it will conflict with ansible's pycrypto." >>$ZSTACK_INSTALL_LOG
exit 1
fi
'''
t = string.Template(pre_script_on_rh6)
fd, pre_script_on_rh6_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(pre_script_on_rh6)
def cleanup_pre_script():
os.remove(pre_script_path)
os.remove(pre_script_on_rh6_path)
self.install_cleanup_routine(cleanup_pre_script)
post_script = '''
set -e
filename=$apache_tomcat_zip_name
foldername="$${filename%.*}"
apache_path=$install_path/apache-tomcat
unzip $apache -d $install_path
ln -s $install_path/$$foldername $$apache_path
unzip $zstack -d $$apache_path/webapps/zstack
chmod a+x $$apache_path/bin/*
cat >> $$apache_path/bin/setenv.sh <<EOF
export CATALINA_OPTS=" -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote=true"
EOF
install_script="$$apache_path/webapps/zstack/WEB-INF/classes/tools/install.sh"
eval "bash $$install_script zstack-ctl"
eval "bash $$install_script zstack-cli"
set +e
grep "ZSTACK_HOME" ~/.bashrc > /dev/null
if [ $$? -eq 0 ]; then
sed -i "s#export ZSTACK_HOME=.*#export ZSTACK_HOME=$$apache_path/webapps/zstack#" ~/.bashrc
else
echo "export ZSTACK_HOME=$$apache_path/webapps/zstack" >> ~/.bashrc
fi
which ansible-playbook &> /dev/null
if [ $$? -ne 0 ]; then
pip install -i file://$pypi_path/simple --trusted-host localhost ansible
fi
'''
t = string.Template(post_script)
post_script = t.substitute({
'install_path': args.install_path,
'apache': os.path.join(args.install_path, apache_tomcat_zip_name),
'zstack': os.path.join(args.install_path, 'zstack.war'),
'apache_tomcat_zip_name': apache_tomcat_zip_name,
'pypi_path': '/tmp/pypi/'
})
fd, post_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(post_script)
def cleanup_post_script():
os.remove(post_script_path)
self.install_cleanup_routine(cleanup_post_script)
setup_account = '''id -u zstack >/dev/null 2>&1
if [ $$? -eq 0 ]; then
usermod -d $install_path zstack
else
useradd -d $install_path zstack && mkdir -p $install_path && chown -R zstack.zstack $install_path
fi
grep 'zstack' /etc/sudoers >/dev/null || echo 'zstack ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
grep '^root' /etc/sudoers >/dev/null || echo 'root ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
sed -i '/requiretty$$/d' /etc/sudoers
chown -R zstack.zstack $install_path
mkdir /home/zstack && chown -R zstack.zstack /home/zstack
zstack-ctl setenv ZSTACK_HOME=$install_path/apache-tomcat/webapps/zstack
'''
t = string.Template(setup_account)
setup_account = t.substitute({
'install_path': args.install_path
})
fd, setup_account_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(setup_account)
def clean_up():
os.remove(setup_account_path)
self.install_cleanup_routine(clean_up)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'install_path': args.install_path,
'apache_path': apache_tomcat,
'zstack_path': zstack,
'pre_script': pre_script_path,
'pre_script_on_rh6': pre_script_on_rh6_path,
'post_script': post_script_path,
'properties_file': ctl.properties_file_path,
'apache_tomcat_zip_name': apache_tomcat_zip_name,
'pypi_tar_path': pypi_tar_path,
'pypi_tar_path_dest': '/tmp/pypi.tar.bz',
'pypi_path': '/tmp/pypi/',
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'setup_account': setup_account_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
info('successfully installed new management node on machine(%s)' % args.host)
class ShowConfiguration(Command):
def __init__(self):
super(ShowConfiguration, self).__init__()
self.name = "show_configuration"
self.description = "a shortcut that prints contents of zstack.properties to screen"
ctl.register_command(self)
def run(self, args):
shell_no_pipe('cat %s' % ctl.properties_file_path)
class SetEnvironmentVariableCmd(Command):
PATH = os.path.join(ctl.USER_ZSTACK_HOME_DIR, "zstack-ctl/ctl-env")
def __init__(self):
super(SetEnvironmentVariableCmd, self).__init__()
self.name = "setenv"
self.description = "set variables to zstack-ctl variable file at %s" % self.PATH
ctl.register_command(self)
def need_zstack_home(self):
return False
def run(self, args):
if not ctl.extra_arguments:
raise CtlError('please input variables in the format of "key=value", separated by spaces')
if not os.path.isdir(ctl.USER_ZSTACK_HOME_DIR):
raise CtlError('cannot find home directory(%s) of user "zstack"' % ctl.USER_ZSTACK_HOME_DIR)
with use_user_zstack():
path_dir = os.path.dirname(self.PATH)
if not os.path.isdir(path_dir):
os.makedirs(path_dir)
with open(self.PATH, 'a'):
# create the file if not existing
pass
env = PropertyFile(self.PATH)
arg_str = ' '.join(ctl.extra_arguments)
env.write_properties([arg_str.split('=', 1)])
class UnsetEnvironmentVariableCmd(Command):
NAME = 'unsetenv'
def __init__(self):
super(UnsetEnvironmentVariableCmd, self).__init__()
self.name = self.NAME
self.description = (
'unset variables in %s' % SetEnvironmentVariableCmd.PATH
)
ctl.register_command(self)
def run(self, args):
if not os.path.exists(SetEnvironmentVariableCmd.PATH):
return
if not ctl.extra_arguments:
raise CtlError('please input a list of variable names you want to unset')
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
env.delete_properties(ctl.extra_arguments)
info('unset zstack environment variables: %s' % ctl.extra_arguments)
class GetEnvironmentVariableCmd(Command):
NAME = 'getenv'
def __init__(self):
super(GetEnvironmentVariableCmd, self).__init__()
self.name = self.NAME
self.description = (
"get variables from %s" % SetEnvironmentVariableCmd.PATH
)
ctl.register_command(self)
def run(self, args):
if not os.path.exists(SetEnvironmentVariableCmd.PATH):
raise CtlError('cannot find the environment variable file at %s' % SetEnvironmentVariableCmd.PATH)
ret = []
if ctl.extra_arguments:
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
for key in ctl.extra_arguments:
value = env.read_property(key)
if value:
ret.append('%s=%s' % (key, value))
else:
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
for k, v in env.read_all_properties():
ret.append('%s=%s' % (k, v))
info('\n'.join(ret))
class InstallWebUiCmd(Command):
def __init__(self):
super(InstallWebUiCmd, self).__init__()
self.name = "install_ui"
self.description = "install ZStack web UI"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='target host IP, for example, 192.168.0.212, to install ZStack web UI; if omitted, it will be installed on local machine')
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
        parser.add_argument('--force', help="delete the existing virtualenv and reinstall ZStack UI and all dependencies", action="store_true", default=False)
def _install_to_local(self, args):
install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
if not os.path.isfile(install_script):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
info('found installation script at %s, start installing ZStack web UI' % install_script)
if args.force:
shell('bash %s zstack-dashboard force' % install_script)
else:
shell('bash %s zstack-dashboard' % install_script)
def run(self, args):
if not args.host:
self._install_to_local(args)
return
if not args.yum:
args.yum = get_yum_repo_from_property()
tools_path = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/")
if not os.path.isdir(tools_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % tools_path)
ui_binary = None
for l in os.listdir(tools_path):
if l.startswith('zstack_dashboard'):
ui_binary = l
break
if not ui_binary:
raise CtlError('cannot find zstack-dashboard package under %s, please make sure you have installed ZStack management node' % tools_path)
ui_binary_path = os.path.join(tools_path, ui_binary)
pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
if not os.path.isdir(pypi_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
pypi_tar_path = os.path.join(ctl.zstack_home, "static/pypi.tar.bz")
if not os.path.isfile(pypi_tar_path):
static_path = os.path.join(ctl.zstack_home, "static")
os.system('cd %s; tar jcf pypi.tar.bz pypi' % static_path)
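        # Ansible playbook (template below): copy the zstack-dashboard package and the bundled pypi mirror to the target host, then install the dashboard into its own virtualenv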
yaml = '''---
- hosts: $host
remote_user: root
vars:
virtualenv_root: /var/lib/zstack/virtualenv/zstack-dashboard
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
script: $pre_install_script
- name: install Python pip for RedHat OS from user defined repo
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y libselinux-python python-pip bzip2 python-devel gcc autoconf
- name: install Python pip for RedHat OS from system repo
when: ansible_os_family == 'RedHat' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y libselinux-python python-pip bzip2 python-devel gcc autoconf
- name: copy zstack-dashboard package
copy: src=$src dest=$dest
- name: copy pypi tar file
copy: src=$pypi_tar_path dest=$pypi_tar_path_dest
- name: untar pypi
shell: "cd /tmp/; tar jxf $pypi_tar_path_dest"
- name: install Python pip for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- python-pip
- iptables-persistent
- name: install pip from local source
shell: "cd $pypi_path/simple/pip/; pip install --ignore-installed pip*.tar.gz"
- shell: virtualenv --version | grep "12.1.1"
register: virtualenv_ret
ignore_errors: True
- name: install virtualenv
pip: name=virtualenv version=12.1.1 extra_args="--ignore-installed --trusted-host localhost -i file://$pypi_path/simple"
when: virtualenv_ret.rc != 0
- name: create virtualenv
shell: "rm -rf {{virtualenv_root}} && virtualenv {{virtualenv_root}}"
- name: install zstack-dashboard
pip: name=$dest extra_args="--trusted-host localhost -i file://$pypi_path/simple" virtualenv="{{virtualenv_root}}"
'''
pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
'''
fd, pre_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_script)
def cleanup_prescript():
os.remove(pre_script_path)
self.install_cleanup_routine(cleanup_prescript)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
"src": ui_binary_path,
"dest": os.path.join('/tmp', ui_binary),
"host": args.host,
'pre_install_script': pre_script_path,
'pypi_tar_path': pypi_tar_path,
'pypi_tar_path_dest': '/tmp/pypi.tar.bz',
'pypi_path': '/tmp/pypi/',
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo
})
ansible(yaml, args.host, ssh_key=args.ssh_key)
class BootstrapCmd(Command):
def __init__(self):
super(BootstrapCmd, self).__init__()
self.name = 'bootstrap'
self.description = (
            'create the "zstack" user and group and add "zstack" to sudoers;'
            '\nthis command is only needed by the installation script'
            ' and by users who install ZStack manually'
)
ctl.register_command(self)
def need_zstack_user(self):
return False
def run(self, args):
shell('id -u zstack 2>/dev/null || (useradd -d %s zstack -s /bin/false && mkdir -p %s && chown -R zstack.zstack %s)' % (ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR))
shell("grep 'zstack' /etc/sudoers || echo 'zstack ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers")
shell('mkdir -p %s && chown zstack:zstack %s' % (ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR))
class UpgradeManagementNodeCmd(Command):
def __init__(self):
super(UpgradeManagementNodeCmd, self).__init__()
self.name = "upgrade_management_node"
self.description = 'upgrade the management node to a specified version'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='IP or DNS name of the machine to upgrade the management node', default=None)
parser.add_argument('--war-file', help='path to zstack.war. A HTTP/HTTPS url or a path to a local zstack.war', required=True)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
error_if_tool_is_missing('unzip')
need_download = args.war_file.startswith('http')
if need_download:
error_if_tool_is_missing('wget')
upgrade_tmp_dir = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'upgrade', time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()))
shell('mkdir -p %s' % upgrade_tmp_dir)
property_file_backup_path = os.path.join(upgrade_tmp_dir, 'zstack.properties')
class NewWarFilePath(object):
            path = None
new_war = NewWarFilePath()
if not need_download:
new_war.path = expand_path(args.war_file)
if not os.path.exists(new_war.path):
raise CtlError('%s not found' % new_war.path)
def local_upgrade():
def backup():
ctl.internal_run('save_config', '--save-to %s' % os.path.dirname(property_file_backup_path))
shell('cp -r %s %s' % (ctl.zstack_home, upgrade_tmp_dir))
info('backup %s to %s' % (ctl.zstack_home, upgrade_tmp_dir))
def download_war_if_needed():
if need_download:
new_war.path = os.path.join(upgrade_tmp_dir, 'new', 'zstack.war')
shell_no_pipe('wget --no-check-certificate %s -O %s' % (args.war_file, new_war.path))
info('downloaded new zstack.war to %s' % new_war.path)
def stop_node():
info('start to stop the management node ...')
ctl.internal_run('stop_node')
def upgrade():
info('start to upgrade the management node ...')
shell('rm -rf %s' % ctl.zstack_home)
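                # locate the Tomcat webapps directory that holds the zstack folder, tolerating a trailing slash in zstack_home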
if ctl.zstack_home.endswith('/'):
webapp_dir = os.path.dirname(os.path.dirname(ctl.zstack_home))
else:
webapp_dir = os.path.dirname(ctl.zstack_home)
shell('cp %s %s' % (new_war.path, webapp_dir))
ShellCmd('unzip %s -d zstack' % os.path.basename(new_war.path), workdir=webapp_dir)()
                # create a local repo folder for a possible zstack local yum repo
zstack_dvd_repo = '%s/zstack/static/zstack-dvd' % webapp_dir
shell('rm -f %s; ln -s /opt/zstack-dvd %s' % (zstack_dvd_repo, zstack_dvd_repo))
def restore_config():
info('restoring the zstack.properties ...')
ctl.internal_run('restore_config', '--restore-from %s' % os.path.dirname(property_file_backup_path))
def install_tools():
                info('upgrading zstack-cli, zstack-ctl; this may take several minutes ...')
install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
if not os.path.isfile(install_script):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
shell("bash %s zstack-cli" % install_script)
shell("bash %s zstack-ctl" % install_script)
info('successfully upgraded zstack-cli, zstack-ctl')
def save_new_war():
sdir = os.path.join(ctl.zstack_home, "../../../")
shell('yes | cp %s %s' % (new_war.path, sdir))
def chown_to_zstack():
info('change permission to user zstack')
shell('chown -R zstack:zstack %s' % os.path.join(ctl.zstack_home, '../../'))
backup()
download_war_if_needed()
stop_node()
upgrade()
restore_config()
install_tools()
save_new_war()
chown_to_zstack()
info('----------------------------------------------\n'
'Successfully upgraded the ZStack management node to a new version.\n'
                 'We backed up the old zstack as follows:\n'
'\tzstack.properties: %s\n'
'\tzstack folder: %s\n'
                 'Please test your new ZStack. If everything is OK and stable, you can manually delete those backups by deleting %s.\n'
                 'Otherwise you can use them to roll back to the previous version.\n'
'-----------------------------------------------\n' %
(property_file_backup_path, os.path.join(upgrade_tmp_dir, 'zstack'), upgrade_tmp_dir))
def remote_upgrade():
need_copy = 'true'
src_war = new_war.path
dst_war = '/tmp/zstack.war'
if need_download:
need_copy = 'false'
src_war = args.war_file
dst_war = args.war_file
upgrade_script = '''
zstack-ctl upgrade_management_node --war-file=$war_file
if [ $$? -ne 0 ]; then
echo 'failed to upgrade the remote management node'
exit 1
fi
if [ "$need_copy" == "true" ]; then
rm -f $war_file
fi
'''
t = string.Template(upgrade_script)
upgrade_script = t.substitute({
'war_file': dst_war,
'need_copy': need_copy
})
fd, upgrade_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(upgrade_script)
def cleanup_upgrade_script():
os.remove(upgrade_script_path)
self.install_cleanup_routine(cleanup_upgrade_script)
yaml = '''---
- hosts: $host
remote_user: root
vars:
need_copy: "$need_copy"
tasks:
- name: copy zstack.war to remote
copy: src=$src_war dest=$dst_war
when: need_copy == 'true'
- name: upgrade management node
script: $upgrade_script
register: output
ignore_errors: yes
- name: failure
fail: msg="failed to upgrade the remote management node. {{ output.stdout }} {{ output.stderr }}"
when: output.rc != 0
'''
t = string.Template(yaml)
yaml = t.substitute({
"src_war": src_war,
"dst_war": dst_war,
"host": args.host,
"need_copy": need_copy,
"upgrade_script": upgrade_script_path
})
            info('start to upgrade the remote management node; the process may take several minutes ...')
ansible(yaml, args.host, args.debug, ssh_key=args.ssh_key)
info('upgraded the remote management node successfully')
if args.host:
remote_upgrade()
else:
local_upgrade()
class UpgradeMultiManagementNodeCmd(Command):
logger_dir = '/var/log/zstack'
logger_file = 'zstack-ctl.log'
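    # spinner bookkeeping: reset_dict_value() clears all flags and exactly one key is set to True to mark the stage currently shown to the user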
    SpinnerInfo.spinner_status = {'stop_local': False, 'upgrade_local': False, 'start_local': False, 'upgrade': False, 'stop': False, 'start': False}
def __init__(self):
super(UpgradeMultiManagementNodeCmd, self).__init__()
self.name = "upgrade_multi_management_node"
self.description = 'upgrade the management cluster'
ctl.register_command(self)
def start_mn(self, host_post_info):
command = "zstack-ctl start_node && zstack-ctl start_ui"
#Ansible finish command will lead mn stop, so use ssh native connection to start mn
(status, output) = commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(host_post_info.private_key, host_post_info.host, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def install_argparse_arguments(self, parser):
parser.add_argument('--installer-bin','--bin',
help="The new version installer package with absolute path",
required=True)
parser.add_argument('--force', '-F',
help="Force upgrade when database upgrading dry-run failed",
action='store_true', default=False)
def run(self, args):
if os.path.isfile(args.installer_bin) is not True:
error("Didn't find install package %s" % args.installer_bin)
create_log(UpgradeMultiManagementNodeCmd.logger_dir, UpgradeMultiManagementNodeCmd.logger_file)
mn_vo = get_host_list("ManagementNodeVO")
local_mn_ip = get_default_ip()
mn_ip_list = []
cmd = create_check_mgmt_node_command()
cmd(False)
if 'true' not in cmd.stdout:
error("Local management node status is not Running, can't make sure ZStack status is healthy")
for mn in mn_vo:
mn_ip_list.append(mn['hostName'])
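        # upgrade the local management node first: move its IP to the head of the list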
mn_ip_list.insert(0, mn_ip_list.pop(mn_ip_list.index(local_mn_ip)))
all_mn_ip = ' '.join(mn_ip_list)
info(" Will upgrade all 'Running' management nodes: %s" % colored(all_mn_ip,'green'))
ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
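        # the private key sits next to id_rsa.pub; dropping the '.pub' suffix gives its path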
private_key = ssh_key.split('.')[0]
inventory_file = ctl.zstack_home + "/../../../ansible/hosts"
for mn_ip in mn_ip_list:
if mn_ip != local_mn_ip:
host_info = HostPostInfo()
host_info.host = mn_ip
host_info.private_key = private_key
host_info.host_inventory = inventory_file
host_reachable = check_host_reachable(host_info, True)
if host_reachable is True:
spinner_info = SpinnerInfo()
spinner_info.output = "Stop remote management node %s" % mn_ip
spinner_info.name = "stop_%s" % mn_ip
SpinnerInfo.spinner_status['stop_%s' % mn_ip] = False
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['stop_%s' % mn_ip] = True
ZstackSpinner(spinner_info)
command = "zstack-ctl stop_node"
run_remote_command(command, host_info)
else:
# running management node will block upgrade process
error("Management node %s is unreachable, please sync public key %s to other management nodes" % (mn_ip, ssh_key))
else:
spinner_info = SpinnerInfo()
spinner_info.output = "Stop local management node %s" % mn_ip
spinner_info.name = "stop_local"
SpinnerInfo.spinner_status['stop_local'] = False
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['stop_local'] = True
ZstackSpinner(spinner_info)
command = "zstack-ctl stop_node"
shell(command)
for mn_ip in mn_ip_list:
host_info = HostPostInfo()
host_info.host = mn_ip
host_info.private_key = private_key
host_info.host_inventory = inventory_file
if mn_ip == local_mn_ip:
spinner_info = SpinnerInfo()
spinner_info.output = "Upgrade management node on localhost(%s)" % local_mn_ip
spinner_info.name = 'upgrade_local'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_local'] = True
ZstackSpinner(spinner_info)
if args.force is True:
shell("rm -rf /tmp/zstack_upgrade.lock && bash %s -u -F" % args.installer_bin)
else:
shell("rm -rf /tmp/zstack_upgrade.lock && bash %s -u" % args.installer_bin)
spinner_info = SpinnerInfo()
spinner_info.output = "Start management node on localhost(%s)" % local_mn_ip
spinner_info.name = 'start'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start_local'] = True
ZstackSpinner(spinner_info)
shell("zstack-ctl start_node && zstack-ctl start_ui")
else:
spinner_info = SpinnerInfo()
spinner_info.output = "Upgrade management node on host %s" % mn_ip
spinner_info.name = 'upgrade'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade'] = True
ZstackSpinner(spinner_info)
war_file = ctl.zstack_home + "/../../../apache-tomcat-7.0.35/webapps/zstack.war"
ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa"
status,output = commands.getstatusoutput("zstack-ctl upgrade_management_node --host %s --ssh-key %s --war-file %s" % (mn_ip, ssh_key, war_file))
if status != 0:
error(output)
spinner_info = SpinnerInfo()
spinner_info.output = "Start management node on host %s" % mn_ip
spinner_info.name = 'start'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start'] = True
ZstackSpinner(spinner_info)
self.start_mn(host_info)
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
time.sleep(0.3)
info(colored("All management nodes upgrade successfully!",'blue'))
class UpgradeDbCmd(Command):
def __init__(self):
super(UpgradeDbCmd, self).__init__()
self.name = 'upgrade_db'
self.description = (
'upgrade the database from current version to a new version'
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--force', help='bypass management nodes status check.'
'\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)
        parser.add_argument('--no-backup', help='do NOT backup the database. If the database is very large and you have backed it up manually, using this option will speed up the upgrade process. [DEFAULT] false', default=False)
parser.add_argument('--dry-run', help='Check if db could be upgraded. [DEFAULT] not set', action='store_true', default=False)
def run(self, args):
error_if_tool_is_missing('mysqldump')
error_if_tool_is_missing('mysql')
db_url = ctl.get_db_url()
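        # keep only the scheme and host[:port] part of the JDBC URL, then point it at the 'zstack' database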
db_url_params = db_url.split('//')
db_url = db_url_params[0] + '//' + db_url_params[1].split('/')[0]
if 'zstack' not in db_url:
db_url = '%s/zstack' % db_url.rstrip('/')
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
flyway_path = os.path.join(ctl.zstack_home, 'WEB-INF/classes/tools/flyway-3.2.1/flyway')
if not os.path.exists(flyway_path):
raise CtlError('cannot find %s. Have you run upgrade_management_node?' % flyway_path)
upgrading_schema_dir = os.path.join(ctl.zstack_home, 'WEB-INF/classes/db/upgrade/')
if not os.path.exists(upgrading_schema_dir):
raise CtlError('cannot find %s. Have you run upgrade_management_node?' % upgrading_schema_dir)
ctl.check_if_management_node_has_stopped(args.force)
if args.dry_run:
            info('Dry run finished. The database can be upgraded.')
return True
def backup_current_database():
if args.no_backup:
return
info('start to backup the database ...')
db_backup_path = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'db_backup', time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()), 'backup.sql')
shell('mkdir -p %s' % os.path.dirname(db_backup_path))
if db_password:
shell('mysqldump -u %s -p%s --host %s --port %s zstack > %s' % (db_user, db_password, db_hostname, db_port, db_backup_path))
else:
shell('mysqldump -u %s --host %s --port %s zstack > %s' % (db_user, db_hostname, db_port, db_backup_path))
            info('successfully backed up the database to %s' % db_backup_path)
def create_schema_version_table_if_needed():
if db_password:
out = shell('''mysql -u %s -p%s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_password, db_hostname, db_port))
else:
out = shell('''mysql -u %s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_hostname, db_port))
if 'schema_version' in out:
return
info('version table "schema_version" is not existing; initializing a new version table first')
if db_password:
shell_no_pipe('bash %s baseline -baselineVersion=0.6 -baselineDescription="0.6 version" -user=%s -password=%s -url=%s' %
(flyway_path, db_user, db_password, db_url))
else:
shell_no_pipe('bash %s baseline -baselineVersion=0.6 -baselineDescription="0.6 version" -user=%s -url=%s' %
(flyway_path, db_user, db_url))
def migrate():
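            # apply all pending SQL scripts with flyway; -outOfOrder=true also allows scripts whose version numbers are lower than the current schema version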
schema_path = 'filesystem:%s' % upgrading_schema_dir
if db_password:
shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -password=%s -url=%s -locations=%s' % (flyway_path, db_user, db_password, db_url, schema_path))
else:
shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -url=%s -locations=%s' % (flyway_path, db_user, db_url, schema_path))
info('Successfully upgraded the database to the latest version.\n')
backup_current_database()
create_schema_version_table_if_needed()
migrate()
class UpgradeCtlCmd(Command):
def __init__(self):
super(UpgradeCtlCmd, self).__init__()
self.name = 'upgrade_ctl'
self.description = (
'upgrade the zstack-ctl to a new version'
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--package', help='the path to the new zstack-ctl package', required=True)
def run(self, args):
error_if_tool_is_missing('pip')
path = expand_path(args.package)
if not os.path.exists(path):
raise CtlError('%s not found' % path)
pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
if not os.path.isdir(pypi_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
install_script = '''set -e
which virtualenv &>/dev/null
if [ $$? != 0 ]; then
pip install -i file://$pypi_path/simple --trusted-host localhost virtualenv
fi
CTL_VIRENV_PATH=/var/lib/zstack/virtualenv/zstackctl
rm -rf $$CTL_VIRENV_PATH
virtualenv $$CTL_VIRENV_PATH
. $$CTL_VIRENV_PATH/bin/activate
pip install -i file://$pypi_path/simple --trusted-host localhost --ignore-installed $package || exit 1
chmod +x /usr/bin/zstack-ctl
'''
script(install_script, {"pypi_path": pypi_path, "package": args.package})
info('successfully upgraded zstack-ctl to %s' % args.package)
class RollbackManagementNodeCmd(Command):
def __init__(self):
super(RollbackManagementNodeCmd, self).__init__()
self.name = "rollback_management_node"
self.description = "rollback the management node to a previous version if the upgrade fails"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='the IP or DNS name of machine to rollback the management node')
parser.add_argument('--war-file', help='path to zstack.war. A HTTP/HTTPS url or a path to a local zstack.war', required=True)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
parser.add_argument('--property-file', help="the path to zstack.properties. If omitted, the current zstack.properties will be used", default=None)
def run(self, args):
error_if_tool_is_missing('unzip')
rollback_tmp_dir = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'rollback', time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()))
shell('mkdir -p %s' % rollback_tmp_dir)
need_download = args.war_file.startswith('http')
class Info(object):
def __init__(self):
self.war_path = None
self.property_file = None
rollbackinfo = Info()
def local_rollback():
def backup_current_zstack():
info('start to backup the current zstack ...')
shell('cp -r %s %s' % (ctl.zstack_home, rollback_tmp_dir))
info('backup %s to %s' % (ctl.zstack_home, rollback_tmp_dir))
                info('successfully backed up the current zstack to %s' % os.path.join(rollback_tmp_dir, os.path.basename(ctl.zstack_home)))
def download_war_if_needed():
if need_download:
rollbackinfo.war_path = os.path.join(rollback_tmp_dir, 'zstack.war')
shell_no_pipe('wget --no-check-certificate %s -O %s' % (args.war_file, rollbackinfo.war_path))
info('downloaded zstack.war to %s' % rollbackinfo.war_path)
else:
rollbackinfo.war_path = expand_path(args.war_file)
if not os.path.exists(rollbackinfo.war_path):
raise CtlError('%s not found' % rollbackinfo.war_path)
def save_property_file_if_needed():
if not args.property_file:
ctl.internal_run('save_config', '--save-to %s' % rollback_tmp_dir)
rollbackinfo.property_file = os.path.join(rollback_tmp_dir, 'zstack.properties')
else:
rollbackinfo.property_file = args.property_file
if not os.path.exists(rollbackinfo.property_file):
raise CtlError('%s not found' % rollbackinfo.property_file)
def stop_node():
info('start to stop the management node ...')
ctl.internal_run('stop_node')
def rollback():
info('start to rollback the management node ...')
shell('rm -rf %s' % ctl.zstack_home)
shell('unzip %s -d %s' % (rollbackinfo.war_path, ctl.zstack_home))
def restore_config():
info('restoring the zstack.properties ...')
ctl.internal_run('restore_config', '--restore-from %s' % rollbackinfo.property_file)
def install_tools():
                info('rolling back zstack-cli, zstack-ctl to the previous version. This may take several minutes ...')
install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
if not os.path.isfile(install_script):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
shell("bash %s zstack-cli" % install_script)
shell("bash %s zstack-ctl" % install_script)
                info('successfully rolled back zstack-cli, zstack-ctl')
backup_current_zstack()
download_war_if_needed()
save_property_file_if_needed()
stop_node()
rollback()
restore_config()
install_tools()
info('----------------------------------------------\n'
                 'Successfully rolled back the ZStack management node to a previous version.\n'
                 'We backed up the current zstack as follows:\n'
'\tzstack.properties: %s\n'
'\tzstack folder: %s\n'
                 'Please test your ZStack. If everything is OK and stable, you can manually delete those backups by deleting %s.\n'
'-----------------------------------------------\n' %
(rollbackinfo.property_file, os.path.join(rollback_tmp_dir, os.path.basename(ctl.zstack_home)), rollback_tmp_dir))
def remote_rollback():
error_if_tool_is_missing('wget')
need_copy = 'true'
src_war = rollbackinfo.war_path
dst_war = '/tmp/zstack.war'
if need_download:
need_copy = 'false'
src_war = args.war_file
dst_war = args.war_file
rollback_script = '''
zstack-ctl rollback_management_node --war-file=$war_file
if [ $$? -ne 0 ]; then
echo 'failed to rollback the remote management node'
exit 1
fi
if [ "$need_copy" == "true" ]; then
rm -f $war_file
fi
'''
t = string.Template(rollback_script)
rollback_script = t.substitute({
'war_file': dst_war,
'need_copy': need_copy
})
fd, rollback_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(rollback_script)
def cleanup_rollback_script():
os.remove(rollback_script_path)
self.install_cleanup_routine(cleanup_rollback_script)
yaml = '''---
- hosts: $host
remote_user: root
vars:
need_copy: "$need_copy"
tasks:
- name: copy zstack.war to remote
copy: src=$src_war dest=$dst_war
when: need_copy == 'true'
- name: rollback the management node
script: $rollback_script
register: output
ignore_errors: yes
- name: failure
fail: msg="failed to rollback the remote management node. {{ output.stdout }} {{ output.stderr }}"
when: output.rc != 0
'''
t = string.Template(yaml)
yaml = t.substitute({
"src_war": src_war,
"dst_war": dst_war,
"host": args.host,
"need_copy": need_copy,
"rollback_script": rollback_script_path
})
            info('start to roll back the remote management node; the process may take several minutes ...')
ansible(yaml, args.host, args.debug, ssh_key=args.ssh_key)
            info('successfully rolled back the remote management node')
if args.host:
remote_rollback()
else:
local_rollback()
class RollbackDatabaseCmd(Command):
def __init__(self):
super(RollbackDatabaseCmd, self).__init__()
self.name = 'rollback_db'
self.description = "rollback the database to the previous version if the upgrade fails"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--db-dump', help="the previous database dump file", required=True)
parser.add_argument('--root-password', help="the password for mysql root user. [DEFAULT] empty password")
parser.add_argument('--force', help='bypass management nodes status check.'
'\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)
def run(self, args):
error_if_tool_is_missing('mysql')
ctl.check_if_management_node_has_stopped(args.force)
if not os.path.exists(args.db_dump):
raise CtlError('%s not found' % args.db_dump)
host, port, _, _ = ctl.get_live_mysql_portal()
if args.root_password:
cmd = ShellCmd('mysql -u root -p%s --host %s --port %s -e "select 1"' % (args.root_password, host, port))
else:
cmd = ShellCmd('mysql -u root --host %s --port %s -e "select 1"' % (host, port))
cmd(False)
if cmd.return_code != 0:
            error_not_exit('failed to test the mysql server. You may have provided a wrong password for the root user. Please use --root-password to provide the correct password')
cmd.raise_error()
info('start to rollback the database ...')
if args.root_password:
shell('mysql -u root -p%s --host %s --port %s -t zstack < %s' % (args.root_password, host, port, args.db_dump))
else:
shell('mysql -u root --host %s --port %s -t zstack < %s' % (host, port, args.db_dump))
        info('successfully rolled back the database to the dump file %s' % args.db_dump)
class StopUiCmd(Command):
def __init__(self):
super(StopUiCmd, self).__init__()
self.name = 'stop_ui'
self.description = "stop UI server on the local or remote host"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
def _remote_stop(self, host):
cmd = '/etc/init.d/zstack-dashboard stop'
ssh_run_no_pipe(host, cmd)
def run(self, args):
if args.host != 'localhost':
self._remote_stop(args.host)
return
pidfile = '/var/run/zstack/zstack-dashboard.pid'
if os.path.exists(pidfile):
with open(pidfile, 'r') as fd:
pid = fd.readline()
pid = pid.strip(' \t\n\r')
shell('kill %s >/dev/null 2>&1' % pid, is_exception=False)
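        # fall back to killing any remaining zstack_dashboard process found by command line, repeating until none is left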
def stop_all():
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
shell('kill -9 %s >/dev/null 2>&1' % pid)
stop_all()
else:
return
stop_all()
info('successfully stopped the UI server')
class UiStatusCmd(Command):
def __init__(self):
super(UiStatusCmd, self).__init__()
self.name = "ui_status"
self.description = "check the UI server status on the local or remote host."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)
def _remote_status(self, host):
cmd = '/etc/init.d/zstack-dashboard status'
ssh_run_no_pipe(host, cmd)
def run(self, args):
self.quiet = args.quiet
if args.host != 'localhost':
self._remote_status(args.host)
return
ha_info_file = '/var/lib/zstack/ha/ha.yaml'
pidfile = '/var/run/zstack/zstack-dashboard.pid'
portfile = '/var/run/zstack/zstack-dashboard.port'
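        # in HA mode report the UI URL behind the virtual IP; otherwise fall back to the default local IP and the recorded port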
if os.path.exists(pidfile):
with open(pidfile, 'r') as fd:
pid = fd.readline()
pid = pid.strip(' \t\n\r')
check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
check_pid_cmd(is_exception=False)
if check_pid_cmd.return_code == 0:
if os.path.exists(ha_info_file):
with open(ha_info_file, 'r') as fd2:
ha_conf = yaml.load(fd2)
if check_ip_port(ha_conf['vip'], 8888):
info('UI status: %s [PID:%s] http://%s:8888' % (colored('Running', 'green'), pid, ha_conf['vip']))
else:
info('UI status: %s' % colored('Unknown', 'yellow'))
return
default_ip = get_default_ip()
if not default_ip:
info('UI status: %s [PID:%s]' % (colored('Running', 'green'), pid))
else:
if os.path.exists(portfile):
with open(portfile, 'r') as fd2:
port = fd2.readline()
port = port.strip(' \t\n\r')
else:
port = 5000
info('UI status: %s [PID:%s] http://%s:%s' % (colored('Running', 'green'), pid, default_ip, port))
return
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
info('UI status: %s [PID: %s]' % (colored('Zombie', 'yellow'), pid))
else:
info('UI status: %s [PID: %s]' % (colored('Stopped', 'red'), pid))
class InstallLicenseCmd(Command):
def __init__(self):
super(InstallLicenseCmd, self).__init__()
self.name = "install_license"
self.description = "install zstack license"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--license', '-f', help="path to the license file", required=True)
parser.add_argument('--prikey', help="[OPTIONAL] the path to the private key used to generate license request")
def run(self, args):
lpath = expand_path(args.license)
if not os.path.isfile(lpath):
raise CtlError('cannot find the license file at %s' % args.license)
ppath = None
if args.prikey:
ppath = expand_path(args.prikey)
if not os.path.isfile(ppath):
raise CtlError('cannot find the private key file at %s' % args.prikey)
license_folder = '/var/lib/zstack/license'
shell('''mkdir -p %s''' % license_folder)
shell('''chown zstack:zstack %s''' % license_folder)
shell('''yes | cp %s %s/license.txt''' % (lpath, license_folder))
shell('''chown zstack:zstack %s/license.txt''' % license_folder)
info("successfully installed the license file to %s/license.txt" % license_folder)
if ppath:
shell('''yes | cp %s %s/pri.key''' % (ppath, license_folder))
shell('''chown zstack:zstack %s/pri.key''' % license_folder)
info("successfully installed the private key file to %s/pri.key" % license_folder)
class StartUiCmd(Command):
PID_FILE = '/var/run/zstack/zstack-dashboard.pid'
def __init__(self):
super(StartUiCmd, self).__init__()
self.name = "start_ui"
self.description = "start UI server on the local or remote host"
ctl.register_command(self)
if not os.path.exists(os.path.dirname(self.PID_FILE)):
shell("mkdir -p %s" % os.path.dirname(self.PID_FILE))
shell("mkdir -p /var/log/zstack")
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
parser.add_argument('--port', help="UI server port. [DEFAULT] 5000", default='5000')
def _remote_start(self, host, params):
cmd = '/etc/init.d/zstack-dashboard start --rabbitmq %s' % params
ssh_run_no_pipe(host, cmd)
        info('successfully started the UI server on the remote host[%s]' % host)
def _check_status(self, port):
if os.path.exists(self.PID_FILE):
with open(self.PID_FILE, 'r') as fd:
pid = fd.readline()
pid = pid.strip(' \t\n\r')
check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
check_pid_cmd(is_exception=False)
if check_pid_cmd.return_code == 0:
default_ip = get_default_ip()
if not default_ip:
info('UI server is still running[PID:%s]' % pid)
else:
info('UI server is still running[PID:%s], http://%s:%s' % (pid, default_ip, port))
return False
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
            info('found a zombie UI server[PID:%s]; killing it and starting a new one' % pid)
shell('kill -9 %s > /dev/null' % pid)
return True
def run(self, args):
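        # collect RabbitMQ endpoints: prefer UI.vip.* entries and fall back to CloudBus.serverIp.*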
ips = ctl.read_property_list("UI.vip.")
if not ips:
ips = ctl.read_property_list("CloudBus.serverIp.")
if not ips:
raise CtlError('no RabbitMQ IPs found in %s. The IPs should be configured as CloudBus.serverIp.0, CloudBus.serverIp.1 ... CloudBus.serverIp.N' % ctl.properties_file_path)
ips = [v for k, v in ips]
username = ctl.read_property("CloudBus.rabbitmqUsername")
password = ctl.read_property("CloudBus.rabbitmqPassword")
if username and not password:
raise CtlError('CloudBus.rabbitmqUsername is configured but CloudBus.rabbitmqPassword is not. They must be both set or not set. Check %s' % ctl.properties_file_path)
if not username and password:
raise CtlError('CloudBus.rabbitmqPassword is configured but CloudBus.rabbitmqUsername is not. They must be both set or not set. Check %s' % ctl.properties_file_path)
if username and password:
urls = ["%s:%s@%s" % (username, password, ip) for ip in ips]
else:
urls = ips
param = ','.join(urls)
if args.host != 'localhost':
self._remote_start(args.host, param)
return
virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
if not os.path.exists(virtualenv):
raise CtlError('%s not found. Are you sure the UI server is installed on %s?' % (virtualenv, args.host))
if not self._check_status(args.port):
return
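        # make sure the UI port is accepted by the firewall; how the rule is persisted differs between CentOS and Ubuntu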
distro = platform.dist()[0]
if distro == 'centos':
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.port, args.port))
        elif distro == 'Ubuntu':
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.port, args.port))
        else:
            shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT ' % (args.port, args.port))
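        # start the dashboard inside its virtualenv in the background, logging to /var/log/zstack/zstack-dashboard.log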
scmd = '. %s/bin/activate\nZSTACK_DASHBOARD_PORT=%s nohup python -c "from zstack_dashboard import web; web.main()" --rabbitmq %s >/var/log/zstack/zstack-dashboard.log 2>&1 </dev/null &' % (virtualenv, args.port, param)
script(scmd, no_pipe=True)
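        # keep retrying write_pid (per loop_until_timeout, presumably every 0.5s for up to 5s) until the dashboard process shows up and its PID is recorded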
@loop_until_timeout(5, 0.5)
def write_pid():
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
with open(self.PID_FILE, 'w') as fd:
fd.write(str(pid))
return True
else:
return False
write_pid()
pid = find_process_by_cmdline('zstack_dashboard')
if not pid:
            info('failed to start the UI server on the local host. Use zstack-ctl start_ui to restart it. The ZStack UI log can be found in /var/log/zstack/zstack-dashboard.log')
return False
default_ip = get_default_ip()
if not default_ip:
info('successfully started UI server on the local host, PID[%s]' % pid)
else:
info('successfully started UI server on the local host, PID[%s], http://%s:%s' % (pid, default_ip, args.port))
os.system('mkdir -p /var/run/zstack/')
with open('/var/run/zstack/zstack-dashboard.port', 'w') as fd:
fd.write(args.port)
def main():
AddManagementNodeCmd()
BootstrapCmd()
ChangeIpCmd()
CollectLogCmd()
ConfigureCmd()
DumpMysqlCmd()
ChangeMysqlPasswordCmd()
DeployDBCmd()
GetEnvironmentVariableCmd()
InstallWebUiCmd()
InstallHACmd()
InstallDbCmd()
InstallRabbitCmd()
InstallManagementNodeCmd()
InstallLicenseCmd()
ShowConfiguration()
SetEnvironmentVariableCmd()
RollbackManagementNodeCmd()
RollbackDatabaseCmd()
ResetRabbitCmd()
RestoreConfigCmd()
RestartNodeCmd()
RestoreMysqlCmd()
RecoverHACmd()
ShowStatusCmd()
StartCmd()
StopCmd()
SaveConfigCmd()
StartUiCmd()
StopUiCmd()
StartAllCmd()
StopAllCmd()
TailLogCmd()
UiStatusCmd()
UnsetEnvironmentVariableCmd()
UpgradeManagementNodeCmd()
UpgradeMultiManagementNodeCmd()
UpgradeDbCmd()
UpgradeCtlCmd()
UpgradeHACmd()
try:
ctl.run()
except CtlError as e:
if ctl.verbose:
error_not_exit(traceback.format_exc())
error(str(e))
if __name__ == '__main__':
main()
| mrwangxc/zstack-utility | zstackctl/zstackctl/ctl.py | Python | apache-2.0 | 302,359 |
# Copyright (C) 2014 VA Linux Systems Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K.
import collections
import contextlib
import mock
from neutron.openstack.common import importutils
import neutron.plugins.ofagent.agent.metadata as meta
from neutron.tests.unit.ofagent import ofa_test_base
_OFALIB_NAME = 'neutron.plugins.ofagent.agent.arp_lib'
class OFAAgentTestCase(ofa_test_base.OFAAgentTestBase):
def setUp(self):
super(OFAAgentTestCase, self).setUp()
Net = collections.namedtuple('Net', 'net, mac, ip')
self.nets = [Net(net=10, mac='11:11:11:44:55:66', ip='10.1.2.20'),
Net(net=10, mac='11:11:11:44:55:67', ip='10.1.2.21'),
Net(net=20, mac='22:22:22:44:55:66', ip='10.2.2.20')]
self.packet_mod = mock.Mock()
self.proto_ethernet_mod = mock.Mock()
self.proto_vlan_mod = mock.Mock()
self.proto_vlan_mod.vid = 999
self.proto_arp_mod = mock.Mock()
self.fake_get_protocol = mock.Mock(return_value=self.proto_vlan_mod)
self.packet_mod.get_protocol = self.fake_get_protocol
self.fake_add_protocol = mock.Mock()
self.packet_mod.add_protocol = self.fake_add_protocol
self.arp = importutils.import_module('ryu.lib.packet.arp')
self.ethernet = importutils.import_module('ryu.lib.packet.ethernet')
self.vlan = importutils.import_module('ryu.lib.packet.vlan')
mock.patch('ryu.lib.packet.packet.Packet',
return_value=self.packet_mod).start()
self.ryuapp = 'ryuapp'
self.inport = '1'
self.ev = mock.Mock()
self.datapath = self._mk_test_dp('tun_br')
self.ofproto = importutils.import_module('ryu.ofproto.ofproto_v1_3')
self.ofpp = mock.Mock()
self.datapath.ofproto = self.ofproto
self.datapath.ofproto_parser = self.ofpp
self.OFPActionOutput = mock.Mock()
self.OFPActionOutput.return_value = 'OFPActionOutput'
self.ofpp.OFPActionOutput = self.OFPActionOutput
self.msg = mock.Mock()
self.msg.datapath = self.datapath
self.msg.buffer_id = self.ofproto.OFP_NO_BUFFER
self.msg_data = 'test_message_data'
self.msg.data = self.msg_data
self.ev.msg = self.msg
self.msg.match = {'in_port': self.inport,
'metadata': meta.LOCAL | self.nets[0].net}
class TestArpLib(OFAAgentTestCase):
def setUp(self):
super(TestArpLib, self).setUp()
self.mod_arplib = importutils.import_module(_OFALIB_NAME)
self.arplib = self.mod_arplib.ArpLib(self.ryuapp)
self.packet_mod.get_protocol = self._fake_get_protocol
self._fake_get_protocol_ethernet = True
self._fake_get_protocol_vlan = True
self._fake_get_protocol_arp = True
self.br = mock.Mock(datapath=self.datapath)
self.arplib.set_bridge(self.br)
def test__send_unknown_packet_no_buffer(self):
in_port = 3
out_port = self.ofproto.OFPP_TABLE
self.msg.buffer_id = self.ofproto.OFP_NO_BUFFER
self.arplib._send_unknown_packet(self.msg, in_port, out_port)
actions = [self.ofpp.OFPActionOutput(self.ofproto.OFPP_TABLE, 0)]
self.ofpp.OFPPacketOut.assert_called_once_with(
datapath=self.datapath,
buffer_id=self.msg.buffer_id,
in_port=in_port,
actions=actions,
data=self.msg_data)
def test__send_unknown_packet_existence_buffer(self):
in_port = 3
out_port = self.ofproto.OFPP_TABLE
self.msg.buffer_id = 256
self.arplib._send_unknown_packet(self.msg, in_port, out_port)
actions = [self.ofpp.OFPActionOutput(self.ofproto.OFPP_TABLE, 0)]
self.ofpp.OFPPacketOut.assert_called_once_with(
datapath=self.datapath,
buffer_id=self.msg.buffer_id,
in_port=in_port,
actions=actions,
data=None)
def test__respond_arp(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}}
port = 3
arptbl = self.arplib._arp_tbl[self.nets[0].net]
pkt_ethernet = self.ethernet
pkt_vlan = self.vlan
pkt_arp = self.arp
pkt_arp.opcode = self.arp.ARP_REQUEST
pkt_arp.dst_ip = self.nets[0].ip
with mock.patch.object(
self.arplib, '_send_arp_reply'
) as send_arp_rep_fn:
self.assertTrue(
self.arplib._respond_arp(self.datapath, port, arptbl,
pkt_ethernet, pkt_vlan, pkt_arp))
ethernet_ethernet = self.ethernet.ethernet(
ethertype=pkt_ethernet.ethertype,
dst=pkt_ethernet.src,
src=self.nets[0].mac)
vlan_vlan = self.vlan.vlan(cfi=pkt_vlan.cfi,
ethertype=pkt_vlan.ethertype,
pcp=pkt_vlan.pcp,
vid=pkt_vlan.vid)
arp_arp = self.arp.arp(opcode=self.arp.ARP_REPLY,
src_mac=self.nets[0].mac,
src_ip=pkt_arp.dst_ip,
dst_mac=pkt_arp.src_mac,
dst_ip=pkt_arp.src_ip)
self.fake_add_protocol.assert_has_calls([mock.call(ethernet_ethernet),
mock.call(vlan_vlan),
mock.call(arp_arp)])
send_arp_rep_fn.assert_called_once_with(
self.datapath, port, self.packet_mod)
def _test__respond_arp(self, pkt_arp):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}}
port = 3
arptbl = self.arplib._arp_tbl[self.nets[0].net]
pkt_ethernet = mock.Mock()
pkt_vlan = mock.Mock()
self.assertFalse(
self.arplib._respond_arp(self.datapath, port, arptbl,
pkt_ethernet, pkt_vlan, pkt_arp))
def test__respond_arp_non_arp_req(self):
pkt_arp = mock.Mock()
pkt_arp.opcode = self.arp.ARP_REPLY
self._test__respond_arp(pkt_arp)
def test__respond_arp_ip_not_found_in_arptable(self):
pkt_arp = mock.Mock()
pkt_arp.opcode = self.arp.ARP_REQUEST
pkt_arp.dst_ip = self.nets[1].ip
self._test__respond_arp(pkt_arp)
def test_add_arp_table_entry(self):
self.arplib.add_arp_table_entry(self.nets[0].net,
self.nets[0].ip, self.nets[0].mac)
self.assertEqual(
self.arplib._arp_tbl,
{self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}})
def test_add_arp_table_entry_multiple_net(self):
self.arplib.add_arp_table_entry(self.nets[0].net,
self.nets[0].ip, self.nets[0].mac)
self.arplib.add_arp_table_entry(self.nets[2].net,
self.nets[2].ip, self.nets[2].mac)
self.assertEqual(
self.arplib._arp_tbl,
{self.nets[0].net: {self.nets[0].ip: self.nets[0].mac},
self.nets[2].net: {self.nets[2].ip: self.nets[2].mac}})
def test_add_arp_table_entry_multiple_ip(self):
self.arplib.add_arp_table_entry(self.nets[0].net,
self.nets[0].ip, self.nets[0].mac)
self.arplib.add_arp_table_entry(self.nets[0].net,
self.nets[1].ip, self.nets[1].mac)
self.assertEqual(
self.arplib._arp_tbl,
{self.nets[0].net: {self.nets[0].ip: self.nets[0].mac,
self.nets[1].ip: self.nets[1].mac}})
def test_del_arp_table_entry(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}}
self.arplib.del_arp_table_entry(self.nets[0].net, self.nets[0].ip)
self.assertEqual(self.arplib._arp_tbl, {})
def test_del_arp_table_entry_multiple_net(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac},
self.nets[2].net: {self.nets[2].ip: self.nets[2].mac}}
self.arplib.del_arp_table_entry(self.nets[0].net, self.nets[0].ip)
self.assertEqual(
self.arplib._arp_tbl,
{self.nets[2].net: {self.nets[2].ip: self.nets[2].mac}})
def test_del_arp_table_entry_multiple_ip(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac,
self.nets[1].ip: self.nets[1].mac}}
self.arplib.del_arp_table_entry(self.nets[0].net, self.nets[1].ip)
self.assertEqual(
self.arplib._arp_tbl,
{self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}})
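    # stand-in for ryu's Packet.get_protocol(): return the mocked protocol only when the corresponding flag is enabled, otherwise None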
def _fake_get_protocol(self, net_type):
if net_type == self.ethernet.ethernet:
if self._fake_get_protocol_ethernet:
return self.proto_ethernet_mod
else:
return
if net_type == self.vlan.vlan:
if self._fake_get_protocol_vlan:
return self.proto_vlan_mod
else:
return
if net_type == self.arp.arp:
if self._fake_get_protocol_arp:
return self.proto_arp_mod
else:
return
def _test_packet_in_handler(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}}
with contextlib.nested(
mock.patch.object(self.arplib, '_respond_arp',
return_value=True),
mock.patch.object(self.br,
'arp_passthrough'),
mock.patch.object(self.arplib,
'_send_unknown_packet'),
) as (res_arp_fn, add_flow_fn, send_unknown_pk_fn):
self.arplib.packet_in_handler(self.ev)
self.assertFalse(add_flow_fn.call_count)
self.assertFalse(send_unknown_pk_fn.call_count)
res_arp_fn.assert_called_once_with(
self.datapath, self.inport,
self.arplib._arp_tbl[self.nets[0].net],
self.proto_ethernet_mod,
self.proto_vlan_mod if self._fake_get_protocol_vlan else None,
self.proto_arp_mod)
def _test_packet_in_handler_drop(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}}
with contextlib.nested(
mock.patch.object(self.arplib, '_respond_arp',
return_value=True),
mock.patch.object(self.br, 'arp_passthrough'),
mock.patch.object(self.arplib,
'_send_unknown_packet'),
) as (res_arp_fn, add_flow_fn, send_unknown_pk_fn):
self.arplib.packet_in_handler(self.ev)
self.assertFalse(add_flow_fn.call_count)
self.assertFalse(send_unknown_pk_fn.call_count)
self.assertFalse(res_arp_fn.call_count)
def test_packet_in_handler(self):
self._test_packet_in_handler()
def test_packet_in_handler_non_ethernet(self):
self._fake_get_protocol_ethernet = False
self._test_packet_in_handler_drop()
def test_packet_in_handler_non_vlan(self):
self._fake_get_protocol_vlan = False
self._test_packet_in_handler()
def test_packet_in_handler_non_arp(self):
self._fake_get_protocol_arp = False
self._test_packet_in_handler_drop()
def test_packet_in_handler_unknown_network(self):
self.arplib._arp_tbl = {
self.nets[0].net: {self.nets[0].ip: self.nets[0].mac}}
with contextlib.nested(
mock.patch.object(self.arplib, '_respond_arp',
return_value=False),
mock.patch.object(self.br, 'arp_passthrough'),
mock.patch.object(self.arplib,
'_send_unknown_packet'),
) as (res_arp_fn, add_flow_fn, send_unknown_pk_fn):
self.arplib.packet_in_handler(self.ev)
add_flow_fn.assert_called_once_with(
network=self.nets[0].net,
tpa=self.proto_arp_mod.dst_ip)
send_unknown_pk_fn.assert_called_once_with(
self.ev.msg, self.msg.match['in_port'],
self.datapath.ofproto.OFPP_TABLE)
res_arp_fn.assert_called_once_with(
self.datapath, self.inport,
self.arplib._arp_tbl[self.nets[0].net],
self.proto_ethernet_mod, self.proto_vlan_mod, self.proto_arp_mod)
| CingHu/neutron-ustack | neutron/tests/unit/ofagent/test_arp_lib.py | Python | apache-2.0 | 13,264 |
"""Test deCONZ gateway."""
from unittest.mock import Mock, patch
import pytest
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.components.deconz import errors, gateway
from tests.common import mock_coro
import pydeconz
ENTRY_CONFIG = {
"host": "1.2.3.4",
"port": 80,
"api_key": "1234567890ABCDEF",
"bridgeid": "0123456789ABCDEF",
"allow_clip_sensor": True,
"allow_deconz_groups": True,
}
async def test_gateway_setup():
"""Successful setup."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
api = Mock()
api.async_add_remote.return_value = Mock()
api.sensors = {}
deconz_gateway = gateway.DeconzGateway(hass, entry)
with patch.object(gateway, 'get_gateway', return_value=mock_coro(api)), \
patch.object(
gateway, 'async_dispatcher_connect', return_value=Mock()):
assert await deconz_gateway.async_setup() is True
assert deconz_gateway.api is api
assert len(hass.config_entries.async_forward_entry_setup.mock_calls) == 7
assert hass.config_entries.async_forward_entry_setup.mock_calls[0][1] == \
(entry, 'binary_sensor')
assert hass.config_entries.async_forward_entry_setup.mock_calls[1][1] == \
(entry, 'climate')
assert hass.config_entries.async_forward_entry_setup.mock_calls[2][1] == \
(entry, 'cover')
assert hass.config_entries.async_forward_entry_setup.mock_calls[3][1] == \
(entry, 'light')
assert hass.config_entries.async_forward_entry_setup.mock_calls[4][1] == \
(entry, 'scene')
assert hass.config_entries.async_forward_entry_setup.mock_calls[5][1] == \
(entry, 'sensor')
assert hass.config_entries.async_forward_entry_setup.mock_calls[6][1] == \
(entry, 'switch')
assert len(api.start.mock_calls) == 1
async def test_gateway_retry():
"""Retry setup."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
deconz_gateway = gateway.DeconzGateway(hass, entry)
with patch.object(
gateway, 'get_gateway', side_effect=errors.CannotConnect), \
pytest.raises(ConfigEntryNotReady):
await deconz_gateway.async_setup()
async def test_gateway_setup_fails():
"""Retry setup."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
deconz_gateway = gateway.DeconzGateway(hass, entry)
with patch.object(gateway, 'get_gateway', side_effect=Exception):
result = await deconz_gateway.async_setup()
assert not result
async def test_connection_status(hass):
"""Make sure that connection status triggers a dispatcher send."""
entry = Mock()
entry.data = ENTRY_CONFIG
deconz_gateway = gateway.DeconzGateway(hass, entry)
with patch.object(gateway, 'async_dispatcher_send') as mock_dispatch_send:
deconz_gateway.async_connection_status_callback(True)
await hass.async_block_till_done()
assert len(mock_dispatch_send.mock_calls) == 1
assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_add_device(hass):
"""Successful retry setup."""
entry = Mock()
entry.data = ENTRY_CONFIG
deconz_gateway = gateway.DeconzGateway(hass, entry)
with patch.object(gateway, 'async_dispatcher_send') as mock_dispatch_send:
deconz_gateway.async_add_device_callback('sensor', Mock())
await hass.async_block_till_done()
assert len(mock_dispatch_send.mock_calls) == 1
assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_add_remote():
"""Successful add remote."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
remote = Mock()
remote.name = 'name'
remote.type = 'ZHASwitch'
remote.register_async_callback = Mock()
deconz_gateway = gateway.DeconzGateway(hass, entry)
deconz_gateway.async_add_remote([remote])
assert len(deconz_gateway.events) == 1
async def test_shutdown():
"""Successful shutdown."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
deconz_gateway = gateway.DeconzGateway(hass, entry)
deconz_gateway.api = Mock()
deconz_gateway.shutdown(None)
assert len(deconz_gateway.api.close.mock_calls) == 1
async def test_reset_after_successful_setup():
"""Verify that reset works on a setup component."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
api = Mock()
api.async_add_remote.return_value = Mock()
api.sensors = {}
deconz_gateway = gateway.DeconzGateway(hass, entry)
with patch.object(gateway, 'get_gateway', return_value=mock_coro(api)), \
patch.object(
gateway, 'async_dispatcher_connect', return_value=Mock()):
assert await deconz_gateway.async_setup() is True
listener = Mock()
deconz_gateway.listeners = [listener]
event = Mock()
event.async_will_remove_from_hass = Mock()
deconz_gateway.events = [event]
deconz_gateway.deconz_ids = {'key': 'value'}
hass.config_entries.async_forward_entry_unload.return_value = \
mock_coro(True)
assert await deconz_gateway.async_reset() is True
assert len(hass.config_entries.async_forward_entry_unload.mock_calls) == 7
assert len(listener.mock_calls) == 1
assert len(deconz_gateway.listeners) == 0
assert len(event.async_will_remove_from_hass.mock_calls) == 1
assert len(deconz_gateway.events) == 0
assert len(deconz_gateway.deconz_ids) == 0
async def test_get_gateway(hass):
"""Successful call."""
with patch('pydeconz.DeconzSession.async_load_parameters',
return_value=mock_coro(True)):
assert await gateway.get_gateway(hass, ENTRY_CONFIG, Mock(), Mock())
async def test_get_gateway_fails_unauthorized(hass):
"""Failed call."""
with patch('pydeconz.DeconzSession.async_load_parameters',
side_effect=pydeconz.errors.Unauthorized), \
pytest.raises(errors.AuthenticationRequired):
assert await gateway.get_gateway(
hass, ENTRY_CONFIG, Mock(), Mock()) is False
async def test_get_gateway_fails_cannot_connect(hass):
"""Failed call."""
with patch('pydeconz.DeconzSession.async_load_parameters',
side_effect=pydeconz.errors.RequestError), \
pytest.raises(errors.CannotConnect):
assert await gateway.get_gateway(
hass, ENTRY_CONFIG, Mock(), Mock()) is False
async def test_create_event():
"""Successfully created a deCONZ event."""
hass = Mock()
remote = Mock()
remote.name = 'Name'
event = gateway.DeconzEvent(hass, remote)
assert event._id == 'name'
async def test_update_event():
"""Successfully update a deCONZ event."""
hass = Mock()
remote = Mock()
remote.name = 'Name'
event = gateway.DeconzEvent(hass, remote)
remote.changed_keys = {'state': True}
event.async_update_callback()
assert len(hass.bus.async_fire.mock_calls) == 1
async def test_remove_event():
"""Successfully update a deCONZ event."""
hass = Mock()
remote = Mock()
remote.name = 'Name'
event = gateway.DeconzEvent(hass, remote)
event.async_will_remove_from_hass()
assert event._device is None
| jabesq/home-assistant | tests/components/deconz/test_gateway.py | Python | apache-2.0 | 7,242 |
"""Support for Freedompro sensor."""
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import LIGHT_LUX, PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
DEVICE_CLASS_MAP = {
"temperatureSensor": SensorDeviceClass.TEMPERATURE,
"humiditySensor": SensorDeviceClass.HUMIDITY,
"lightSensor": SensorDeviceClass.ILLUMINANCE,
}
STATE_CLASS_MAP = {
"temperatureSensor": SensorStateClass.MEASUREMENT,
"humiditySensor": SensorStateClass.MEASUREMENT,
"lightSensor": None,
}
UNIT_MAP = {
"temperatureSensor": TEMP_CELSIUS,
"humiditySensor": PERCENTAGE,
"lightSensor": LIGHT_LUX,
}
DEVICE_KEY_MAP = {
"temperatureSensor": "currentTemperature",
"humiditySensor": "currentRelativeHumidity",
"lightSensor": "currentAmbientLightLevel",
}
SUPPORTED_SENSORS = {"temperatureSensor", "humiditySensor", "lightSensor"}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Freedompro sensor."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
Device(device, coordinator)
for device in coordinator.data
if device["type"] in SUPPORTED_SENSORS
)
class Device(CoordinatorEntity, SensorEntity):
"""Representation of an Freedompro sensor."""
def __init__(self, device, coordinator):
"""Initialize the Freedompro sensor."""
super().__init__(coordinator)
self._attr_name = device["name"]
self._attr_unique_id = device["uid"]
self._type = device["type"]
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, self.unique_id),
},
manufacturer="Freedompro",
model=device["type"],
name=self.name,
)
self._attr_device_class = DEVICE_CLASS_MAP[device["type"]]
self._attr_state_class = STATE_CLASS_MAP[device["type"]]
self._attr_native_unit_of_measurement = UNIT_MAP[device["type"]]
self._attr_native_value = 0
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
device = next(
(
device
for device in self.coordinator.data
if device["uid"] == self.unique_id
),
None,
)
if device is not None and "state" in device:
state = device["state"]
self._attr_native_value = state[DEVICE_KEY_MAP[self._type]]
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
| mezz64/home-assistant | homeassistant/components/freedompro/sensor.py | Python | apache-2.0 | 3,148 |
"""Unit test for treadmill.appcfg.configure.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import sys
import tempfile
import unittest
import mock
import treadmill
from treadmill.appcfg import configure as app_cfg
from treadmill.trace.app import events
class AppCfgConfigureTest(unittest.TestCase):
"""Tests for teadmill.appcfg.configure"""
def setUp(self):
# Access protected module _base_service
# pylint: disable=W0212
self.root = tempfile.mkdtemp()
self.tm_env = mock.Mock(
apps_dir=os.path.join(self.root, 'apps'),
cleanup_dir=os.path.join(self.root, 'cleanup'),
running_tombstone_dir=os.path.join(self.root, 'tombstones',
'running')
)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
@mock.patch('pwd.getpwnam', mock.Mock(auto_spec=True))
@mock.patch('shutil.copyfile', mock.Mock(auto_spec=True))
@mock.patch('treadmill.appcfg.manifest.load', auto_spec=True)
@mock.patch('treadmill.trace.post', mock.Mock(auto_spec=True))
@mock.patch('treadmill.fs.write_safe', mock.mock_open())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='mock'))
@mock.patch('treadmill.supervisor.create_service', auto_spec=True)
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_configure_linux(self, mock_create_svc, mock_load):
"""Tests that appcfg.configure creates necessary s6 layout."""
manifest = {
'proid': 'foo',
'environment': 'dev',
'shared_network': False,
'cpu': '100',
'memory': '100M',
'disk': '100G',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 5,
'interval': 60,
},
},
],
'environ': [
{
'name': 'Hello',
'value': 'World!',
},
],
'zookeeper': 'foo',
'cell': 'cell',
'system_services': [],
'endpoints': [
{
'name': 'http',
'port': '8000',
},
],
'name': 'proid.myapp#0',
'uniqueid': 'AAAAA',
}
mock_load.return_value = manifest
app_unique_name = 'proid.myapp-0-00000000AAAAA'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_create_svc.return_value.data_dir = os.path.join(app_dir, 'data')
app_cfg.configure(self.tm_env, '/some/event', 'linux')
mock_load.assert_called_with('/some/event')
mock_create_svc.assert_called_with(
self.tm_env.apps_dir,
name=app_unique_name,
app_run_script=mock.ANY,
downed=False,
monitor_policy={
'limit': 0,
'interval': 60,
'tombstone': {
'uds': False,
'path': self.tm_env.running_tombstone_dir,
'id': 'proid.myapp#0'
}
},
userid='root',
environ={},
environment='dev'
)
treadmill.fs.write_safe.assert_called_with(
os.path.join(app_dir, 'data', 'app.json'),
mock.ANY,
mode='w',
permission=0o644
)
shutil.copyfile.assert_called_with(
'/some/event',
os.path.join(app_dir, 'data', 'manifest.yml')
)
treadmill.trace.post.assert_called_with(
mock.ANY,
events.ConfiguredTraceEvent(
instanceid='proid.myapp#0',
uniqueid='AAAAA',
payload=None
)
)
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
@mock.patch('pwd.getpwnam', mock.Mock(auto_spec=True))
@mock.patch('shutil.copyfile', mock.Mock(auto_spec=True))
@mock.patch('shutil.rmtree', mock.Mock())
@mock.patch('treadmill.appcfg.manifest.load', auto_spec=True)
@mock.patch('treadmill.trace.post', mock.Mock(auto_spec=True))
@mock.patch('treadmill.fs.write_safe', mock.mock_open())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='mock'))
@mock.patch('treadmill.supervisor.create_service', auto_spec=True)
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_configure_linux_event_rm(self, mock_create_svc, mock_load):
"""Tests when event file is removed when copied."""
manifest = {
'proid': 'foo',
'environment': 'dev',
'shared_network': False,
'cpu': '100',
'memory': '100M',
'disk': '100G',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 5,
'interval': 60,
},
},
],
'system_services': [],
'endpoints': [
{
'name': 'http',
'port': '8000',
},
],
'environ': [
{
'name': 'Hello',
'value': 'World!',
},
],
'cell': 'cell',
'zookeeper': 'foo',
'name': 'proid.myapp#0',
'uniqueid': 'AAAAA',
}
mock_load.return_value = manifest
app_unique_name = 'proid.myapp-0-00000000AAAAA'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_create_svc.return_value.directory = app_dir
mock_create_svc.return_value.data_dir = os.path.join(app_dir, 'data')
shutil.copyfile.side_effect = IOError(2, 'No such file or directory')
app_cfg.configure(self.tm_env, '/some/event', 'linux')
mock_load.assert_called_with('/some/event')
mock_create_svc.assert_called_with(
self.tm_env.apps_dir,
name=app_unique_name,
app_run_script=mock.ANY,
downed=False,
monitor_policy={
'limit': 0,
'interval': 60,
'tombstone': {
'uds': False,
'path': self.tm_env.running_tombstone_dir,
'id': 'proid.myapp#0'
}
},
userid='root',
environ={},
environment='dev'
)
shutil.copyfile.assert_called_with(
'/some/event',
os.path.join(app_dir, 'data', 'manifest.yml')
)
treadmill.fs.write_safe.assert_not_called()
shutil.rmtree.assert_called_with(app_dir)
treadmill.trace.post.assert_not_called()
if __name__ == '__main__':
unittest.main()
| Morgan-Stanley/treadmill | lib/python/treadmill/tests/appcfg/configure_test.py | Python | apache-2.0 | 7,613 |
###############################################################################
#
# file: urlfetcher.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
Simple screensaver that displays data from a URL.
See additional information in the class itself.
The screen class available here is:
* `UrlFetcherScreen`
"""
#
# Internal modules
#
from termsaverlib.screen.base.urlfetcher import UrlFetcherBase
from termsaverlib import constants
from termsaverlib.i18n import _
class UrlFetcherScreen(UrlFetcherBase):
"""
Simple screensaver that displays data from a URL.
"""
def __init__(self):
"""
Creates a new instance of this class.
"""
UrlFetcherBase.__init__(self,
"urlfetcher",
_("displays url contents with typing animation"))
def _message_no_url(self):
"""
"""
return _("""
You just need to provide the URL from where %(app_title)s will read and
display on screen.
If you do not have any idea which URL to use, check out some examples here:
RFC
RFC-1034 - http://tools.ietf.org/rfc/rfc1034.txt
See a RFC list from Wikipedia:
http://en.wikipedia.org/wiki/List_of_RFCs
(remember to use the txt version)
""") % {
'app_title': constants.App.TITLE,
}
| wkentaro/termsaver | termsaverlib/screen/urlfetcher.py | Python | apache-2.0 | 2,181 |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains the random number generating methods used in the framework.
created on 07/15/2017
@author: talbpaul
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import math
import threading
from collections import deque, defaultdict
import numpy as np
from utils.utils import findCrowModule
from utils import mathUtils
# in general, we will use Crow for now, but let's make it easy to switch just in case it is helpful eventually.
# Numpy stochastic environment can not pass the test as this point
stochasticEnv = 'crow'
#stochasticEnv = 'numpy'
class BoxMullerGenerator:
"""
Iterator class for the Box-Muller transform
"""
def __init__(self):
"""
Constructor.
      @ In, None
@ Out, None
"""
self.queue = defaultdict(deque)
self.__queueLock = threading.RLock()
def generate(self,engine=None):
"""
Yields a normally-distributed pseudorandom value
@ In, engine, instance, optional, random number generator
@ Out, generate, float, random value
"""
with self.__queueLock:
if len(self.queue[engine]) == 0:
#calculate new values
self.queue[engine].extend(self.createSamples(engine=engine))
val = self.queue[engine].pop()
return val
def createSamples(self,engine=None):
"""
Sample calculator. Because Box Muller does batches of 2, add them to a queue.
@ In, engine, instance, optional, random number generator.
@ Out, (z1,z2), tuple, two independent random values
"""
u1,u2 = random(2,engine=engine)
z1 = np.sqrt(-2.*np.log(u1))*np.cos(2.*np.pi*u2)
z2 = np.sqrt(-2.*np.log(u1))*np.sin(2.*np.pi*u2)
return z1,z2
def testSampling(self, n=1e5,engine=None):
"""
Tests distribution of samples over a large number.
@ In, n, int, optional, number of samples to test with
@ In, engine, instance, optional, random number generator
@ Out, mean, float, mean of sample set
@ Out, stdev, float, standard deviation of sample set
"""
n = int(n)
samples = np.array([self.generate(engine=engine) for _ in range(n)])
mean = np.average(samples)
stdev = np.std(samples)
return mean,stdev
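# Hedged usage sketch for BoxMullerGenerator (kept as comments so nothing runs
# at import time; the default engine and the sample count are assumptions):
#
#   gen = BoxMullerGenerator()
#   z = gen.generate()                     # one N(0,1) draw, the second value stays queued
#   mean, stdev = gen.testSampling(n=1e4)  # expected to be close to (0, 1)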
if stochasticEnv == 'numpy':
npStochEnv = np.random.RandomState()
else:
crowStochEnv = findCrowModule('randomENG').RandomClass()
# this is needed for now since we need to split the stoch environments
distStochEnv = findCrowModule('distribution1D').DistributionContainer.instance()
boxMullerGen = BoxMullerGenerator()
#
# Utilities
#
#
def randomSeed(value, seedBoth=False, engine=None):
"""
Function to get a random seed
@ In, value, float, the seed
@ In, engine, instance, optional, random number generator
@ In, seedBoth, bool, optional, if True then seed both random environments
@ Out, None
"""
# we need a flag to tell us if the global numpy stochastic environment is needed to be changed
replaceGlobalEnv=False
## choose an engine if it is none
if engine is None:
if stochasticEnv == 'crow':
distStochEnv.seedRandom(value)
engine=crowStochEnv
elif stochasticEnv == 'numpy':
replaceGlobalEnv=True
global npStochEnv
      # the global npStochEnv is needed in the numpy environment here
      # to prevent a referenced-before-assignment error in the local scope
engine = npStochEnv
if isinstance(engine, np.random.RandomState):
engine = np.random.RandomState(value)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
engine.seed(value)
if seedBoth:
np.random.seed(value+1) # +1 just to prevent identical seed sets
  if stochasticEnv == 'numpy' and replaceGlobalEnv:
    npStochEnv = engine
if replaceGlobalEnv:
print('randomUtils: Global random number seed has been changed to',value)
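# Illustrative seeding calls (commented out; the seed value 42 is an assumption):
#
#   randomSeed(42)                 # seed the module-level stochastic environment
#   eng = newRNG()                 # or create a private engine instead
#   randomSeed(42, engine=eng)     # and seed only that engine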
def random(dim=1, samples=1, keepMatrix=False, engine=None):
"""
Function to get a single random value, an array of random values, or a matrix of random values, on [0,1]
@ In, dim, int, optional, dimensionality of samples
@ In, samples, int, optional, number of arrays to deliver
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
    @ Out, vals, float, random value on [0,1] (or np.array with size [dim] if dim>1, or np.array with size [samples,dim] if samples>1)
"""
engine = getEngine(engine)
dim = int(dim)
samples = int(samples)
if isinstance(engine, np.random.RandomState):
vals = engine.rand(samples,dim)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
vals = np.zeros([samples, dim])
for i in range(len(vals)):
for j in range(len(vals[0])):
vals[i][j] = engine.random()
# regardless of stoch env
if keepMatrix:
return vals
else:
return _reduceRedundantListing(vals, (samples, dim))
def randomNormal(size=(1,), keepMatrix=False, engine=None):
"""
Function to get a single random value, an array of random values, or a matrix of random values, normally distributed
@ In, size, int or tuple, optional, shape of the samples to return
(if int, an array of samples will be returned if size>1, otherwise a float if keepMatrix is false)
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
    @ Out, vals, float, random normal number (or np.array with size [n] if n>1, or np.array with size [n,samples] if samples>1)
"""
engine = getEngine(engine)
if isinstance(size, int):
size = (size, )
if isinstance(engine, np.random.RandomState):
vals = engine.randn(*size)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
vals = np.zeros(np.prod(size))
for i in range(len(vals)):
vals[i] = boxMullerGen.generate(engine=engine)
vals.shape = size
if keepMatrix:
return vals
else:
return _reduceRedundantListing(vals,size)
def randomMultivariateNormal(cov, size=1, mean=None):
"""
Provides a random sample from a multivariate distribution.
@ In, cov, np.array, covariance matrix (must be square, positive definite)
@ In, size, int, optional, number of samples to return
@ In, mean, np.array, means for distributions (must be length of 1 side of covar matrix == len(cov[0]))
@ Out, vals, np.array, array of samples with size [n_samples, len(cov[0])]
"""
dims = cov.shape[0]
if mean is None:
mean = np.zeros(dims)
eps = 10 * sys.float_info.epsilon
covEps = cov + eps * np.identity(dims)
decomp = np.linalg.cholesky(covEps)
randSamples = randomNormal(size=(dims, size)).reshape((dims, size))
vals = mean + np.dot(decomp, randSamples)
return vals
def randomIntegers(low, high, caller=None, engine=None):
"""
Function to get a random integer
@ In, low, int, low boundary
@ In, high, int, upper boundary
@ In, caller, instance, optional, object requesting the random integers
    @ In, engine, instance, optional, random number generator
@ Out, rawInt, int, random int
"""
engine = getEngine(engine)
if isinstance(engine, np.random.RandomState):
return engine.randint(low, high=high+1)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
intRange = high - low + 1.0
rawNum = low + random(engine=engine)*intRange
rawInt = math.floor(rawNum)
if rawInt < low or rawInt > high:
if caller:
caller.raiseAMessage("Random int out of range")
rawInt = max(low, min(rawInt, high))
return rawInt
else:
raise TypeError('Engine type not recognized! {}'.format(type(engine)))
def randomChoice(array, size = 1, replace = True, engine = None):
"""
Generates a random sample or a sequence of random samples from a given array-like (list or such) or N-D array
    This is equivalent to np.random.choice but extends the functionality to N-D arrays
@ In, array, list or np.ndarray, the array from which to pick
@ In, size, int, optional, the number of samples to return
@ In, replace, bool, optional, allows replacement if True, default is True
    @ In, engine, instance, optional, random number generator
@ Out, selected, object, the random choice (1 element) or a list of elements
"""
assert(hasattr(array,"shape") or isinstance(array,list))
if not replace:
if hasattr(array,"shape"):
raise RuntimeError("Option with replace False not available for ndarrays")
if len(array) < size:
raise RuntimeError("array size < of number of requested samples (size)")
sel = []
coords = array
for _ in range(size):
if hasattr(array,"shape"):
coord = tuple([randomIntegers(0, dim-1, engine=engine) for dim in coords.shape])
sel.append(coords[coord])
else:
sel.append(coords[randomIntegers(0, len(coords)-1, engine=engine)])
if not replace:
coords.remove(sel[-1])
selected = sel[0] if size == 1 else sel
return selected
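# Commented examples of randomChoice under assumed inputs: lists support
# sampling without replacement, while N-D arrays are sampled element-wise.
#
#   randomChoice([1, 2, 3, 4], size=2, replace=False)   # e.g. [3, 1]
#   randomChoice(np.arange(12).reshape(3, 4))            # one element picked from the 2-D array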
def randomPermutation(l,caller,engine=None):
"""
Function to get a random permutation
@ In, l, list, list to be permuted
@ In, caller, instance, the caller
@ In, engine, instance, optional, random number generator
@ Out, newList, list, randomly permuted list
"""
engine = getEngine(engine)
if isinstance(engine, np.random.RandomState):
return engine.permutation(l)
elif isinstance(engine, findCrowModule('randomENG').RandomClass):
newList = []
oldList = l[:]
while len(oldList) > 0:
newList.append(oldList.pop(randomIntegers(0,len(oldList)-1,caller,engine=engine)))
return newList
def randPointsOnHypersphere(dim,samples=1,r=1,keepMatrix=False,engine=None):
"""
obtains random points on the surface of a hypersphere of dimension "n" with radius "r".
see http://www.sciencedirect.com/science/article/pii/S0047259X10001211
"On decompositional algorithms for uniform sampling from n-spheres and n-balls", Harman and Lacko, 2010, J. Multivariate Analysis
@ In, dim, int, the dimensionality of the hypersphere
@ In, samples, int, optional, the number of samples desired
@ In, r, float, optional, the radius of the hypersphere
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
@ Out, pts, np.array(np.array(float)), random points on the surface of the hypersphere [sample#][pt]
"""
engine=getEngine(engine)
## first fill random samples
pts = randomNormal(size=(samples, dim),keepMatrix=True,engine=engine)
  ## scale to radius r, placing the points on the sphere surface through normalization
rnorm = float(r)/np.linalg.norm(pts,axis=1)
pts *= rnorm[:,np.newaxis]
#TODO if all values in any given sample are 0,
# this produces an unphysical result, so we should resample;
  # however, this probability is minuscule and the speed benefit of skipping the checking loop seems worth it.
if keepMatrix:
return pts
else:
return _reduceRedundantListing(pts,(samples, dim))
def randPointsInHypersphere(dim,samples=1,r=1,keepMatrix=False,engine=None):
"""
obtains a random point internal to a hypersphere of dimension "n" with radius "r"
see http://www.sciencedirect.com/science/article/pii/S0047259X10001211
"On decompositional algorithms for uniform sampling from n-spheres and n-balls", Harman and Lacko, 2010, J. Multivariate Analysis
@ In, dim, int, the dimensionality of the hypersphere
@ In, r, float, the radius of the hypersphere
@ In, keepMatrix, bool, optional, if True then will always return np.array(np.array(float))
@ In, engine, instance, optional, random number generator
@ Out, pt, np.array(float), a random point on the surface of the hypersphere
"""
engine=getEngine(engine)
#sample on surface of n+2-sphere and discard the last two dimensions
pts = randPointsOnHypersphere(dim+2,samples=samples,r=r,keepMatrix=True,engine=engine)[:,:-2]
if keepMatrix:
return pts
else:
return _reduceRedundantListing(pts,(samples, dim))
def newRNG(env=None):
"""
Provides a new instance of the random number generator.
@ In, env, string, optional, type of random number generator. Defaults to global option stored in "stochasticEnv".
@ Out, engine, object, RNG producer
"""
if env is None:
env = stochasticEnv
if env == 'crow':
engine = findCrowModule('randomENG').RandomClass()
elif env == 'numpy':
engine = np.random.RandomState()
return engine
### internal utilities ###
def _reduceRedundantListing(data,size):
"""
Adjusts data to be intuitive for developers.
- if np.prod(size) => dim = samples = 1: returns a float
- if size[1,...,n] > 1 but size[0] (samples) = 1: returns a 1D numpy array of floats
- otherwise: returns a numpy array indexed by the original shape
@ In, data, numpy.array, n-dimensional array indexed by [sample, :, ...,n]
    @ In, size, tuple(int), requested shape of the data as (samples, dim, ...)
@ Out, data, np.array, shape and size described above in method description.
"""
if np.prod(size) == 1: #user expects single float
return data.flatten()[0]
elif size[0]==1: #user expects array of floats (or matrix)
return data[0]
else:
return data
def getEngine(eng):
"""
Choose an engine if it is none and raise error if engine type not recognized
    @ In, eng, instance, random number generator or None
@ Out, engine, instance, random number generator
"""
if eng is None:
if stochasticEnv == 'numpy':
eng = npStochEnv
elif stochasticEnv == 'crow':
eng = crowStochEnv
if not isinstance(eng, np.random.RandomState) and not isinstance(eng, findCrowModule('randomENG').RandomClass):
raise TypeError('Engine type not recognized! {}'.format(type(eng)))
return eng
def randomPerpendicularVector(vector):
"""
Finds a random vector perpendicular to the given vector
Uses definition of dot product orthogonality:
0 = sum_i (p_i * g_i)
p_i = rand() forall i != n
p_n = -1/g_n * sum_i(p_i * g_i) forall i != n
@ In, vector, np.array, ND vector
@ Out, perp, np.array, perpendicular vector
"""
# sanity check
numNonZero = np.count_nonzero(vector)
if not numNonZero:
raise RuntimeError('Provided vector is the zero vector!')
N = len(vector)
indices = np.arange(N)
nonZeroMap = vector != 0
# choose a random NONZERO index to be dependent (don't divide by zero, mate)
depIndex = indices[nonZeroMap][randomIntegers(0, numNonZero - 1, None)]
# random values for all but chosen variable
perp = randomNormal(N)
# cheat some math, zero out the random index term by setting the perp value to 0
perp[depIndex] = 0
dotProd = np.dot(vector, perp)
perp[depIndex] = - dotProd / vector[depIndex]
return perp
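# Worked (commented) example of the construction above, with assumed numbers:
# for vector g = [2, 0, 1], pick a nonzero dependent index, say 2, and random
# values p = [0.5, -1.0, 0]. The partial dot product is 2*0.5 = 1.0, so
# p[2] = -1.0/1 = -1.0 and [0.5, -1.0, -1.0] . [2, 0, 1] = 0 as required.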
| joshua-cogliati-inl/raven | framework/utils/randomUtils.py | Python | apache-2.0 | 15,561 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
One repository to update them all
On mbed.org the mbed SDK is split up in multiple repositories, this script takes
care of updating them all.
"""
import sys
from copy import copy
from os import walk, remove, makedirs
from os.path import join, abspath, dirname, relpath, exists, isfile
from shutil import copyfile
from optparse import OptionParser
import re
import string
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.settings import MBED_ORG_PATH, MBED_ORG_USER, BUILD_DIR
from workspace_tools.paths import *
from workspace_tools.utils import run_cmd
MBED_URL = "mbed.org"
MBED_USER = "mbed_official"
changed = []
push_remote = True
quiet = False
commit_msg = ''
# Code that does have a mirror in the mbed SDK
# Tuple data: (repo_name, list_of_code_dirs, [team])
# team is optional - if not specified, the code is published under mbed_official
OFFICIAL_CODE = (
("mbed-dev" , MBED_BASE),
("mbed-rtos", RTOS),
("mbed-dsp" , DSP),
("mbed-rpc" , MBED_RPC),
("lwip" , LWIP_SOURCES+"/lwip"),
("lwip-sys", LWIP_SOURCES+"/lwip-sys"),
("Socket" , LWIP_SOURCES+"/Socket"),
("lwip-eth" , ETH_SOURCES+"/lwip-eth"),
("EthernetInterface", ETH_SOURCES+"/EthernetInterface"),
("USBDevice", USB),
("USBHost" , USB_HOST),
("CellularModem", CELLULAR_SOURCES),
("CellularUSBModem", CELLULAR_USB_SOURCES),
("UbloxUSBModem", UBLOX_SOURCES),
("UbloxModemHTTPClientTest", [TEST_DIR+"/net/cellular/http/common", TEST_DIR+"/net/cellular/http/ubloxusb"]),
("UbloxModemSMSTest", [TEST_DIR+"/net/cellular/sms/common", TEST_DIR+"/net/cellular/sms/ubloxusb"]),
("FATFileSystem", FAT_FS, "mbed-official"),
)
# Code that does have dependencies to libraries should point to
# the latest revision. By default, they point to a specific revision.
CODE_WITH_DEPENDENCIES = (
# Libraries
"EthernetInterface",
# RTOS Examples
"rtos_basic",
"rtos_isr",
"rtos_mail",
"rtos_mutex",
"rtos_queue",
"rtos_semaphore",
"rtos_signals",
"rtos_timer",
# Net Examples
"TCPEchoClient",
"TCPEchoServer",
"TCPSocket_HelloWorld",
"UDPSocket_HelloWorld",
"UDPEchoClient",
"UDPEchoServer",
"BroadcastReceive",
"BroadcastSend",
# mbed sources
"mbed-src-program",
)
# A list of regular expressions that will be checked against each directory
# name and skipped if they match.
IGNORE_DIRS = (
)
IGNORE_FILES = (
'COPYING',
'\.md',
"\.lib",
"\.bld"
)
def ignore_path(name, reg_exps):
for r in reg_exps:
if re.search(r, name):
return True
return False
class MbedRepository:
@staticmethod
def run_and_print(command, cwd):
stdout, _, _ = run_cmd(command, wd=cwd, redirect=True)
print(stdout)
def __init__(self, name, team = None):
self.name = name
self.path = join(MBED_ORG_PATH, name)
if team is None:
self.url = "http://" + MBED_URL + "/users/" + MBED_USER + "/code/%s/"
else:
self.url = "http://" + MBED_URL + "/teams/" + team + "/code/%s/"
if not exists(self.path):
# Checkout code
if not exists(MBED_ORG_PATH):
makedirs(MBED_ORG_PATH)
self.run_and_print(['hg', 'clone', self.url % name], cwd=MBED_ORG_PATH)
else:
# Update
self.run_and_print(['hg', 'pull'], cwd=self.path)
self.run_and_print(['hg', 'update'], cwd=self.path)
def publish(self):
# The maintainer has to evaluate the changes first and explicitly accept them
self.run_and_print(['hg', 'addremove'], cwd=self.path)
stdout, _, _ = run_cmd(['hg', 'status'], wd=self.path)
if stdout == '':
print "No changes"
return False
print stdout
if quiet:
commit = 'Y'
else:
commit = raw_input(push_remote and "Do you want to commit and push? Y/N: " or "Do you want to commit? Y/N: ")
if commit == 'Y':
args = ['hg', 'commit', '-u', MBED_ORG_USER]
if commit_msg:
args = args + ['-m', commit_msg]
self.run_and_print(args, cwd=self.path)
if push_remote:
self.run_and_print(['hg', 'push'], cwd=self.path)
return True
# Check if a file is a text file or a binary file
# Taken from http://code.activestate.com/recipes/173220/
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def is_text_file(filename):
block_size = 1024
def istext(s):
if "\0" in s:
return 0
if not s: # Empty files are considered text
return 1
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = s.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/len(s) > 0.30:
return 0
return 1
with open(filename) as f:
res = istext(f.read(block_size))
return res
# Return the line ending type for the given file ('cr' or 'crlf')
def get_line_endings(f):
examine_size = 1024
try:
tf = open(f, "rb")
lines, ncrlf = tf.readlines(examine_size), 0
tf.close()
for l in lines:
if l.endswith("\r\n"):
ncrlf = ncrlf + 1
return 'crlf' if ncrlf > len(lines) >> 1 else 'cr'
except:
return 'cr'
# Copy file to destination, but preserve destination line endings if possible
# This prevents very annoying issues with huge diffs that appear because of
# differences in line endings
def copy_with_line_endings(sdk_file, repo_file):
if not isfile(repo_file):
copyfile(sdk_file, repo_file)
return
is_text = is_text_file(repo_file)
if is_text:
sdk_le = get_line_endings(sdk_file)
repo_le = get_line_endings(repo_file)
if not is_text or sdk_le == repo_le:
copyfile(sdk_file, repo_file)
else:
print "Converting line endings in '%s' to '%s'" % (abspath(repo_file), repo_le)
f = open(sdk_file, "rb")
data = f.read()
f.close()
f = open(repo_file, "wb")
data = data.replace("\r\n", "\n") if repo_le == 'cr' else data.replace('\n','\r\n')
f.write(data)
f.close()
def visit_files(path, visit):
for root, dirs, files in walk(path):
# Ignore hidden directories
for d in copy(dirs):
full = join(root, d)
if d.startswith('.'):
dirs.remove(d)
if ignore_path(full, IGNORE_DIRS):
print "Skipping '%s'" % full
dirs.remove(d)
for file in files:
if ignore_path(file, IGNORE_FILES):
continue
visit(join(root, file))
def update_repo(repo_name, sdk_paths, team_name):
repo = MbedRepository(repo_name, team_name)
# copy files from mbed SDK to mbed_official repository
def visit_mbed_sdk(sdk_file):
repo_file = join(repo.path, relpath(sdk_file, sdk_path))
repo_dir = dirname(repo_file)
if not exists(repo_dir):
makedirs(repo_dir)
copy_with_line_endings(sdk_file, repo_file)
for sdk_path in sdk_paths:
visit_files(sdk_path, visit_mbed_sdk)
# remove repository files that do not exist in the mbed SDK
def visit_repo(repo_file):
for sdk_path in sdk_paths:
sdk_file = join(sdk_path, relpath(repo_file, repo.path))
if exists(sdk_file):
break
else:
remove(repo_file)
print "remove: %s" % repo_file
visit_files(repo.path, visit_repo)
if repo.publish():
changed.append(repo_name)
def update_code(repositories):
for r in repositories:
repo_name, sdk_dir = r[0], r[1]
team_name = r[2] if len(r) == 3 else None
print '\n=== Updating "%s" ===' % repo_name
sdk_dirs = [sdk_dir] if type(sdk_dir) != type([]) else sdk_dir
update_repo(repo_name, sdk_dirs, team_name)
def update_single_repo(repo):
repos = [r for r in OFFICIAL_CODE if r[0] == repo]
if not repos:
print "Repository '%s' not found" % repo
else:
update_code(repos)
def update_dependencies(repositories):
for repo_name in repositories:
print '\n=== Updating "%s" ===' % repo_name
repo = MbedRepository(repo_name)
# point to the latest libraries
def visit_repo(repo_file):
with open(repo_file, "r") as f:
url = f.read()
with open(repo_file, "w") as f:
f.write(url[:(url.rindex('/')+1)])
visit_files(repo.path, visit_repo, None, MBED_REPO_EXT)
if repo.publish():
changed.append(repo_name)
def update_mbed():
update_repo("mbed", [join(BUILD_DIR, "mbed")], None)
def do_sync(options):
global push_remote, quiet, commit_msg, changed
push_remote = not options.nopush
quiet = options.quiet
commit_msg = options.msg
    changed = []
if options.code:
update_code(OFFICIAL_CODE)
if options.dependencies:
update_dependencies(CODE_WITH_DEPENDENCIES)
if options.mbed:
update_mbed()
if options.repo:
update_single_repo(options.repo)
if changed:
print "Repositories with changes:", changed
return changed
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--code",
action="store_true", default=False,
help="Update the mbed_official code")
parser.add_option("-d", "--dependencies",
action="store_true", default=False,
help="Update the mbed_official code dependencies")
parser.add_option("-m", "--mbed",
action="store_true", default=False,
help="Release a build of the mbed library")
parser.add_option("-n", "--nopush",
action="store_true", default=False,
help="Commit the changes locally only, don't push them")
parser.add_option("", "--commit_message",
action="store", type="string", default='', dest='msg',
help="Commit message to use for all the commits")
parser.add_option("-r", "--repository",
action="store", type="string", default='', dest='repo',
help="Synchronize only the given repository")
parser.add_option("-q", "--quiet",
action="store_true", default=False,
help="Don't ask for confirmation before commiting or pushing")
(options, args) = parser.parse_args()
do_sync(options)
| bikeNomad/mbed | workspace_tools/synch.py | Python | apache-2.0 | 11,453 |
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GradingRecord (Model) query functions.
"""
__authors__ = [
'"Lennard de Rijk" <[email protected]>',
]
from google.appengine.ext import db
from soc.logic.models import base
from soc.modules.gsoc.logic.models.survey_record import grading_logic
from soc.modules.gsoc.logic.models.survey_record import project_logic
import soc.modules.gsoc.models.grading_record
class Logic(base.Logic):
"""Logic methods for the GradingRecord model.
"""
def __init__(self,
model=soc.modules.gsoc.models.grading_record.GradingRecord,
base_model=None, scope_logic=None):
"""Defines the name, key_name and model for this entity.
"""
super(Logic, self).__init__(model=model, base_model=base_model,
scope_logic=scope_logic, id_based=True)
def updateOrCreateRecordsFor(self, survey_group, project_entities):
"""Updates or creates a GradingRecord in a batch.
Args:
survey_group: GradingSurveyGroup entity
project_entities: list of project_entities which to process
"""
records_to_store = []
query_fields = {'grading_survey_group': survey_group}
for project_entity in project_entities:
# set a new project to query for
query_fields['project'] = project_entity
# try to retrieve an existing record
record_entity = self.getForFields(query_fields, unique=True)
# retrieve the fields that should be set
record_fields = self.getFieldsForGradingRecord(project_entity,
survey_group,
record_entity)
if not record_entity and project_entity.status in ['failed', 'invalid'] \
and not record_fields['mentor_record'] \
and not record_fields['student_record']:
# Don't create a new GradingRecord for an already failed project which
# has no records attached. Because it does not matter.
continue
if record_entity:
# update existing GradingRecord
for key,value in record_fields.iteritems():
setattr(record_entity, key, value)
else:
# create a new GradingRecord
record_entity = self.getModel()(**record_fields)
# prepare the new/updated record for storage
records_to_store.append(record_entity)
# batch put and return the entities
return db.put(records_to_store)
def getFieldsForGradingRecord(self, project, survey_group,
record_entity=None):
"""Returns the fields for a GradingRecord.
See GradingRecord model for description of the grade_decision value.
Args:
project: Project entity
survey_group: a GradingSurveyGroup entity
record_entity: an optional GradingRecord entity
Returns:
Dict containing the fields that should be set on a GradingRecord for this
GradingSurveyGroup and StudentProject
"""
# retrieve the two Surveys, student_survey might be None
grading_survey = survey_group.grading_survey
student_survey = survey_group.student_survey
# retrieve a GradingSurveyRecord
survey_record_fields = {'project': project,
'survey': grading_survey}
grading_survey_record = grading_logic.getForFields(survey_record_fields,
unique=True)
if student_survey:
# retrieve ProjectSurveyRecord
survey_record_fields['survey'] = student_survey
project_survey_record = project_logic.getForFields(survey_record_fields,
unique=True)
else:
project_survey_record = None
# set the necessary fields
fields = {'grading_survey_group': survey_group,
'project': project,
'mentor_record': grading_survey_record,
'student_record': project_survey_record}
if not record_entity or not record_entity.locked:
# find grading decision for new or unlocked records
if not grading_survey_record:
# no record found, return undecided
grade_decision = 'undecided'
elif not student_survey or project_survey_record:
# if the grade is True then pass else fail
grade_decision = 'pass' if grading_survey_record.grade else 'fail'
else:
# no ProjectSurveyRecord on file while there is a survey to be taken
grade_decision = 'fail'
fields['grade_decision'] = grade_decision
# return the fields that should be set for a GradingRecord
return fields
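# Grade-decision summary for getFieldsForGradingRecord above, restated here as
# an illustrative comment (derived from the code in this file, not from external docs):
#   - no GradingSurveyRecord on file                            -> 'undecided'
#   - grading record present and no student survey required,
#     or the ProjectSurveyRecord is also on file                -> 'pass' if the mentor grade is True else 'fail'
#   - student survey required but no ProjectSurveyRecord filed  -> 'fail'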
logic = Logic()
| MatthewWilkes/mw4068-packaging | src/melange/src/soc/modules/gsoc/logic/models/grading_record.py | Python | apache-2.0 | 5,231 |
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import unittest
import time
from sonmanobase.messaging import ManoBrokerConnection, ManoBrokerRequestResponseConnection
# TODO the active waiting for messages should be replaced by threading.Event() functionality
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._message_buffer = list()
self._message_buffer.append(list())
self._message_buffer.append(list())
self.m = None
def tearDown(self):
self.m.stop_connection()
self.m.stop_threads()
del self.m
def _simple_subscribe_cbf1(self, ch, method, props, body):
self.assertIsNotNone(props.app_id)
self.assertIsNotNone(props.headers)
self.assertIsNotNone(props.content_type)
self.waiting = 0
self._message_buffer[0].append(body)
print("SUBSCRIBE CBF1: %s" % body)
def _simple_subscribe_cbf2(self, ch, method, props, body):
self.assertIsNotNone(props.app_id)
self.assertIsNotNone(props.headers)
self.assertIsNotNone(props.content_type)
self.waiting = 0
self._message_buffer[1].append(body)
print("SUBSCRIBE CBF2: %s" % body)
def _simple_request_echo_cbf(self, ch, method, props, body):
self.assertIsNotNone(props.app_id)
self.assertIsNotNone(props.reply_to)
self.assertIsNotNone(props.correlation_id)
self.assertIsNotNone(props.headers)
self.assertIsNotNone(props.content_type)
print("REQUEST ECHO CBF: %s" % body)
return body
def wait_for_messages(self, buffer=0, n_messages=1, timeout=5):
"""
Helper to deal with async messaging system.
        Waits until the requested number of messages has arrived in the
        given buffer or until a timeout is reached.
:param timeout: seconds to wait
:return:
"""
self.waiting = 0
while len(self._message_buffer[buffer]) < n_messages and self.waiting < timeout:
time.sleep(0.01)
self.waiting += 0.01
if not self.waiting < timeout:
raise Exception("Message lost. Subscription timeout reached. Buffer: %r" % self._message_buffer[buffer])
return self._message_buffer[buffer]
def wait_for_particular_messages(self, message, buffer=0, timeout=5):
"""
Helper to deal with async messaging system.
        Waits until the specified message can be found in the buffer.
:param timeout: seconds to wait
:return:
"""
self.waiting = 0
while message not in self._message_buffer[buffer] and self.waiting < timeout:
time.sleep(0.01)
self.waiting += 0.01
if not self.waiting < timeout:
raise Exception(
"Message never found. Subscription timeout reached. Buffer: %r" % self._message_buffer[buffer])
return True
class TestManoBrokerConnection(BaseTestCase):
"""
Test basic broker interactions.
"""
def setUp(self):
super().setUp()
self.m = ManoBrokerConnection("test-basic-broker-connection")
#@unittest.skip("disabled")
def test_broker_connection(self):
"""
Test broker connection.
"""
self.m.publish("test.topic", "testmessage")
#@unittest.skip("disabled")
def test_broker_bare_publishsubscribe(self):
"""
Test publish / subscribe messaging.
"""
self.m.subscribe(self._simple_subscribe_cbf1, "test.topic")
time.sleep(1)
self.m.publish("test.topic", "testmsg")
self.assertEqual(self.wait_for_messages()[0], "testmsg")
#@unittest.skip("disabled")
def test_broker_multi_publish(self):
"""
Test publish / subscribe messaging.
"""
self.m.subscribe(self._simple_subscribe_cbf1, "test.topic")
time.sleep(1)
for i in range(0, 100):
self.m.publish("test.topic", "%d" % i)
self.assertEqual(self.wait_for_messages(n_messages=100)[99], "99")
#@unittest.skip("disabled")
    def test_broker_double_subscription(self):
"""
Test publish / subscribe messaging.
"""
self.m.subscribe(self._simple_subscribe_cbf1, "test.topic")
self.m.subscribe(self._simple_subscribe_cbf2, "test.topic")
time.sleep(1)
for i in range(0, 100):
self.m.publish("test.topic", "%d" % i)
self.assertEqual(self.wait_for_messages(buffer=0, n_messages=100)[99], "99")
self.assertEqual(self.wait_for_messages(buffer=1, n_messages=100)[99], "99")
class TestManoBrokerRequestResponseConnection(BaseTestCase):
"""
Test async. request/response and notification functionality.
"""
def setUp(self):
super().setUp()
self.m = ManoBrokerRequestResponseConnection("test-request-response-broker-connection")
#@unittest.skip("disabled")
def test_broker_connection(self):
"""
Test broker connection.
"""
self.m.notify("test.topic2", "simplemessage")
#@unittest.skip("disabled")
def test_request_response(self):
"""
Test request/response messaging pattern.
"""
self.m.register_async_endpoint(self._simple_request_echo_cbf, "test.request")
time.sleep(0.5) # give broker some time to register subscriptions
self.m.call_async(self._simple_subscribe_cbf1, "test.request", "ping-pong")
self.assertEqual(self.wait_for_messages()[0], "ping-pong")
#@unittest.skip("disabled")
def test_request_response_sync(self):
"""
Test request/response messaging pattern (synchronous).
"""
self.m.register_async_endpoint(self._simple_request_echo_cbf, "test.request.sync")
time.sleep(0.5) # give broker some time to register subscriptions
result = self.m.call_sync("test.request.sync", "ping-pong")
self.assertTrue(len(result) == 4)
self.assertEqual(str(result[3]), "ping-pong")
#@unittest.skip("disabled")
def test_notification(self):
"""
Test notification messaging pattern.
"""
self.m.register_notification_endpoint(self._simple_subscribe_cbf1, "test.notification")
time.sleep(0.5) # give broker some time to register subscriptions
self.m.notify("test.notification", "my-notification")
self.assertTrue(self.wait_for_particular_messages("my-notification"))
#@unittest.skip("disabled")
def test_notification_pub_sub_mix(self):
"""
Test notification messaging pattern mixed with basic pub/sub calls.
"""
self.m.register_notification_endpoint(self._simple_subscribe_cbf1, "test.notification1")
self.m.subscribe(self._simple_subscribe_cbf1, "test.notification2")
time.sleep(0.5) # give broker some time to register subscriptions
# send publish to notify endpoint
self.m.publish("test.notification1", "my-notification1")
self.assertEqual(self.wait_for_messages()[0], "my-notification1")
# send notify to subscribe endpoint
self.m.notify("test.notification2", "my-notification2")
#res = self.wait_for_messages(n_messages=2)
self.assertTrue(self.wait_for_particular_messages("my-notification1"))
self.assertTrue(self.wait_for_particular_messages("my-notification2"))
#@unittest.skip("disabled")
def test_double_subscriptions(self):
"""
Ensure that messages are delivered to all subscriptions of a topic.
(e.g. identifies queue setup problems)
:return:
"""
self.m.subscribe(self._simple_subscribe_cbf1, "test.interleave")
self.m.subscribe(self._simple_subscribe_cbf2, "test.interleave")
time.sleep(0.5)
# send publish to notify endpoint
self.m.publish("test.interleave", "my-notification1")
# enusre that it is received by each subscription
self.assertTrue(self.wait_for_particular_messages("my-notification1", buffer=0))
self.assertTrue(self.wait_for_particular_messages("my-notification1", buffer=1))
#@unittest.skip("disabled")
def test_interleaved_subscriptions(self):
"""
Ensure that interleaved subscriptions to the same topic do not lead to problems.
:return:
"""
self.m.subscribe(self._simple_subscribe_cbf2, "test.interleave2")
time.sleep(0.5)
# do a async call on the same topic
self.m.register_async_endpoint(self._simple_request_echo_cbf, "test.interleave2")
time.sleep(0.5) # give broker some time to register subscriptions
self.m.call_async(self._simple_subscribe_cbf1, "test.interleave2", "ping-pong")
self.assertTrue(self.wait_for_particular_messages("ping-pong"))
# send publish to notify endpoint
self.m.publish("test.interleave2", "my-notification1")
time.sleep(0.5)
        # ensure that the subscriber still gets the message (and also sees the one from async_call)
self.assertTrue(self.wait_for_particular_messages("ping-pong"))
self.assertTrue(self.wait_for_particular_messages("my-notification1", buffer=1))
if __name__ == "__main__":
#unittest.main()
t = TestManoBrokerRequestResponseConnection()
t.setUp()
t.test_request_response()
t.tearDown()
| sonata-nfv/son-qual | qual-stress-mano-framework/son-mano-base/test/test_messaging.py | Python | apache-2.0 | 10,419 |
__author__ = 'Brian Wickman'
from process_provider_ps import ProcessProvider_PS
from process_provider_procfs import ProcessProvider_Procfs
class ProcessProviderFactory(object):
"""
A factory for producing platform-appropriate ProcessProviders.
Typical use-cases:
Import
>>> from twitter.common.process import ProcessProviderFactory
>>> ps = ProcessProviderFactory.get()
Run a collection of all pids
>>> ps.collect_all()
Get a ProcessHandle to the init process
>>> init = ps.get_handle(1)
>>> init
<twitter.common.process.process_handle_ps.ProcessHandlePs object at 0x1004ad950>
Get stats
>>> init.cpu_time()
7980.0600000000004
>>> init.user()
'root'
>>> init.wall_time()
6485509.0
>>> init.pid()
1
>>> init.ppid()
0
Refresh stats
>>> init.refresh()
>>> init.cpu_time()
7982.9700000000003
Introspect the process tree
>>> list(ps.children_of(init.pid()))
[10, 11, 12, 13, 14, 15, 16, 17, 26, 32, 37, 38, 39, 40, 42, 43, 45,
51, 59, 73, 108, 140, 153, 157, 162, 166, 552, 1712, 1968, 38897,
58862, 63321, 64513, 66458, 68598, 78610, 85633, 91019, 97271]
Aggregations
>>> sum(map(lambda pid: ps.get_handle(pid).cpu_time(), ps.children_of(init.pid())))
228574.40999999995
Collect data from a subset of processes
>>> ps.collect_set(ps.children_of(init.pid()))
Re-evaluate
>>> sum(map(lambda pid: ps.get_handle(pid).cpu_time(), ps.children_of(init.pid())))
228642.19999999998
"""
PROVIDERS = [
ProcessProvider_Procfs,
ProcessProvider_PS
]
@staticmethod
def get():
"""
Return a platform-specific ProcessProvider.
"""
for provider in ProcessProviderFactory.PROVIDERS:
if provider._platform_compatible():
return provider()
| foursquare/commons-old | src/python/twitter/common/process/__init__.py | Python | apache-2.0 | 1,910 |
import time
from datetime import datetime
def format_ts_from_float(ts):
return int(ts) * 1000000000
def format_ts_from_date(ts):
return format_ts_from_float(time.mktime(ts.timetuple()))
def format_ts_from_str(ts, pattern='%Y-%m-%d %H:%M:%S'):
return format_ts_from_date(datetime.strptime(ts, pattern))
def format_ts_from_last_modified(ts, pattern='%a, %d %b %Y %H:%M:%S %Z'):
ts = datetime.strptime(ts, pattern)
return int(time.mktime(ts.timetuple()) * 1000)
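# Hedged usage sketch (values are illustrative and depend on the local timezone):
# the first three helpers return nanosecond-precision timestamps, while
# format_ts_from_last_modified returns milliseconds.
#
#   format_ts_from_str('2020-01-01 00:00:00')
#   # -> e.g. 1577836800000000000 (ns)
#   format_ts_from_last_modified('Wed, 01 Jan 2020 00:00:00 GMT')
#   # -> e.g. 1577836800000 (ms)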
| rdo-infra/ci-config | ci-scripts/infra-setup/roles/rrcockpit/files/telegraf_py3/influxdb_utils.py | Python | apache-2.0 | 487 |
"""Handle automations."""
# Copyright 2013-2017 The Home Assistant Authors
# https://github.com/home-assistant/home-assistant/blob/master/LICENSE.md
# This file was modified by The Camacq Authors.
import logging
from collections import deque
from functools import partial
import voluptuous as vol
from camacq.exceptions import TemplateError
from camacq.helper import BASE_ACTION_SCHEMA, get_module, has_at_least_one_key
from camacq.helper.template import make_template, render_template
from camacq.const import CAMACQ_STOP_EVENT, CONF_DATA, CONF_ID
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIONS = "automations"
CONF_ACTION = "action"
CONF_CONDITION = "condition"
CONF_CONDITIONS = "conditions"
CONF_NAME = "name"
CONF_TRIGGER = "trigger"
CONF_TYPE = "type"
ENABLED = "enabled"
NAME = "name"
ACTION_DELAY = "delay"
ACTION_TOGGLE = "toggle"
DATA_AUTOMATIONS = "automations"
TRIGGER_ACTION_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_TYPE): vol.Coerce(str),
vol.Required(CONF_ID): vol.Coerce(str),
vol.Optional(CONF_DATA, default={}): dict,
}
],
)
CONDITION_SCHEMA = vol.All(
has_at_least_one_key(CONF_TYPE, CONF_CONDITION),
{
# pylint: disable=no-value-for-parameter
vol.Inclusive(CONF_TYPE, "condition"): vol.All(
vol.Upper, vol.In(["AND", "OR"])
),
vol.Inclusive(CONF_CONDITIONS, "condition"): [
# pylint: disable=unnecessary-lambda
lambda value: CONDITION_SCHEMA(value)
],
vol.Exclusive(CONF_CONDITION, "condition"): vol.Coerce(str),
},
)
CONFIG_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_NAME): vol.Coerce(str),
vol.Required(CONF_TRIGGER): TRIGGER_ACTION_SCHEMA,
vol.Required(CONF_ACTION): TRIGGER_ACTION_SCHEMA,
vol.Optional(
CONF_CONDITION, default={CONF_CONDITION: "true"}
): CONDITION_SCHEMA,
}
]
)
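# Illustrative automation config accepted by CONFIG_SCHEMA above. The trigger
# and action type/id names are assumptions for the sketch, not real plugins:
#
#   automations:
#     - name: example_automation
#       trigger:
#         - type: event
#           id: some_event_id
#       action:
#         - type: sample
#           id: set_sample
#           data: {}
#       condition:
#         condition: "true"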
async def setup_module(center, config):
"""Set up automations package.
Parameters
----------
center : Center instance
The Center instance.
config : dict
The config dict.
"""
_process_automations(center, config)
automations = center.data[DATA_AUTOMATIONS]
async def handle_action(**kwargs):
"""Enable or disable an automation."""
name = kwargs[NAME]
automation = automations[name]
enabled = kwargs.get(ENABLED, not automation.enabled)
if enabled:
automation.enable()
else:
automation.disable()
toggle_action_schema = BASE_ACTION_SCHEMA.extend(
{
vol.Required(NAME): vol.All(vol.Coerce(str), vol.In(automations)),
ENABLED: vol.Boolean(), # pylint: disable=no-value-for-parameter
}
)
# register action to enable/disable automation
center.actions.register(
"automations", ACTION_TOGGLE, handle_action, toggle_action_schema
)
def _process_automations(center, config):
"""Process automations from config."""
automations = center.data.setdefault(DATA_AUTOMATIONS, {})
conf = config[CONF_AUTOMATIONS]
for block in conf:
name = block[CONF_NAME]
_LOGGER.debug("Setting up automation %s", name)
action_sequence = _get_actions(center, block[CONF_ACTION])
cond_func = _process_condition(center, block[CONF_CONDITION])
# use partial to get a function with args to call later
attach_triggers = partial(_process_trigger, center, block[CONF_TRIGGER])
automations[name] = Automation(
center, name, attach_triggers, cond_func, action_sequence
)
def _get_actions(center, config_block):
"""Return actions."""
actions = (TemplateAction(center, action_conf) for action_conf in config_block)
return ActionSequence(center, actions)
def _process_condition(center, config_block):
"""Return a function that parses the condition."""
if CONF_TYPE in config_block:
checks = []
condition_type = config_block[CONF_TYPE]
conditions = config_block[CONF_CONDITIONS]
for cond in conditions:
check = _process_condition(center, cond)
checks.append(check)
return make_checker(condition_type, checks)
data = config_block[CONF_CONDITION]
template = make_template(center, data)
return partial(render_template, template)
def make_checker(condition_type, checks):
"""Return a function to check condition."""
def check_condition(variables):
"""Return True if all or any condition(s) pass."""
if condition_type.lower() == "and":
return all(template_check(check(variables)) for check in checks)
if condition_type.lower() == "or":
return any(template_check(check(variables)) for check in checks)
return False
return check_condition
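# Commented illustration of make_checker with assumed inputs: for
# condition_type 'AND' and two checks rendering to 'true' and 'false', the
# returned function yields False; with 'OR' it yields True.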
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value
def _process_trigger(center, config_block, trigger):
"""Process triggers for an automation."""
remove_funcs = []
for conf in config_block:
trigger_id = conf[CONF_ID]
trigger_type = conf[CONF_TYPE]
trigger_mod = get_module(__name__, trigger_type)
if not trigger_mod:
continue
_LOGGER.debug("Setting up trigger %s", trigger_id)
remove = trigger_mod.handle_trigger(center, conf, trigger)
if not remove:
_LOGGER.error("Setting up trigger %s failed", trigger_id)
continue
remove_funcs.append(remove)
if not remove_funcs:
return None
def remove_triggers():
"""Remove attached triggers."""
for remove in remove_funcs:
remove()
return remove_triggers
class Automation:
"""Automation class."""
# pylint: disable=too-many-arguments
def __init__(
self, center, name, attach_triggers, cond_func, action_sequence, enabled=True
):
"""Set up instance."""
self._center = center
self.name = name
self.enabled = False
self._action_sequence = action_sequence
self._attach_triggers = attach_triggers
self._detach_triggers = None
self._cond_func = cond_func
if enabled:
self.enable()
def __repr__(self):
"""Return the representation."""
return (
f"Automation(center={self._center}, name={self.name}, "
f"attach_triggers={self._attach_triggers}, cond_func={self._cond_func}, "
f"action_sequence={self._action_sequence}, enabled={self.enabled})"
)
def enable(self):
"""Enable automation."""
if self.enabled:
return
self._detach_triggers = self._attach_triggers(self.trigger)
self.enabled = True
def disable(self):
"""Disable automation."""
if not self.enabled:
return
if self._detach_triggers is not None:
self._detach_triggers()
self._detach_triggers = None
self.enabled = False
async def trigger(self, variables):
"""Run actions of this automation."""
variables["samples"] = self._center.samples
_LOGGER.debug("Triggered automation %s", self.name)
try:
cond = self._cond_func(variables)
except TemplateError as exc:
_LOGGER.error("Failed to render condition for %s: %s", self.name, exc)
return
if cond:
_LOGGER.debug("Condition passed for %s", self.name)
await self._action_sequence(variables)
class ActionSequence:
"""Represent a sequence of actions."""
# pylint: disable=too-few-public-methods
def __init__(self, center, actions):
"""Set up instance."""
self._center = center
        self.actions = list(actions)  # materialize now; actions may be a one-shot generator
async def __call__(self, variables):
"""Start action sequence."""
waiting = deque(self.actions)
while waiting:
action = waiting.popleft()
if action.action_type == "automations" and action.action_id == ACTION_DELAY:
rendered_kwargs = action.render(variables)
seconds = rendered_kwargs.get("seconds")
self.delay(float(seconds), variables, waiting)
else:
_LOGGER.debug(
"Calling action %s.%s", action.action_type, action.action_id
)
await action(variables)
def delay(self, seconds, variables, waiting):
"""Delay action sequence.
Parameters
----------
seconds : float
A time interval to delay the pending action sequence.
variables : dict
A dict of template variables.
"""
sequence = ActionSequence(self._center, waiting)
callback = partial(self._center.create_task, sequence(variables))
waiting.clear()
_LOGGER.info("Action delay for %s seconds", seconds)
callback = self._center.loop.call_later(seconds, callback)
async def cancel_pending_actions(center, event):
"""Cancel pending actions."""
callback.cancel()
self._center.bus.register(CAMACQ_STOP_EVENT, cancel_pending_actions)
class TemplateAction:
"""Representation of an action with template data."""
# pylint: disable=too-few-public-methods
def __init__(self, center, action_conf):
"""Set up instance."""
self._center = center
self.action_id = action_conf[CONF_ID]
self.action_type = action_conf[CONF_TYPE]
action_data = action_conf[CONF_DATA]
self.template = make_template(center, action_data)
async def __call__(self, variables=None):
"""Execute action with optional template variables."""
try:
rendered = self.render(variables)
except TemplateError:
return
await self._center.actions.call(self.action_type, self.action_id, **rendered)
def render(self, variables):
"""Render the template with the kwargs for the action."""
variables = variables or {}
try:
rendered = render_template(self.template, variables)
except TemplateError as exc:
_LOGGER.error(
"Failed to render variables for %s.%s: %s",
self.action_type,
self.action_id,
exc,
)
raise
return rendered
| CellProfiling/cam_acq | camacq/plugins/automations/__init__.py | Python | apache-2.0 | 10,741 |
"""Helper methods to handle the time in Home Assistant."""
from __future__ import annotations
from contextlib import suppress
import datetime as dt
import re
from typing import Any, cast
import ciso8601
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
NATIVE_UTC = dt.timezone.utc
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE # pylint: disable=global-statement
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> dt.tzinfo | None:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(NATIVE_UTC)
def now(time_zone: dt.tzinfo | None = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: dt.datetime | None = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
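# Hedged example (added for illustration; not part of the module): a minimal
# sketch showing that utc_from_timestamp and as_timestamp round-trip the Unix
# epoch. _example_timestamp_roundtrip is not an upstream helper.
def _example_timestamp_roundtrip():
    """Illustrative sketch: epoch round-trip through the timestamp helpers."""
    moment = utc_from_timestamp(0)
    assert moment == dt.datetime(1970, 1, 1, tzinfo=dt.timezone.utc)
    assert as_timestamp(moment) == 0.0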
def start_of_local_day(dt_or_d: dt.date | dt.datetime | None = None) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
elif isinstance(dt_or_d, dt.datetime):
date = dt_or_d.date()
else:
date = dt_or_d
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> dt.datetime | None:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
with suppress(ValueError, IndexError):
return ciso8601.parse_datetime(dt_str)
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: dt.tzinfo | None = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
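# Hedged example (added for illustration; not part of the module): typical
# parse_datetime inputs. _example_parse_datetime is not an upstream helper.
def _example_parse_datetime():
    """Illustrative sketch: offset-aware, naive and malformed inputs."""
    aware = parse_datetime("2021-06-05 12:30:45+02:00")
    assert aware is not None and aware.utcoffset() == dt.timedelta(hours=2)
    # Input without an offset parses as a naive datetime.
    assert parse_datetime("2021-06-05 12:30:45").tzinfo is None
    # Badly formatted input returns None instead of raising.
    assert parse_datetime("not-a-datetime") is None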
def parse_date(dt_str: str) -> dt.date | None:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> dt.time | None:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
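# Hedged example (added for illustration; not part of the module): parse_time
# accepts HH:MM and HH:MM:SS strings and returns None for anything else.
def _example_parse_time():
    """Illustrative sketch of parse_time inputs."""
    assert parse_time("00:20") == dt.time(0, 20)
    assert parse_time("23:15:30") == dt.time(23, 15, 30)
    assert parse_time("bogus") is None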
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
    The age can be in seconds, minutes, hours, days, months or years. Only the
    largest unit is reported, e.g. if the difference is 2 days and 3 hours,
    "2 days" is returned.
    The given date must not be in the future, or the result is undefined.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
delta = (now() - date).total_seconds()
rounded_delta = round(delta)
units = ["second", "minute", "hour", "day", "month"]
factors = [60, 60, 24, 30, 12]
selected_unit = "year"
for i, next_factor in enumerate(factors):
if rounded_delta < next_factor:
selected_unit = units[i]
break
delta /= next_factor
rounded_delta = round(delta)
return formatn(rounded_delta, selected_unit)
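# Hedged example (added for illustration; not part of the module): only the
# largest unit is reported by get_age.
def _example_get_age():
    """Illustrative sketch of get_age output."""
    assert get_age(now() - dt.timedelta(seconds=30)) == "30 seconds"
    assert get_age(now() - dt.timedelta(days=2, hours=3)) == "2 days"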
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> list[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str):
if parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
else:
res = [int(parameter)]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = sorted(int(x) for x in parameter)
for val in res:
if val < min_value or val > max_value:
raise ValueError(
f"Time expression '{parameter}': parameter {val} out of range "
f"({min_value} to {max_value})"
)
return res
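# Hedged example (added for illustration; not part of the module): the "/n"
# form matches every n-th value, a bare value matches once, an iterable is
# sorted, and None (or MATCH_ALL) matches every value in range.
def _example_parse_time_expression():
    """Illustrative sketch of parse_time_expression forms."""
    assert parse_time_expression("/15", 0, 59) == [0, 15, 30, 45]
    assert parse_time_expression(5, 0, 59) == [5]
    assert parse_time_expression([45, 30], 0, 59) == [30, 45]
    assert parse_time_expression(None, 0, 3) == [0, 1, 2, 3]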
def find_next_time_expression_time(
now: dt.datetime, # pylint: disable=redefined-outer-name
seconds: list[int],
minutes: list[int],
hours: list[int],
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never matches!")
def _lower_bound(arr: list[int], cmp: int) -> int | None:
"""Return the first value in arr greater or equal to cmp.
Return None if no such value exists.
"""
left = 0
right = len(arr)
while left < right:
mid = (left + right) // 2
if arr[mid] < cmp:
left = mid + 1
else:
right = mid
if left == len(arr):
return None
return arr[left]
result = now.replace(microsecond=0)
# Match next second
next_second = _lower_bound(seconds, result.second)
if next_second is None:
# No second to match in this minute. Roll-over to next minute.
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
# Match next minute
next_minute = _lower_bound(minutes, result.minute)
if next_minute != result.minute:
        # We're in the next minute. The seconds value needs to be reset.
result = result.replace(second=seconds[0])
if next_minute is None:
# No minute to match in this hour. Roll-over to next hour.
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
# Match next hour
next_hour = _lower_bound(hours, result.hour)
if next_hour != result.hour:
        # We're in the next hour. Seconds and minutes need to be reset.
result = result.replace(second=seconds[0], minute=minutes[0])
if next_hour is None:
        # No hour to match in this day. Roll-over to next day.
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if result.tzinfo is None:
return result
# Now we need to handle timezones. We will make this datetime object
# "naive" first and then re-convert it to the target timezone.
# This is so that we can call pytz's localize and handle DST changes.
tzinfo: pytzinfo.DstTzInfo = UTC if result.tzinfo == NATIVE_UTC else result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
# This happens when we're leaving daylight saving time and local
# clocks are rolled back. In this case, we want to trigger
# on both the DST and non-DST time. So when "now" is in the DST
# use the DST-on time, and if not, use the DST-off time.
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
# This happens when we're entering daylight saving time and local
# clocks are rolled forward, thus there are local times that do
# not exist. In this case, we want to trigger on the next time
# that *does* exist.
# In the worst case, this will run through all the seconds in the
# time shift, but that's max 3600 operations for once per year
result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst()) or dt.timedelta(0)
if result_dst >= now_dst:
return result
# Another edge-case when leaving DST:
# When now is in DST and ambiguous *and* the next trigger time we *should*
# trigger is ambiguous and outside DST, the excepts above won't catch it.
# For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
# we should trigger next on 28.10.2018 2:30 (out of DST), but our
# algorithm above would produce 29.10.2018 2:30 (out of DST)
# Step 1: Check if now is ambiguous
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# Step 2: Check if result of (now - DST) is ambiguous.
check = now - now_dst
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# OK, edge case does apply. We must override the DST to DST-off
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result
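# Hedged example (added for illustration; not part of the module): a naive
# (timezone-free) walk through find_next_time_expression_time for a
# "second 0, minute 0 or 30, any hour" expression.
def _example_find_next_time_expression_time():
    """Illustrative sketch: the next match after 12:31:10 is 13:00:00."""
    start = dt.datetime(2021, 6, 5, 12, 31, 10)
    seconds = parse_time_expression(0, 0, 59)
    minutes = parse_time_expression([0, 30], 0, 59)
    hours = parse_time_expression(None, 0, 23)
    assert find_next_time_expression_time(start, seconds, minutes, hours) == (
        dt.datetime(2021, 6, 5, 13, 0, 0)
    )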
| w1ll1am23/home-assistant | homeassistant/util/dt.py | Python | apache-2.0 | 12,636 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, List, Set, Tuple
from marshmallow import Schema
from sqlalchemy.orm import Session
from sqlalchemy.sql import select
from superset.charts.commands.importers.v1.utils import import_chart
from superset.charts.schemas import ImportV1ChartSchema
from superset.commands.importers.v1 import ImportModelsCommand
from superset.dashboards.commands.exceptions import DashboardImportError
from superset.dashboards.commands.importers.v1.utils import (
find_chart_uuids,
find_native_filter_datasets,
import_dashboard,
update_id_refs,
)
from superset.dashboards.dao import DashboardDAO
from superset.dashboards.schemas import ImportV1DashboardSchema
from superset.databases.commands.importers.v1.utils import import_database
from superset.databases.schemas import ImportV1DatabaseSchema
from superset.datasets.commands.importers.v1.utils import import_dataset
from superset.datasets.schemas import ImportV1DatasetSchema
from superset.models.dashboard import dashboard_slices
class ImportDashboardsCommand(ImportModelsCommand):
"""Import dashboards"""
dao = DashboardDAO
model_name = "dashboard"
prefix = "dashboards/"
schemas: Dict[str, Schema] = {
"charts/": ImportV1ChartSchema(),
"dashboards/": ImportV1DashboardSchema(),
"datasets/": ImportV1DatasetSchema(),
"databases/": ImportV1DatabaseSchema(),
}
import_error = DashboardImportError
# TODO (betodealmeida): refactor to use code from other commands
# pylint: disable=too-many-branches, too-many-locals
@staticmethod
def _import(
session: Session, configs: Dict[str, Any], overwrite: bool = False
) -> None:
# discover charts and datasets associated with dashboards
chart_uuids: Set[str] = set()
dataset_uuids: Set[str] = set()
for file_name, config in configs.items():
if file_name.startswith("dashboards/"):
chart_uuids.update(find_chart_uuids(config["position"]))
dataset_uuids.update(
find_native_filter_datasets(config.get("metadata", {}))
)
# discover datasets associated with charts
for file_name, config in configs.items():
if file_name.startswith("charts/") and config["uuid"] in chart_uuids:
dataset_uuids.add(config["dataset_uuid"])
# discover databases associated with datasets
database_uuids: Set[str] = set()
for file_name, config in configs.items():
if file_name.startswith("datasets/") and config["uuid"] in dataset_uuids:
database_uuids.add(config["database_uuid"])
# import related databases
database_ids: Dict[str, int] = {}
for file_name, config in configs.items():
if file_name.startswith("databases/") and config["uuid"] in database_uuids:
database = import_database(session, config, overwrite=False)
database_ids[str(database.uuid)] = database.id
# import datasets with the correct parent ref
dataset_info: Dict[str, Dict[str, Any]] = {}
for file_name, config in configs.items():
if (
file_name.startswith("datasets/")
and config["database_uuid"] in database_ids
):
config["database_id"] = database_ids[config["database_uuid"]]
dataset = import_dataset(session, config, overwrite=False)
dataset_info[str(dataset.uuid)] = {
"datasource_id": dataset.id,
"datasource_type": dataset.datasource_type,
"datasource_name": dataset.table_name,
}
# import charts with the correct parent ref
chart_ids: Dict[str, int] = {}
for file_name, config in configs.items():
if (
file_name.startswith("charts/")
and config["dataset_uuid"] in dataset_info
):
# update datasource id, type, and name
config.update(dataset_info[config["dataset_uuid"]])
chart = import_chart(session, config, overwrite=False)
chart_ids[str(chart.uuid)] = chart.id
# store the existing relationship between dashboards and charts
existing_relationships = session.execute(
select([dashboard_slices.c.dashboard_id, dashboard_slices.c.slice_id])
).fetchall()
# import dashboards
dashboard_chart_ids: List[Tuple[int, int]] = []
for file_name, config in configs.items():
if file_name.startswith("dashboards/"):
config = update_id_refs(config, chart_ids, dataset_info)
dashboard = import_dashboard(session, config, overwrite=overwrite)
for uuid in find_chart_uuids(config["position"]):
if uuid not in chart_ids:
break
chart_id = chart_ids[uuid]
if (dashboard.id, chart_id) not in existing_relationships:
dashboard_chart_ids.append((dashboard.id, chart_id))
# set ref in the dashboard_slices table
values = [
{"dashboard_id": dashboard_id, "slice_id": chart_id}
for (dashboard_id, chart_id) in dashboard_chart_ids
]
# pylint: disable=no-value-for-parameter # sqlalchemy/issues/4656
session.execute(dashboard_slices.insert(), values)
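# Hedged illustration (added; not part of the command): the `configs` mapping
# that _import walks is keyed by file path inside the import bundle, and the
# discovery passes above rely only on the fields sketched here. File names and
# UUID values are hypothetical placeholders, not an exhaustive schema.
_EXAMPLE_CONFIGS_SHAPE: Dict[str, Any] = {
    "dashboards/example_dashboard.yaml": {
        "uuid": "dashboard-uuid",
        "position": {},  # chart UUIDs are discovered from the position tree
        "metadata": {},  # native filter datasets are discovered from here
    },
    "charts/example_chart.yaml": {
        "uuid": "chart-uuid",
        "dataset_uuid": "dataset-uuid",
    },
    "datasets/example_dataset.yaml": {
        "uuid": "dataset-uuid",
        "database_uuid": "database-uuid",
    },
    "databases/example_database.yaml": {"uuid": "database-uuid"},
}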
| apache/incubator-superset | superset/dashboards/commands/importers/v1/__init__.py | Python | apache-2.0 | 6,297 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import importlib
import os
import six
import ujson
import django.core.urlresolvers
from django.test import TestCase
from typing import List, Optional
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Stream
from zproject import urls
class PublicURLTest(ZulipTestCase):
"""
    Account creation URLs are accessible even when not logged in. URLs that
    require authentication either redirect or return an error when the client
    is not logged in.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
# e.g. self.client_post(url) if method is "post"
response = getattr(self, method)(url)
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
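    # Hedged illustration (added): self.fetch("client_get", ["/help/"], 200) is
    # equivalent to calling self.client_get("/help/") and asserting a 200
    # response; the getattr dispatch lets one helper drive the client_get,
    # client_post, client_patch and client_put methods used below.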
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
denmark_stream_id = Stream.objects.get(name='Denmark').id
        get_urls = {200: ["/accounts/home/", "/accounts/login/",
                          "/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/json/streams/%d/members" % (denmark_stream_id,),
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent"],
}
        # Add all files in the 'templates/zerver/help' directory (except for
        # 'main.html', 'index.md' and the 'include' directory) to `get_urls[200]`.
for doc in os.listdir('./templates/zerver/help'):
if doc.startswith(".") or '~' in doc or '#' in doc:
continue # nocoverage -- just here for convenience
if doc not in {'main.html', 'index.md', 'include'}:
get_urls[200].append('/help/' + os.path.splitext(doc)[0]) # Strip the extension.
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invites",
"/json/subscriptions/exists",
"/api/v1/users/me/subscriptions/properties",
"/json/fetch_api_key",
"/json/users/me/pointer",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
patch_urls = {
401: ["/json/settings"],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("client_get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("client_post", url_set, status_code)
for status_code, url_set in six.iteritems(patch_urls):
self.fetch("client_patch", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("client_put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
self.assertEqual('error', resp.json()['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEqual(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % (
resp.status_code,))
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class URLResolutionTest(TestCase):
def get_callback_string(self, pattern):
# type: (django.core.urlresolvers.RegexURLPattern) -> Optional[str]
callback_str = hasattr(pattern, 'lookup_str') and 'lookup_str'
callback_str = callback_str or '_callback_str'
return getattr(pattern, callback_str, None)
def check_function_exists(self, module_name, view):
# type: (str, str) -> None
module = importlib.import_module(module_name)
self.assertTrue(hasattr(module, view), "View %s.%s does not exist" % (module_name, view))
# Tests that all views in urls.v1_api_and_json_patterns exist
def test_rest_api_url_resolution(self):
# type: () -> None
for pattern in urls.v1_api_and_json_patterns:
callback_str = self.get_callback_string(pattern)
if callback_str and hasattr(pattern, "default_args"):
for func_string in pattern.default_args.values():
if isinstance(func_string, tuple):
func_string = func_string[0]
module_name, view = func_string.rsplit('.', 1)
self.check_function_exists(module_name, view)
# Tests function-based views declared in urls.urlpatterns for
# whether the function exists. We at present do not test the
# class-based views.
def test_non_api_url_resolution(self):
# type: () -> None
for pattern in urls.urlpatterns:
callback_str = self.get_callback_string(pattern)
if callback_str:
(module_name, base_view) = callback_str.rsplit(".", 1)
self.check_function_exists(module_name, base_view)
| vaidap/zulip | zerver/tests/test_urls.py | Python | apache-2.0 | 6,615 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from eventlet import greenthread
import mock
from mox3 import mox
import os_xenapi
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import fixture as config_fixture
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from nova.compute import flavors
from nova.compute import power_state
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova.virt import hardware
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server',
'serverpath',
'sr_type'],
'vdi_uuid': 'falseVDI'},
ISCSI_TYPE: {'volume_id': 'fake_volume_id',
'target_lun': 1,
'target_iqn': 'fake_iqn:volume-fake_volume_id',
'target_portal': u'localhost:3260',
'target_discovered': False}, }
return fakes[sr_type]
@contextlib.contextmanager
def contextified(result):
yield result
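# Hedged example (added for illustration; not part of the original tests):
# contextified lets a plain value stand in for a context manager, which is
# handy when stubbing helpers such as vdi_attached.
def _example_contextified_usage():
    """Illustrative sketch of contextified."""
    with contextified("fake_vdi") as vdi:
        assert vdi == "fake_vdi"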
def _fake_noop(*args, **kwargs):
return
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
pass
class LookupTestCase(VMUtilsTestBase):
def setUp(self):
super(LookupTestCase, self).setUp()
self.session = self.mox.CreateMockAnything('Fake Session')
self.name_label = 'my_vm'
def _do_mock(self, result):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label).AndReturn(result)
self.mox.ReplayAll()
def test_normal(self):
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label)
self.assertEqual('x', result)
def test_no_result(self):
self._do_mock([])
result = vm_utils.lookup(self.session, self.name_label)
self.assertIsNone(result)
def test_too_many(self):
self._do_mock(['a', 'b'])
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label)
def test_rescue_none(self):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('x', result)
def test_rescue_found(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['y'])
self.mox.ReplayAll()
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('y', result)
def test_rescue_too_many(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
self.mox.ReplayAll()
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label,
check_rescue=True)
class GenerateConfigDriveTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref')
@mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata")
@mock.patch.object(vm_utils.configdrive, 'ConfigDriveBuilder')
@mock.patch.object(vm_utils.utils, 'execute')
@mock.patch.object(vm_utils.volume_utils, 'stream_to_vdi')
@mock.patch.object(vm_utils.os.path, 'getsize', return_value=100)
@mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref')
@mock.patch.object(vm_utils.utils, 'tempdir')
def test_no_admin_pass(self, mock_tmpdir, mock_create_vbd, mock_size,
mock_stream, mock_execute, mock_builder,
mock_instance_metadata, mock_create_vdi,
mock_find_sr):
mock_tmpdir.return_value.__enter__.return_value = '/mock'
with mock.patch.object(six.moves.builtins, 'open') as mock_open:
mock_open.return_value.__enter__.return_value = 'open_fd'
vm_utils.generate_configdrive('session', 'context', 'instance',
'vm_ref', 'userdevice',
'network_info')
mock_size.assert_called_with('/mock/configdrive.vhd')
mock_open.assert_called_with('/mock/configdrive.vhd')
mock_execute.assert_called_with('qemu-img', 'convert', '-Ovpc',
'/mock/configdrive',
'/mock/configdrive.vhd')
mock_instance_metadata.assert_called_with(
'instance', content=None, extra_md={},
network_info='network_info', request_context='context')
mock_stream.assert_called_with('session', 'instance', 'vhd',
'open_fd', 100, 'vdi_ref')
@mock.patch.object(vm_utils, "destroy_vdi")
@mock.patch.object(vm_utils, 'safe_find_sr')
@mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref')
@mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata",
side_effect=test.TestingException)
def test_vdi_cleaned_up(self, mock_instance_metadata, mock_create,
mock_find_sr, mock_destroy):
self.assertRaises(test.TestingException, vm_utils.generate_configdrive,
'session', None, None, None, None, None)
mock_destroy.assert_called_once_with('session', 'vdi_ref')
class XenAPIGetUUID(VMUtilsTestBase):
def test_get_this_vm_uuid_new_kernel(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
vm_utils._get_sys_hypervisor_uuid().AndReturn(
'2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
def test_get_this_vm_uuid_old_kernel_reboot(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
self.mox.StubOutWithMock(utils, 'execute')
vm_utils._get_sys_hypervisor_uuid().AndRaise(
IOError(13, 'Permission denied'))
utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
('27', ''))
utils.execute('xenstore-read', '/local/domain/27/vm',
run_as_root=True).AndReturn(
('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
class FakeSession(object):
def call_xenapi(self, *args):
pass
def call_plugin(self, *args):
pass
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
pass
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
pass
class FetchVhdImageTestCase(VMUtilsTestBase):
def setUp(self):
super(FetchVhdImageTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = FakeSession()
self.instance = {"uuid": "uuid"}
self.flags(group='glance', api_servers=['http://localhost:9292'])
self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
def _stub_glance_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized_with_retry')
func = self.session.call_plugin_serialized_with_retry(
'glance.py',
'download_vhd2',
0,
mox.IgnoreArg(),
mox.IgnoreArg(),
extra_headers={'X-Auth-Token': 'auth_token',
'X-Roles': '',
'X-Tenant-Id': None,
'X-User-Id': None,
'X-Identity-Status': 'Confirmed'},
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path')
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def test_fetch_vhd_image_works_with_glance(self):
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(
self.context, self.session, self.instance, "vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi").AndRaise(exception.FlavorDiskSmallerThanImage(
flavor_size=0, image_size=1))
self.mox.StubOutWithMock(self.session, 'call_xenapi')
self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
vm_utils.destroy_vdi(self.session,
"ref").AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._fetch_vhd_image, self.context, self.session,
self.instance, 'image_id')
self.mox.VerifyAll()
def test_fetch_vhd_image_download_exception(self):
self._stub_glance_download_vhd(raise_exc=RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
self.context, self.session, self.instance, 'image_id')
self.mox.VerifyAll()
class TestImageCompression(VMUtilsTestBase):
def test_image_compression(self):
        # Test the default (no value configured) and a valid configured value.
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=6, group='xenserver')
self.assertEqual(vm_utils.get_compression_level(), 6)
class ResizeHelpersTestCase(VMUtilsTestBase):
def setUp(self):
super(ResizeHelpersTestCase, self).setUp()
self.context = context.RequestContext('user', 'project')
def test_repair_filesystem(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('e2fsck', '-f', "-y", "fakepath",
run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
("size is: 42", ""))
self.mox.ReplayAll()
vm_utils._repair_filesystem("fakepath")
def _call_tune2fs_remove_journal(self, path):
utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
def _call_tune2fs_add_journal(self, path):
utils.execute("tune2fs", "-j", path, run_as_root=True)
def _call_parted_mkpart(self, path, start, end):
utils.execute('parted', '--script', path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', path, 'mkpart',
'primary', '%ds' % start, '%ds' % end, run_as_root=True)
def _call_parted_boot_flag(self, path):
utils.execute('parted', '--script', path, 'set', '1',
'boot', 'on', run_as_root=True)
def test_resize_part_and_fs_down_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
self._call_parted_mkpart(dev_path, 0, 9)
self._call_parted_boot_flag(dev_path)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
def test_log_progress_if_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
vm_utils.LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": 50.0, "left": 1})
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_log_progress_if_not_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
current = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(current))
time_fixture.advance_time_seconds(
vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_resize_part_and_fs_down_fails_disk_too_big(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
new_sectors = 10
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
mobj = utils.execute("resize2fs",
partition_path,
"%ss" % new_sectors,
run_as_root=True)
mobj.AndRaise(processutils.ProcessExecutionError)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs,
"fake", 0, 20, 10, "boot")
def test_resize_part_and_fs_up_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
self._call_parted_mkpart(dev_path, 0, 29)
utils.execute("resize2fs", partition_path, run_as_root=True)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
def test_resize_disk_throws_on_zero_size(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0)
self.assertRaises(exception.ResizeError, vm_utils.resize_disk,
"session", "instance", "vdi_ref", flavor)
def test_auto_config_disk_returns_early_on_zero_size(self):
vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
@mock.patch.object(utils, "execute")
def test_get_partitions(self, mock_execute):
parted_return = "BYT;\n...\n"
parted_return += "1:2s:11s:10s:ext3::boot;\n"
parted_return += "2:20s:11s:10s::bob:;\n"
mock_execute.return_value = (parted_return, None)
partitions = vm_utils._get_partitions("abc")
self.assertEqual(2, len(partitions))
self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
class CheckVDISizeTestCase(VMUtilsTestBase):
def setUp(self):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = objects.Instance(uuid=uuids.fake)
self.flavor = objects.Flavor()
self.vdi_uuid = 'fakeuuid'
def test_not_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(1073741824)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
def test_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
self.assertRaises(exception.FlavorDiskSmallerThanImage,
vm_utils._check_vdi_size, self.context,
self.session, self.instance, self.vdi_uuid)
def test_zero_root_gb_disables_check(self):
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 0
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([], result)
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'lookup', return_value='ignored')
def test_lookup_call(self, mock_lookup):
vm_utils.vm_ref_or_raise('session', 'somename')
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'lookup', return_value='vmref')
def test_return_value(self, mock_lookup):
self.assertEqual(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock_lookup.assert_called_once_with('session', 'somename')
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_exception_raised(self, mock_lookup):
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'lookup', return_value=None)
def test_exception_msg_contains_vm_name(self, mock_lookup):
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertIn('somename', six.text_type(e))
mock_lookup.assert_called_once_with('session', 'somename')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateCachedImageTestCase, self).setUp()
self.session = stubs.get_fake_session()
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
def test_no_cow_no_ext(self, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
'vdi_ref', None, None, None,
'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'vdi_uuid',
'file': None}})
def test_noncached(self, mock_fetch_image, mock_clone_vdi,
mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
None, None, None, None, None,
None, 'vdi_uuid']
self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
class ShutdownTestCase(VMUtilsTestBase):
def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.hard_shutdown_vm(
session, instance, vm_ref))
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.clean_shutdown_vm(
session, instance, vm_ref))
class CreateVBDTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateVBDTestCase, self).setUp()
self.session = FakeSession()
self.mock = mox.Mox()
self.mock.StubOutWithMock(self.session, 'call_xenapi')
self.vbd_rec = self._generate_vbd_rec()
def _generate_vbd_rec(self):
vbd_rec = {}
vbd_rec['VM'] = 'vm_ref'
vbd_rec['VDI'] = 'vdi_ref'
vbd_rec['userdevice'] = '0'
vbd_rec['bootable'] = False
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
return vbd_rec
def test_create_vbd_default_args(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_osvol(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
"osvol", "True")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
osvol=True)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_extra_args(self):
self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
self.vbd_rec['type'] = 'a'
self.vbd_rec['mode'] = 'RO'
self.vbd_rec['bootable'] = True
self.vbd_rec['empty'] = True
self.vbd_rec['unpluggable'] = False
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
vbd_type="a", read_only=True, bootable=True,
empty=True, unpluggable=False)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_attach_cd(self):
self.mock.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd(self.session, "vm_ref", None, 1,
vbd_type='cd', read_only=True, bootable=True,
empty=True, unpluggable=False).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
self.mock.ReplayAll()
result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
class UnplugVbdTestCase(VMUtilsTestBase):
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_works(self, mock_sleep):
session = stubs.get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
self.assertEqual(0, mock_sleep.call_count)
def test_unplug_vbd_raises_unexpected_error(self):
session = stubs.get_fake_session()
session.XenAPI.Failure = fake.Failure
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
session.call_xenapi.side_effect = test.TestingException()
self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_detached_works(self):
error = "DEVICE_ALREADY_DETACHED"
session = stubs.get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
session = stubs.get_fake_session("")
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
    def _test_unplug_vbd_retries(self, mock_sleep, error):
session = stubs.get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(11, session.call_xenapi.call_count)
self.assertEqual(10, mock_sleep.call_count)
    def _test_unplug_vbd_retries_with_neg_val(self):
session = stubs.get_fake_session()
self.flags(num_vbd_unplug_retries=-1, group='xenserver')
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
@mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_rejected(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep, "DEVICE_DETACH_REJECTED")
@mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_retries_on_internal_error(self, mock_sleep):
        self._test_unplug_vbd_retries(mock_sleep, "INTERNAL_ERROR")
class VDIOtherConfigTestCase(VMUtilsTestBase):
"""Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
"""
def setUp(self):
super(VDIOtherConfigTestCase, self).setUp()
class _FakeSession(object):
def call_xenapi(self, operation, *args, **kwargs):
# VDI.add_to_other_config -> VDI_add_to_other_config
method = getattr(self, operation.replace('.', '_'), None)
if method:
return method(*args, **kwargs)
self.operation = operation
self.args = args
self.kwargs = kwargs
self.session = _FakeSession()
self.context = context.get_admin_context()
self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
'name': 'myinstance'}
def test_create_vdi(self):
# Some images are registered with XenServer explicitly by calling
# `create_vdi`
vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
'myvdi', 'root', 1024, read_only=True)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, self.session.args[0]['other_config'])
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'fake-uuid'}})
def test_create_image(self, mock_vm_utils):
# Other images are registered implicitly when they are dropped into
# the SR by a dom0 plugin or some other process
self.flags(cache_images='none', group='xenserver')
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
        # Stubbing on the session object and not the class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
vm_utils.create_image(self.context, self.session, self.fake_instance,
'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
@mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_sr_path, mock_scan_sr,
mock_recv_vhd):
# Migrated images should preserve the `other_config`
other_config = {}
def VDI_add_to_other_config(ref, key, value):
other_config[key] = value
        # Stubbing on the session object and not the class so we don't pollute
# other tests
self.session.VDI_add_to_other_config = VDI_add_to_other_config
self.session.VDI_get_other_config = lambda vdi: {}
mock_sr_path.return_value = {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
vm_utils._import_migrated_vhds(self.session, self.fake_instance,
"disk_label", "root", "vdi_label")
expected = {'nova_disk_type': 'root',
'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
self.assertEqual(expected, other_config)
mock_scan_sr.assert_called_once_with(self.session)
mock_recv_vhd.assert_called_with(
self.session, "disk_label",
{'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}, mock.ANY)
mock_sr_path.assert_called_once_with(self.session)
class GenerateDiskTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch.object(vm_utils.utils, 'mkfs',
side_effect = test.TestingException())
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_with_no_fs_given(self, mock_create_vbd,
mock_create_vdi, mock_findsr,
mock_dom0ref, mock_mkfs,
mock_attached_here):
session = stubs.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'user', 10, None, None)
mock_attached_here.assert_called_once_with(session, 'vdi_ref',
read_only=False,
dom0=True)
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch.object(vm_utils.utils, 'mkfs')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils.utils, 'make_dev_path',
return_value='/dev/fake_devp1')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_swap(self, mock_create_vbd, mock_make_path,
mock_create_vdi,
mock_findsr, mock_dom0ref, mock_mkfs,
mock_attached_here):
session = stubs.get_fake_session()
vdi_dev = mock.MagicMock()
mock_attached_here.return_value = vdi_dev
vdi_dev.__enter__.return_value = 'fakedev'
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'user', 10, 'swap',
'swap-1')
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False,
dom0=True)
# As swap is supported in dom0, mkfs will run there
session.call_plugin_serialized.assert_any_call(
'partition_utils.py', 'mkfs', 'fakedev', '1', 'swap', 'swap-1')
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch.object(vm_utils.utils, 'mkfs')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils.utils, 'make_dev_path',
return_value='/dev/fake_devp1')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_ephemeral(self, mock_create_vbd, mock_make_path,
mock_create_vdi, mock_findsr,
mock_dom0ref, mock_mkfs,
mock_attached_here):
session = stubs.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vm_utils._generate_disk(session, instance, 'vm_ref', '2',
'name', 'ephemeral', 10, 'ext4',
'ephemeral-1')
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False,
dom0=True)
# As ext4 is not supported in dom0, mkfs will run in domU
mock_attached_here.assert_any_call(session, 'vdi_ref',
read_only=False)
mock_mkfs.assert_called_with('ext4', '/dev/fake_devp1',
'ephemeral-1', run_as_root=True)
mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2',
bootable=False)
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, '_get_dom0_ref',
                       side_effect=test.TestingException())
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
def test_generate_disk_ensure_cleanup_called(self, mock_destroy_vdis,
mock_dom0ref,
mock_create_vdi,
mock_findsr):
session = stubs.get_fake_session()
instance = {'uuid': 'fake_uuid'}
self.assertRaises(test.TestingException, vm_utils._generate_disk,
session, instance, None, '2', 'name', 'user', 10,
None, None)
mock_destroy_vdis.assert_called_once_with(session, ['vdi_ref'])
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
@mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
@mock.patch.object(vm_utils, 'vdi_attached')
@mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
@mock.patch.object(vm_utils, 'create_vbd')
def test_generate_disk_ephemeral_no_vmref(self, mock_create_vbd,
mock_dom0_ref,
mock_attached_here,
mock_create_vdi,
mock_findsr):
session = stubs.get_fake_session()
vdi_ref = mock.MagicMock()
mock_attached_here.return_value = vdi_ref
instance = {'uuid': 'fake_uuid'}
vdi_ref = vm_utils._generate_disk(
session, instance,
None, None, 'name', 'user', 10, None, None)
mock_attached_here.assert_called_once_with(session, 'vdi_ref',
read_only=False, dom0=True)
self.assertFalse(mock_create_vbd.called)
class GenerateEphemeralTestCase(VMUtilsTestBase):
def setUp(self):
super(GenerateEphemeralTestCase, self).setUp()
self.session = "session"
self.instance = "instance"
self.vm_ref = "vm_ref"
self.name_label = "name"
self.ephemeral_name_label = "name ephemeral"
self.userdevice = 4
self.fs_label = "ephemeral"
self.mox.StubOutWithMock(vm_utils, "_generate_disk")
self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")
def test_get_ephemeral_disk_sizes_simple(self):
result = vm_utils.get_ephemeral_disk_sizes(20)
expected = [20]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_three_disks_2000(self):
result = vm_utils.get_ephemeral_disk_sizes(4030)
expected = [2000, 2000, 30]
self.assertEqual(expected, list(result))
def test_get_ephemeral_disk_sizes_two_disks_1024(self):
result = vm_utils.get_ephemeral_disk_sizes(2048)
expected = [1024, 1024]
self.assertEqual(expected, list(result))
def _expect_generate_disk(self, size, device, name_label, fs_label):
vm_utils._generate_disk(
self.session, self.instance, self.vm_ref,
str(device), name_label, 'ephemeral',
size * 1024, None, fs_label).AndReturn(device)
def test_generate_ephemeral_adds_one_disk(self):
self._expect_generate_disk(
20, self.userdevice, self.ephemeral_name_label, self.fs_label)
self.mox.ReplayAll()
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 20)
def test_generate_ephemeral_adds_multiple_disks(self):
self._expect_generate_disk(
2000, self.userdevice, self.ephemeral_name_label, self.fs_label)
self._expect_generate_disk(
2000, self.userdevice + 1, self.ephemeral_name_label + " (1)",
self.fs_label + "1")
self._expect_generate_disk(
30, self.userdevice + 2, self.ephemeral_name_label + " (2)",
self.fs_label + "2")
self.mox.ReplayAll()
vm_utils.generate_ephemeral(
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4030)
def test_generate_ephemeral_cleans_up_on_error(self):
self._expect_generate_disk(
1024, self.userdevice, self.ephemeral_name_label, self.fs_label)
self._expect_generate_disk(
1024, self.userdevice + 1, self.ephemeral_name_label + " (1)",
self.fs_label + "1")
vm_utils._generate_disk(
self.session, self.instance, self.vm_ref,
str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
units.Mi, None, 'ephemeral2').AndRaise(exception.NovaException)
vm_utils.safe_destroy_vdis(self.session, [4, 5])
self.mox.ReplayAll()
self.assertRaises(
exception.NovaException, vm_utils.generate_ephemeral,
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4096)
class FakeFile(object):
def __init__(self):
self._file_operations = []
def seek(self, offset):
self._file_operations.append((self.seek, offset))
class StreamDiskTestCase(VMUtilsTestBase):
def setUp(self):
super(StreamDiskTestCase, self).setUp()
self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
self.mox.StubOutWithMock(vm_utils, '_write_partition')
# NOTE(matelakat): This might hide the fail reason, as test runners
# are unhappy with a mocked out open.
self.mox.StubOutWithMock(six.moves.builtins, 'open')
self.image_service_func = self.mox.CreateMockAnything()
def test_non_ami(self):
fake_file = FakeFile()
vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
vm_utils.utils.temporary_chown(
'some_path').AndReturn(contextified(None))
open('some_path', 'wb').AndReturn(contextified(fake_file))
self.image_service_func(fake_file)
self.mox.ReplayAll()
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.KERNEL, None, 'dev')
self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)
def test_ami_disk(self):
fake_file = FakeFile()
vm_utils._write_partition("session", 100, 'dev')
vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
vm_utils.utils.temporary_chown(
'some_path').AndReturn(contextified(None))
open('some_path', 'wb').AndReturn(contextified(fake_file))
self.image_service_func(fake_file)
self.mox.ReplayAll()
vm_utils._stream_disk("session", self.image_service_func,
vm_utils.ImageType.DISK, 100, 'dev')
self.assertEqual(
[(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
fake_file._file_operations)
class VMUtilsSRPath(VMUtilsTestBase):
def setUp(self):
super(VMUtilsSRPath, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
self.session = driver._session
self.session.is_local_connection = False
def test_defined(self):
self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
self.mox.StubOutWithMock(self.session, "call_xenapi")
vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
self.session.host_ref = "host_ref"
self.session.call_xenapi('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
{'pbd_ref': {'device_config': {'path': 'sr_path'}}})
self.mox.ReplayAll()
self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")
def test_default(self):
self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
self.mox.StubOutWithMock(self.session, "call_xenapi")
vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
self.session.host_ref = "host_ref"
self.session.call_xenapi('PBD.get_all_records_where',
'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
{'pbd_ref': {'device_config': {}}})
self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
{'uuid': 'sr_uuid', 'type': 'ext'})
self.mox.ReplayAll()
self.assertEqual(vm_utils.get_sr_path(self.session),
"/var/run/sr-mount/sr_uuid")
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateKernelRamdiskTestCase, self).setUp()
self.context = "context"
self.session = FakeSession()
self.instance = {"kernel_id": None, "ramdisk_id": None}
self.name_label = "name"
self.mox.StubOutWithMock(self.session, "call_plugin")
self.mox.StubOutWithMock(uuidutils, "generate_uuid")
self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")
def test_create_kernel_and_ramdisk_no_create(self):
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual((None, None), result)
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_and_ramdisk_create_both_cached(self, mock_ramdisk):
kernel_id = "kernel"
ramdisk_id = "ramdisk"
self.instance["kernel_id"] = kernel_id
self.instance["ramdisk_id"] = ramdisk_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuidutils.generate_uuid().AndReturn("fake_uuid1")
mock_ramdisk.side_effect = ["k", "r"]
args_ramdisk = {}
args_ramdisk['cached-image'] = ramdisk_id
args_ramdisk['new-image-uuid'] = "fake_uuid2"
uuidutils.generate_uuid().AndReturn("fake_uuid2")
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", "r"), result)
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_and_ramdisk_create_kernel_not_cached(self,
mock_ramdisk):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
uuidutils.generate_uuid().AndReturn("fake_uuid1")
mock_ramdisk.return_value = ""
kernel = {"kernel": {"file": "k"}}
vm_utils._fetch_disk_image(self.context, self.session, self.instance,
self.name_label, kernel_id, 0).AndReturn(kernel)
self.mox.ReplayAll()
result = vm_utils.create_kernel_and_ramdisk(self.context,
self.session, self.instance, self.name_label)
self.assertEqual(("k", None), result)
def _test_create_kernel_image(self, cache_images):
kernel_id = "kernel"
self.instance["kernel_id"] = kernel_id
args_kernel = {}
args_kernel['cached-image'] = kernel_id
args_kernel['new-image-uuid'] = "fake_uuid1"
self.flags(cache_images=cache_images, group='xenserver')
if cache_images == 'all':
uuidutils.generate_uuid().AndReturn("fake_uuid1")
else:
kernel = {"kernel": {"file": "new_image", "uuid": None}}
vm_utils._fetch_disk_image(self.context, self.session,
self.instance, self.name_label,
kernel_id, 0).AndReturn(kernel)
self.mox.ReplayAll()
result = vm_utils._create_kernel_image(self.context,
self.session,
self.instance,
self.name_label,
kernel_id, 0)
if cache_images == 'all':
self.assertEqual(result, {"kernel":
{"file": "cached_image", "uuid": None}})
else:
self.assertEqual(result, {"kernel":
{"file": "new_image", "uuid": None}})
@mock.patch.object(os_xenapi.client.disk_management,
'create_kernel_ramdisk')
def test_create_kernel_image_cached_config(self, mock_ramdisk):
mock_ramdisk.return_value = "cached_image"
self._test_create_kernel_image('all')
mock_ramdisk.assert_called_once_with(self.session, "kernel",
"fake_uuid1")
def test_create_kernel_image_uncached_config(self):
self._test_create_kernel_image('none')
class ScanSrTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, "_scan_sr")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
mock_safe_find_sr.return_value = "sr_ref"
self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))
mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")
def test_scan_sr_works(self):
session = mock.Mock()
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
def test_scan_sr_unknown_error_fails_once(self):
session = mock.Mock()
session.XenAPI.Failure = fake.Failure
session.call_xenapi.side_effect = test.TestingException
self.assertRaises(test.TestingException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
self.assertRaises(FakeException,
vm_utils._scan_sr, session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(4, session.call_xenapi.call_count)
mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])
@mock.patch.object(greenthread, 'sleep')
def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
session = mock.Mock()
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session.XenAPI.Failure = FakeException
def fake_call_xenapi(*args):
fake_call_xenapi.count += 1
if fake_call_xenapi.count != 2:
raise FakeException()
fake_call_xenapi.count = 0
session.call_xenapi.side_effect = fake_call_xenapi
vm_utils._scan_sr(session, "sr_ref")
session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
self.assertEqual(2, session.call_xenapi.call_count)
mock_sleep.assert_called_once_with(2)
@mock.patch.object(flavors, 'extract_flavor',
return_value={
'memory_mb': 1024,
'vcpus': 1,
'vcpu_weight': 1.0,
})
class CreateVmTestCase(VMUtilsTestBase):
def test_vss_provider(self, mock_extract):
self.flags(vcpu_pin_set="2,3")
session = stubs.get_fake_session()
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="windows",
system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
vm_utils.create_vm(session, instance, "label",
"kernel", "ramdisk")
vm_rec = {
'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '', 'tags': [],
'VCPUs_max': '4',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': uuids.nova_uuid},
'name_label': 'label',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '4',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': 'true',
'acpi': 'true'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False
}
session.call_xenapi.assert_called_once_with("VM.create", vm_rec)
def test_invalid_cpu_mask_raises(self, mock_extract):
self.flags(vcpu_pin_set="asdf")
session = mock.Mock()
instance = objects.Instance(uuid=uuids.fake, system_metadata={})
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self.assertRaises(exception.Invalid,
vm_utils.create_vm,
session, instance, "label",
"kernel", "ramdisk")
def test_destroy_vm(self, mock_extract):
session = mock.Mock()
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
def test_destroy_vm_silently_fails(self, mock_extract):
session = mock.Mock()
exc = test.TestingException()
session.XenAPI.Failure = test.TestingException
session.VM.destroy.side_effect = exc
instance = objects.Instance(uuid=uuids.fake)
vm_utils.destroy_vm(session, instance, "vm_ref")
session.VM.destroy.assert_called_once_with("vm_ref")
class DetermineVmModeTestCase(VMUtilsTestBase):
def _fake_object(self, updates):
return fake_instance.fake_instance_obj(None, **updates)
def test_determine_vm_mode_returns_xen_mode(self):
instance = self._fake_object({"vm_mode": "xen"})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_mode(self):
instance = self._fake_object({"vm_mode": "hvm"})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_linux(self):
instance = self._fake_object({"vm_mode": None, "os_type": "linux"})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_for_windows(self):
instance = self._fake_object({"vm_mode": None, "os_type": "windows"})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_hvm_by_default(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.HVM,
vm_utils.determine_vm_mode(instance, None))
def test_determine_vm_mode_returns_xen_for_VHD(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD))
def test_determine_vm_mode_returns_xen_for_DISK(self):
instance = self._fake_object({"vm_mode": None, "os_type": None})
self.assertEqual(obj_fields.VMMode.XEN,
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK))
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
def test_vm_get_vbd_refs(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")
def test_vbd_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
session.call_xenapi.assert_called_once_with("VBD.get_record",
"vbd_ref")
def test_vdi_get_rec(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.get_record",
"vdi_ref")
def test_vdi_snapshot(self):
session = mock.Mock()
session.call_xenapi.return_value = "foo"
self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
session.call_xenapi.assert_called_once_with("VDI.snapshot",
"vdi_ref", {})
def test_vdi_get_virtual_size(self):
session = mock.Mock()
session.call_xenapi.return_value = "123"
self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
"ref")
@mock.patch.object(vm_utils, '_get_resize_func_name')
def test_vdi_resize(self, mock_get_resize_func_name):
session = mock.Mock()
mock_get_resize_func_name.return_value = "VDI.fake"
vm_utils._vdi_resize(session, "ref", 123)
session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
mock_get_size.return_value = (1024 ** 3) - 1
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3
instance = {"uuid": "a"}
vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_resize')
@mock.patch.object(vm_utils, '_vdi_get_virtual_size')
def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
mock_resize):
mock_get_size.return_value = 1024 ** 3 + 1
instance = {"uuid": "a"}
self.assertRaises(exception.ResizeError,
vm_utils.update_vdi_virtual_size,
"s", instance, "ref", 1)
mock_get_size.assert_called_once_with("s", "ref")
self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
vdi_get_rec.return_value = {}
result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
self.assertEqual(('vdi_ref', {}), result)
vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
vbd_get_rec.assert_called_once_with(session, "a")
vdi_get_rec.assert_called_once_with(session, "vdi_ref")
def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_rec):
session = "session"
vm_get_vbd_refs.return_value = ["a", "b"]
vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
self.assertRaises(exception.NovaException,
vm_utils.get_vdi_for_vm_safely,
session, "vm_ref", userdevice='1')
self.assertEqual([], vdi_get_rec.call_args_list)
self.assertEqual(2, len(vbd_get_rec.call_args_list))
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
def fake_vbd_get_rec(session, vbd_ref):
return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}
def fake_vdi_get_uuid(session, vdi_ref):
return vdi_ref
vm_get_vbd_refs.return_value = ["0", "2"]
vbd_get_rec.side_effect = fake_vbd_get_rec
vdi_get_uuid.side_effect = fake_vdi_get_uuid
def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
expected = ['vdi_ref_0', 'vdi_ref_2']
self.assertEqual(expected, list(result))
def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid):
self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
vbd_get_rec, vdi_get_uuid)
result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
min_userdevice=1)
expected = ["vdi_ref_2"]
self.assertEqual(expected, list(result))
class GetAllVdisTestCase(VMUtilsTestBase):
def test_get_all_vdis_in_sr(self):
def fake_get_rec(record_type, ref):
if ref == "2":
return "vdi_rec_2"
session = mock.Mock()
session.call_xenapi.return_value = ["1", "2"]
session.get_rec.side_effect = fake_get_rec
sr_ref = "sr_ref"
actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
self.assertEqual(actual, [('2', 'vdi_rec_2')])
session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
def test_snapshot_attached_here(self, mock_impl):
def fake_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
self.assertEqual("session", session)
self.assertEqual("instance", instance)
self.assertEqual("vm_ref", vm_ref)
self.assertEqual("label", label)
self.assertEqual('0', userdevice)
self.assertIsNone(post_snapshot_callback)
yield "fake"
mock_impl.side_effect = fake_impl
with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
"label") as result:
self.assertEqual("fake", result)
mock_impl.assert_called_once_with("session", "instance", "vm_ref",
"label", '0', None)
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vdi_snapshot')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
mock_vdi_snapshot, mock_vdi_get_uuid,
mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
session = "session"
instance = {"uuid": "uuid"}
mock_callback = mock.Mock()
mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
{"SR": "sr_ref",
"uuid": "vdi_uuid"})
mock_vdi_snapshot.return_value = "snap_ref"
mock_vdi_get_uuid.return_value = "snap_uuid"
mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
try:
with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
"label", '2', mock_callback) as result:
self.assertEqual(["a", "b"], result)
raise test.TestingException()
self.assertTrue(False)
except test.TestingException:
pass
mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
'2')
mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
"sr_ref", "vdi_ref", ['a', 'b'])
mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
mock.call(session, "snap_uuid")])
mock_callback.assert_called_once_with(
task_state="image_pending_upload")
mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
instance, ['a', 'b'], "sr_ref")
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid"])
self.assertFalse(mock_sleep.called)
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(greenthread, 'sleep')
def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
mock_count):
mock_count.return_value = 2
instance = {"uuid": "fake"}
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertFalse(mock_sleep.called)
self.assertTrue(mock_count.called)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
self.assertRaises(exception.NovaException,
vm_utils._wait_for_vhd_coalesce, "session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertTrue(mock_count.called)
self.assertEqual(20, mock_sleep.call_count)
self.assertEqual(20, mock_scan_sr.call_count)
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
@mock.patch.object(vm_utils, '_count_children')
@mock.patch.object(vm_utils, '_scan_sr')
def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
mock_count, mock_get_vhd_parent_uuid, mock_sleep):
mock_count.return_value = 1
instance = {"uuid": "fake"}
mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
vm_utils._wait_for_vhd_coalesce("session", instance,
"sr_ref", "vdi_ref", ["uuid1", "uuid2"])
self.assertEqual(1, mock_sleep.call_count)
self.assertEqual(2, mock_scan_sr.call_count)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_count_children(self, mock_get_all_vdis_in_sr):
vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
mock_get_all_vdis_in_sr.return_value = vdis
self.assertEqual(2, vm_utils._count_children('session',
'parent1', 'sr'))
class ImportMigratedDisksTestCase(VMUtilsTestBase):
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance)
expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
mock_root.assert_called_once_with(session, instance)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
@mock.patch.object(vm_utils, '_import_migrated_root_disk')
def test_import_all_migrated_disks_import_root_false(self, mock_root,
mock_ephemeral):
session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance,
import_root=False)
expected = {'root': None, 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
self.assertEqual(0, mock_root.call_count)
mock_ephemeral.assert_called_once_with(session, instance)
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrated_root_disk(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = {"uuid": "uuid", "name": "name"}
result = vm_utils._import_migrated_root_disk("s", instance)
self.assertEqual("foo", result)
mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
"name")
@mock.patch.object(vm_utils, '_import_migrated_vhds')
def test_import_migrate_ephemeral_disks(self, mock_migrate):
mock_migrate.return_value = "foo"
instance = objects.Instance(id=1, uuid=uuids.fake)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
result = vm_utils._import_migrate_ephemeral_disks("s", instance)
self.assertEqual({'4': 'foo', '5': 'foo'}, result)
inst_uuid = instance.uuid
inst_name = instance.name
expected_calls = [mock.call("s", instance,
"%s_ephemeral_1" % inst_uuid,
"ephemeral",
"%s ephemeral (1)" % inst_name),
mock.call("s", instance,
"%s_ephemeral_2" % inst_uuid,
"ephemeral",
"%s ephemeral (2)" % inst_name)]
self.assertEqual(expected_calls, mock_migrate.call_args_list)
@mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
def test_import_migrate_ephemeral_disks_use_old_flavor(self,
mock_get_sizes):
mock_get_sizes.return_value = []
instance = objects.Instance(id=1, uuid=uuids.fake, ephemeral_gb=2000)
instance.old_flavor = objects.Flavor(ephemeral_gb=4000)
vm_utils._import_migrate_ephemeral_disks("s", instance)
mock_get_sizes.assert_called_once_with(4000)
@mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
@mock.patch.object(vm_utils, '_set_vdi_info')
@mock.patch.object(vm_utils, 'scan_default_sr')
@mock.patch.object(vm_utils, 'get_sr_path')
def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
mock_set_info, mock_recv_vhd):
session = mock.Mock()
instance = {"uuid": "uuid"}
mock_recv_vhd.return_value = {"root": {"uuid": "a"}}
session.call_xenapi.return_value = "vdi_ref"
mock_get_sr_path.return_value = "sr_path"
result = vm_utils._import_migrated_vhds(session, instance,
'chain_label', 'disk_type', 'vdi_label')
expected = {'uuid': "a", 'ref': "vdi_ref"}
self.assertEqual(expected, result)
mock_get_sr_path.assert_called_once_with(session)
mock_recv_vhd.assert_called_once_with(session, 'chain_label',
'sr_path', mock.ANY)
mock_scan_sr.assert_called_once_with(session)
session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
'vdi_label', 'disk_type', instance)
def test_get_vhd_parent_uuid_rec_provided(self):
session = mock.Mock()
vdi_ref = 'vdi_ref'
vdi_rec = {'sm_config': {}}
self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
vdi_ref,
vdi_rec))
self.assertFalse(session.call_xenapi.called)
class MigrateVHDTestCase(VMUtilsTestBase):
def _assert_transfer_called(self, session, label):
session.call_plugin_serialized.assert_called_once_with(
'migration.py', 'transfer_vhd', instance_uuid=label, host="dest",
vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_root(self, mock_trans_vhd):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2)
mock_trans_vhd.assert_called_once_with(session, "a",
"dest", "vdi_uuid", "sr_path",
2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_ephemeral(self, mock_trans_vhd):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2, 2)
mock_trans_vhd.assert_called_once_with(session, "a_ephemeral_2",
"dest", "vdi_uuid", "sr_path",
2)
@mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd')
def test_migrate_vhd_converts_exceptions(self, mock_trans_vhd):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
mock_trans_vhd.side_effect = test.TestingException()
instance = {"uuid": "a"}
self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
session, instance, "vdi_uuid", "dest", "sr_path", 2)
mock_trans_vhd.assert_called_once_with(session, "a",
"dest", "vdi_uuid", "sr_path",
2)
class StripBaseMirrorTestCase(VMUtilsTestBase):
def test_strip_base_mirror_from_vdi_works(self):
session = mock.Mock()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
def test_strip_base_mirror_from_vdi_hides_error(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_xenapi.side_effect = test.TestingException()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
@mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
def test_strip_base_mirror_from_vdis(self, mock_strip):
def call_xenapi(method, arg):
if method == "VM.get_VBDs":
return ['VBD_ref_1', 'VBD_ref_2']
if method == "VBD.get_VDI":
return 'VDI' + arg[3:]
return "Unexpected call_xenapi: %s.%s" % (method, arg)
session = mock.Mock()
session.call_xenapi.side_effect = call_xenapi
vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
expected = [mock.call('VM.get_VBDs', "vm_ref"),
mock.call('VBD.get_VDI', "VBD_ref_1"),
mock.call('VBD.get_VDI', "VBD_ref_2")]
self.assertEqual(expected, session.call_xenapi.call_args_list)
expected = [mock.call(session, "VDI_ref_1"),
mock.call(session, "VDI_ref_2")]
self.assertEqual(expected, mock_strip.call_args_list)
class DeviceIdTestCase(VMUtilsTestBase):
def test_device_id_is_none_if_not_specified_in_meta_data(self):
image_meta = objects.ImageMeta.from_dict({})
session = mock.Mock()
session.product_version = (6, 1, 0)
self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 2, 0)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
session.product_version = (6, 3, 1)
self.assertEqual(2,
vm_utils.get_vm_device_id(session, image_meta))
def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'xenapi_device_id': '0002'}})
session = mock.Mock()
session.product_version = (6, 0)
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version (6, 0)", exc.message)
session.product_version = ('6a')
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 2 specified is not supported by "
"hypervisor version 6a", exc.message)
class CreateVmRecordTestCase(VMUtilsTestBase):
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_linux(self, mock_extract_flavor):
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="linux")
self._test_create_vm_record(mock_extract_flavor, instance, False)
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_windows(self, mock_extract_flavor):
instance = objects.Instance(uuid=uuids.nova_uuid,
os_type="windows")
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self._test_create_vm_record(mock_extract_flavor, instance, True)
def _test_create_vm_record(self, mock_extract_flavor, instance,
is_viridian):
session = stubs.get_fake_session()
flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
mock_extract_flavor.return_value = flavor
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor(memory_mb=1024,
vcpus=1,
vcpu_weight=2)
vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
device_id=2)
is_viridian_str = str(is_viridian).lower()
expected_vm_rec = {
'VCPUs_params': {'cap': '0', 'weight': '2'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '',
'tags': [],
'VCPUs_max': '1',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': uuids.nova_uuid},
'name_label': 'name',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '1',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': is_viridian_str,
'acpi': 'true', 'device_id': '0002'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False}
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
def test_list_vms(self):
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
fake.create_vm("foo1", "Halted")
vm_ref = fake.create_vm("foo2", "Running")
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.list_vms(driver._session))
# Will have 3 VMs - but one is Dom0 and one is not running on the host
self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
self.assertEqual(len(result), 1)
result_keys = [key for (key, value) in result]
self.assertIn(vm_ref, result_keys)
class ChildVHDsTestCase(test.NoDBTestCase):
all_vdis = [
("my-vdi-ref",
{"uuid": "my-uuid", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("non-parent",
{"uuid": "uuid-1", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("diff-parent",
{"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child",
{"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child-snap",
{"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": True, "other_config": {}}),
]
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_defaults(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_only_snapshots(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_chain(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref",
["my-uuid", "other-uuid"], old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
def test_is_vdi_a_snapshot_works(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {}}
self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_base_images_false(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {"image-id": "fake"}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
vdi_rec = {"is_a_snapshot": False,
"other_config": {}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
instance = {"uuid": "fake"}
mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
vm_utils.remove_old_snapshots("session", instance, "vm_ref")
mock_delete.assert_called_once_with("session", instance,
["uuid1", "uuid2"], "sr_ref")
mock_get.assert_called_once_with("session", "vm_ref")
mock_walk.assert_called_once_with("session", "vdi")
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
instance = {"uuid": "fake"}
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid"], "sr")
self.assertFalse(mock_child.called)
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
instance = {"uuid": "fake"}
mock_child.return_value = []
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with("session", "sr", ["uuid2"],
old_snapshots_only=True)
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
mock_destroy, mock_scan):
instance = {"uuid": "fake"}
mock_child.return_value = ["suuid1", "suuid2"]
session = mock.Mock()
session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
vm_utils._delete_snapshots_in_vdi_chain(session, instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with(session, "sr", ["uuid2"],
old_snapshots_only=True)
session.VDI.get_by_uuid.assert_has_calls([
mock.call("suuid1"), mock.call("suuid2")])
mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
mock_scan.assert_called_once_with(session, "sr")
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
session.product_brand = brand
session.product_version = version
return vm_utils._get_resize_func_name(session)
def _test_is_resize(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize", result)
def _test_is_resize_online(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize_online", result)
def test_xenserver_5_5(self):
self._test_is_resize_online("XenServer", (5, 5, 0))
def test_xenserver_6_0(self):
self._test_is_resize("XenServer", (6, 0, 0))
def test_xcp_1_1(self):
self._test_is_resize_online("XCP", (1, 1, 0))
def test_xcp_1_2(self):
self._test_is_resize("XCP", (1, 2, 0))
def test_xcp_2_0(self):
self._test_is_resize("XCP", (2, 0, 0))
def test_random_brand(self):
self._test_is_resize("asfd", (1, 1, 0))
def test_default(self):
self._test_is_resize(None, None)
def test_empty(self):
self._test_is_resize("", "")
class VMInfoTests(VMUtilsTestBase):
def setUp(self):
super(VMInfoTests, self).setUp()
self.session = mock.Mock()
def test_get_power_state_valid(self):
# Save on test setup calls by having these simple tests in one method
self.session.call_xenapi.return_value = "Running"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.RUNNING)
self.session.call_xenapi.return_value = "Halted"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SHUTDOWN)
self.session.call_xenapi.return_value = "Paused"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.PAUSED)
self.session.call_xenapi.return_value = "Suspended"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SUSPENDED)
self.session.call_xenapi.return_value = "Crashed"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.CRASHED)
def test_get_power_state_invalid(self):
self.session.call_xenapi.return_value = "Invalid"
self.assertRaises(KeyError,
vm_utils.get_power_state, self.session, "ref")
_XAPI_record = {'power_state': 'Running',
'memory_static_max': str(10 << 10),
'memory_dynamic_max': str(9 << 10),
'VCPUs_max': '5'}
def test_compile_info(self):
def call_xenapi(method, *args):
if method.startswith('VM.get_') and args[0] == 'dummy':
return self._XAPI_record[method[7:]]
self.session.call_xenapi.side_effect = call_xenapi
info = vm_utils.compile_info(self.session, "dummy")
self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING,
max_mem_kb=10, mem_kb=9,
num_cpu='5', cpu_time_ns=0),
info)
| Juniper/nova | nova/tests/unit/virt/xenapi/test_vm_utils.py | Python | apache-2.0 | 98,282 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
# This can be used with self.assertRaisesRegexp for assert_like_rnncell.
ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell"
def assert_like_rnncell(cell_name, cell):
"""Raises a TypeError if cell is not like an RNNCell.
NOTE: Do not rely on the error message (in particular in tests) which can be
subject to change to increase readability. Use
ASSERT_LIKE_RNNCELL_ERROR_REGEXP.
Args:
    cell_name: A string used to give a meaningful error message referencing
      the name of the function argument.
cell: The object which should behave like an RNNCell.
Raises:
TypeError: A human-friendly exception.
"""
conditions = [
hasattr(cell, "output_size"),
hasattr(cell, "state_size"),
hasattr(cell, "get_initial_state") or hasattr(cell, "zero_state"),
callable(cell),
]
errors = [
"'output_size' property is missing",
"'state_size' property is missing",
"either 'zero_state' or 'get_initial_state' method is required",
"is not callable"
]
if not all(conditions):
errors = [error for error, cond in zip(errors, conditions) if not cond]
raise TypeError("The argument {!r} ({}) is not an RNNCell: {}.".format(
cell_name, cell, ", ".join(errors)))
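# Illustrative sketch, not part of the original module: a minimal object that
# satisfies the duck-typing checks above. The names below are hypothetical and
# exist only to show which attributes assert_like_rnncell inspects; nothing
# here runs at import time.
def _example_assert_like_rnncell():  # pragma: no cover - illustrative only
  class _MinimalCell(object):
    output_size = 4
    state_size = 4
    def zero_state(self, batch_size, dtype):
      return None
    def __call__(self, inputs, state):
      return inputs, state
  assert_like_rnncell("cell", _MinimalCell())  # passes: all conditions hold
  # assert_like_rnncell("cell", object())      # would raise TypeError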
def _concat(prefix, suffix, static=False):
"""Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and a dynamic Tensor output
      was requested.
"""
if isinstance(prefix, ops.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if p.shape.ndims == 0:
p = array_ops.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError("prefix tensor must be either a scalar or vector, "
"but saw tensor: %s" % p)
else:
p = tensor_shape.as_shape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
if p.is_fully_defined() else None)
if isinstance(suffix, ops.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if s.shape.ndims == 0:
s = array_ops.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError("suffix tensor must be either a scalar or vector, "
"but saw tensor: %s" % s)
else:
s = tensor_shape.as_shape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
if s.is_fully_defined() else None)
if static:
shape = tensor_shape.as_shape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError("Provided a prefix or suffix of None: %s and %s"
% (prefix, suffix))
shape = array_ops.concat((p, s), 0)
return shape
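# Illustrative sketch, not part of the original module: how _concat combines a
# batch size with a per-cell state size. The numbers are arbitrary examples and
# the helper below is never called at import time.
def _example_concat_usage():  # pragma: no cover - illustrative only
  # Static mode returns a plain python list of dimensions.
  static_shape = _concat(32, tensor_shape.TensorShape([5]), static=True)
  assert static_shape == [32, 5]
  # Dynamic mode returns a rank-1 int32 Tensor usable with array_ops.zeros.
  return _concat(32, 5)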
def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
size = array_ops.zeros(c, dtype=dtype)
if not context.executing_eagerly():
c_static = _concat(batch_size, s, static=True)
size.set_shape(c_static)
return size
return nest.map_structure(get_state_shape, state_size)
@tf_export("nn.rnn_cell.RNNCell")
class RNNCell(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
"""
def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
super(RNNCell, self).__init__(
trainable=trainable, name=name, dtype=dtype, **kwargs)
    # Attribute that indicates whether the cell is a TF RNN cell, due to the
    # slight difference between TF and Keras RNN cells.
self._is_tf_rnn_cell = True
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size, self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size, s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
if scope is not None:
with vs.variable_scope(scope,
custom_getter=self._rnn_get_variable) as scope:
return super(RNNCell, self).__call__(inputs, state, scope=scope)
else:
scope_attrname = "rnncell_scope"
scope = getattr(self, scope_attrname, None)
if scope is None:
scope = vs.variable_scope(vs.get_variable_scope(),
custom_getter=self._rnn_get_variable)
setattr(self, scope_attrname, scope)
with scope:
return super(RNNCell, self).__call__(inputs, state)
def _rnn_get_variable(self, getter, *args, **kwargs):
variable = getter(*args, **kwargs)
if context.executing_eagerly():
trainable = variable._trainable # pylint: disable=protected-access
else:
trainable = (
variable in tf_variables.trainable_variables() or
(isinstance(variable, tf_variables.PartitionedVariable) and
list(variable)[0] in tf_variables.trainable_variables()))
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def build(self, _):
# This tells the parent Layer object that it's OK to call
# self.add_variable() inside the call() method.
pass
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if inputs is not None:
# Validate the given batch_size and dtype against inputs if provided.
inputs = ops.convert_to_tensor(inputs, name="inputs")
if batch_size is not None:
if tensor_util.is_tensor(batch_size):
static_batch_size = tensor_util.constant_value(
batch_size, partial=True)
else:
static_batch_size = batch_size
if inputs.shape.dims[0].value != static_batch_size:
raise ValueError(
"batch size from input tensor is different from the "
"input param. Input tensor batch: {}, batch_size: {}".format(
inputs.shape.dims[0].value, batch_size))
if dtype is not None and inputs.dtype != dtype:
raise ValueError(
"dtype from input tensor is different from the "
"input param. Input tensor dtype: {}, dtype: {}".format(
inputs.dtype, dtype))
batch_size = inputs.shape.dims[0].value or array_ops.shape(inputs)[0]
dtype = inputs.dtype
if None in [batch_size, dtype]:
raise ValueError(
"batch_size and dtype cannot be None while constructing initial "
"state: batch_size={}, dtype={}".format(batch_size, dtype))
return self.zero_state(batch_size, dtype)
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
      If `state_size` is an int or TensorShape, then the return value is an
      `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size, s]` for each s in `state_size`.
"""
# Try to use the last cached zero_state. This is done to avoid recreating
# zeros, especially when eager execution is enabled.
state_size = self.state_size
is_eager = context.executing_eagerly()
if is_eager and hasattr(self, "_last_zero_state"):
(last_state_size, last_batch_size, last_dtype,
last_output) = getattr(self, "_last_zero_state")
if (last_batch_size == batch_size and
last_dtype == dtype and
last_state_size == state_size):
return last_output
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
output = _zero_state_tensors(state_size, batch_size, dtype)
if is_eager:
self._last_zero_state = (state_size, batch_size, dtype, output)
return output
class LayerRNNCell(RNNCell):
"""Subclass of RNNCells that act like proper `tf.Layer` objects.
  For backwards compatibility purposes, most `RNNCell` instances allow their
  `call` methods to instantiate variables via `tf.get_variable`. The underlying
  variable scope thus keeps track of any variables and returns cached
  versions. This is atypical of `tf.layers` objects, which separate this
  part of layer building into a `build` method that is only called once.
  Here we provide a subclass for `RNNCell` objects that act exactly as
  `Layer` objects do. They must provide a `build` method, and their
  `call` methods must not create variables via `tf.get_variable`.
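  A minimal sketch of such a cell (illustrative only; the hypothetical
  `MinimalLayerRNNCell` mirrors the `BasicRNNCell` defined below and assumes
  this module's imports):
  ```python
  class MinimalLayerRNNCell(LayerRNNCell):
    def __init__(self, num_units, **kwargs):
      super(MinimalLayerRNNCell, self).__init__(**kwargs)
      self._num_units = num_units
    @property
    def state_size(self):
      return self._num_units
    @property
    def output_size(self):
      return self._num_units
    @tf_utils.shape_type_conversion
    def build(self, inputs_shape):
      # Variables are created once here instead of inside call().
      self._kernel = self.add_variable(
          "kernel", shape=[inputs_shape[-1] + self._num_units, self._num_units])
      self.built = True
    def call(self, inputs, state):
      output = math_ops.tanh(
          math_ops.matmul(array_ops.concat([inputs, state], 1), self._kernel))
      return output, output
  ```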
"""
def __call__(self, inputs, state, scope=None, *args, **kwargs):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size, self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size, s] for s in self.state_size`.
scope: optional cell scope.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
# Bypass RNNCell's variable capturing semantics for LayerRNNCell.
# Instead, it is up to subclasses to provide a proper build
# method. See the class docstring for more details.
return base_layer.Layer.__call__(self, inputs, state, scope=scope,
*args, **kwargs)
@tf_export(v1=["nn.rnn_cell.BasicRNNCell"])
class BasicRNNCell(LayerRNNCell):
"""The most basic RNN cell.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.
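  For example, a single step can be run as follows (an illustrative sketch;
  `inputs` is assumed to be a `[32, input_depth]` float tensor):
  ```python
  cell = BasicRNNCell(num_units=64)
  state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
  output, new_state = cell(inputs, state)  # both have shape [32, 64]
  ```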
Args:
num_units: int, The number of units in the RNN cell.
    activation: Nonlinearity to use. Default: `tanh`. It can also be a string
      matching a Keras activation function name.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable`, etc., when constructing the cell from the config of
      `get_config()`.
"""
  @deprecated(None, "This class is equivalent to tf.keras.layers.SimpleRNNCell,"
              " and will be replaced by that in TensorFlow 2.0.")
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
super(BasicRNNCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_depth = inputs_shape[-1]
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, self._num_units])
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype))
self.built = True
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, state], 1), self._kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
return output, output
def get_config(self):
config = {
"num_units": self._num_units,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(BasicRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export(v1=["nn.rnn_cell.GRUCell"])
class GRUCell(LayerRNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or
`tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU.
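  For example (an illustrative sketch; `inputs` is assumed to be a
  `[32, input_depth]` float tensor):
  ```python
  cell = GRUCell(num_units=128)
  state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
  output, new_state = cell(inputs, state)  # both have shape [32, 128]
  ```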
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable`, etc., when constructing the cell from the config of
      `get_config()`.
"""
  @deprecated(None, "This class is equivalent to tf.keras.layers.GRUCell,"
              " and will be replaced by that in TensorFlow 2.0.")
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None,
name=None,
dtype=None,
**kwargs):
super(GRUCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnGRU for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
self._kernel_initializer = initializers.get(kernel_initializer)
self._bias_initializer = initializers.get(bias_initializer)
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_depth = inputs_shape[-1]
self._gate_kernel = self.add_variable(
"gates/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, 2 * self._num_units],
initializer=self._kernel_initializer)
self._gate_bias = self.add_variable(
"gates/%s" % _BIAS_VARIABLE_NAME,
shape=[2 * self._num_units],
initializer=(
self._bias_initializer
if self._bias_initializer is not None
else init_ops.constant_initializer(1.0, dtype=self.dtype)))
self._candidate_kernel = self.add_variable(
"candidate/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, self._num_units],
initializer=self._kernel_initializer)
self._candidate_bias = self.add_variable(
"candidate/%s" % _BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=(
self._bias_initializer
if self._bias_initializer is not None
else init_ops.zeros_initializer(dtype=self.dtype)))
self.built = True
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, state], 1), self._gate_kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)
value = math_ops.sigmoid(gate_inputs)
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
candidate = math_ops.matmul(
array_ops.concat([inputs, r_state], 1), self._candidate_kernel)
candidate = nn_ops.bias_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_h = u * state + (1 - u) * c
return new_h, new_h
def get_config(self):
config = {
"num_units": self._num_units,
"kernel_initializer": initializers.serialize(self._kernel_initializer),
"bias_initializer": initializers.serialize(self._bias_initializer),
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
@tf_export("nn.rnn_cell.LSTMStateTuple")
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
  Stores two elements, `(c, h)`, in that order, where `c` is the hidden state
  and `h` is the output.
Only used when `state_is_tuple=True`.
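  For example (an illustrative sketch; `c` and `h` are assumed to be
  `[batch_size, num_units]` tensors of the same dtype):
  ```python
  state = LSTMStateTuple(c=c, h=h)
  memory, output = state              # regular namedtuple unpacking
  memory, output = state.c, state.h   # or attribute access
  ```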
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if c.dtype != h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
@tf_export(v1=["nn.rnn_cell.BasicLSTMCell"])
class BasicLSTMCell(LayerRNNCell):
"""DEPRECATED: Please use `tf.nn.rnn_cell.LSTMCell` instead.
Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting at the beginning of training.
  It does not allow cell clipping or a projection layer, and does not
  use peep-hole connections: it is the basic baseline.
For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
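  For example (an illustrative sketch; `inputs` is assumed to be a
  `[32, input_depth]` float tensor):
  ```python
  cell = BasicLSTMCell(num_units=128)
  state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
  output, state = cell(inputs, state)
  # output: [32, 128]; state: LSTMStateTuple(c=[32, 128], h=[32, 128])
  ```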
"""
  @deprecated(None, "This class is equivalent to tf.keras.layers.LSTMCell,"
              " and will be replaced by that in TensorFlow 2.0.")
def __init__(self,
num_units,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
Must set to `0.0` manually when restoring from CudnnLSTM-trained
checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`. It
        can also be a string matching a Keras activation function name.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable`, etc., when constructing the cell from the config of
        `get_config()`.
When restoring from CudnnLSTM-trained checkpoints, must use
`CudnnCompatibleLSTMCell` instead.
"""
super(BasicLSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_depth = inputs_shape[-1]
h_depth = self._num_units
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units])
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype))
self.built = True
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size, num_units]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size, 2 * num_units]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
sigmoid = math_ops.sigmoid
one = constant_op.constant(1, dtype=dtypes.int32)
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, h], 1), self._kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(
value=gate_inputs, num_or_size_splits=4, axis=one)
forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
# Note that using `add` and `multiply` instead of `+` and `*` gives a
# performance improvement. So using those at the cost of readability.
add = math_ops.add
multiply = math_ops.multiply
new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),
multiply(sigmoid(i), self._activation(j)))
new_h = multiply(self._activation(new_c), sigmoid(o))
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(BasicLSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export(v1=["nn.rnn_cell.LSTMCell"])
class LSTMCell(LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
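  For example, with a projection layer (an illustrative sketch; `inputs` is
  assumed to be a `[32, input_depth]` float tensor):
  ```python
  cell = LSTMCell(num_units=256, num_proj=128, use_peepholes=True)
  state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
  output, state = cell(inputs, state)
  # output: [32, 128]; state: LSTMStateTuple(c=[32, 256], h=[32, 128])
  ```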
"""
  @deprecated(None, "This class is equivalent to tf.keras.layers.LSTMCell,"
              " and will be replaced by that in TensorFlow 2.0.")
def __init__(self, num_units,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=None, reuse=None, name=None, dtype=None, **kwargs):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`. It
        can also be a string matching a Keras activation function name.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such
cases.
dtype: Default dtype of the layer (default of `None` means use the type
of the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable`, etc., when constructing the cell from the config of
        `get_config()`.
When restoring from CudnnLSTM-trained checkpoints, use
`CudnnCompatibleLSTMCell` instead.
"""
super(LSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn("%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializers.get(initializer)
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
% str(inputs_shape))
input_depth = inputs_shape[-1]
h_depth = self._num_units if self._num_proj is None else self._num_proj
maybe_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None
else None)
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units],
initializer=self._initializer,
partitioner=maybe_partitioner)
if self.dtype is None:
initializer = init_ops.zeros_initializer
else:
initializer = init_ops.zeros_initializer(dtype=self.dtype)
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=initializer)
if self._use_peepholes:
self._w_f_diag = self.add_variable("w_f_diag", shape=[self._num_units],
initializer=self._initializer)
self._w_i_diag = self.add_variable("w_i_diag", shape=[self._num_units],
initializer=self._initializer)
self._w_o_diag = self.add_variable("w_o_diag", shape=[self._num_units],
initializer=self._initializer)
if self._num_proj is not None:
maybe_proj_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None
else None)
self._proj_kernel = self.add_variable(
"projection/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[self._num_units, self._num_proj],
initializer=self._initializer,
partitioner=maybe_proj_partitioner)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
input_size = inputs.get_shape().with_rank(2).dims[1].value
if input_size is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = math_ops.matmul(
array_ops.concat([inputs, m_prev], 1), self._kernel)
lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
m = math_ops.matmul(m, self._proj_kernel)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
array_ops.concat([c, m], 1))
return m, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"use_peepholes": self._use_peepholes,
"cell_clip": self._cell_clip,
"initializer": initializers.serialize(self._initializer),
"num_proj": self._num_proj,
"proj_clip": self._proj_clip,
"num_unit_shards": self._num_unit_shards,
"num_proj_shards": self._num_proj_shards,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure_up_to(shallow_structure,
enumerated_fn, *args, **kwargs)
def _default_dropout_state_filter_visitor(substate):
if isinstance(substate, LSTMStateTuple):
# Do not perform dropout on the memory state.
return LSTMStateTuple(c=False, h=True)
elif isinstance(substate, tensor_array_ops.TensorArray):
return False
return True
@tf_export("nn.rnn_cell.DropoutWrapper")
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
state_keep_prob=1.0, variational_recurrent=False,
input_size=None, dtype=None, seed=None,
dropout_state_filter_visitor=None):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
then the same dropout mask is applied at every step, as described in:
Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
Otherwise a different dropout mask is applied at every time step.
Note, by default (unless a custom `dropout_state_filter` is provided),
the memory state (`c` component of any `LSTMStateTuple`) passing through
a `DropoutWrapper` is never modified. This behavior is described in the
above article.
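    For example, a cell is typically wrapped before being passed to an RNN
    (an illustrative sketch; `inputs` is assumed to be a
    `[batch_size, time, depth]` float tensor and `tf.nn.dynamic_rnn` is the
    standard TF 1.x driver):
    ```python
    cell = DropoutWrapper(LSTMCell(128),
                          input_keep_prob=0.9,
                          output_keep_prob=0.9,
                          state_keep_prob=0.9)
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    ```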
Args:
      cell: an RNNCell.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, state keep
        probability; if it is constant and 1, no state dropout will be added.
State dropout is performed on the outgoing states of the cell.
**Note** the state components to which dropout is applied when
`state_keep_prob` is in `(0, 1)` are also determined by
the argument `dropout_state_filter_visitor` (e.g. by default dropout
is never applied to the `c` component of an `LSTMStateTuple`).
variational_recurrent: Python bool. If `True`, then the same
dropout pattern is applied across all time steps per run call.
If this parameter is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff**
`variational_recurrent = True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
dropout_state_filter_visitor: (optional), default: (see below). Function
that takes any hierarchical level of the state and returns
a scalar or depth=1 structure of Python booleans describing
which terms in the state should be dropped out. In addition, if the
function returns `True`, dropout is applied across this sublevel. If
the function returns `False`, dropout is not applied across this entire
sublevel.
Default behavior: perform dropout on all terms except the memory (`c`)
        state of `LSTMStateTuple` objects, and don't try to apply dropout to
`TensorArray` objects:
```
def dropout_state_filter_visitor(s):
          if isinstance(s, LSTMStateTuple):
            # Never perform dropout on the c state.
            return LSTMStateTuple(c=False, h=True)
elif isinstance(s, TensorArray):
return False
return True
```
Raises:
TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
but not `callable`.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
super(DropoutWrapper, self).__init__()
assert_like_rnncell("cell", cell)
if (dropout_state_filter_visitor is not None
and not callable(dropout_state_filter_visitor)):
raise TypeError("dropout_state_filter_visitor must be callable")
self._dropout_state_filter = (
dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
with ops.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError("Parameter %s must be between 0 and 1: %d"
% (attr, const_prob))
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set cell, variational_recurrent, seed before running the code below
self._cell = cell
if isinstance(cell, checkpointable.CheckpointableBase):
self._track_checkpointable(self._cell, name="cell")
self._variational_recurrent = variational_recurrent
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return array_ops.concat(
([1], tensor_shape.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure_up_to(
input_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure_up_to(
cell.state_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure_up_to(
cell.output_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
@property
def wrapped_cell(self):
return self._cell
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,
shallow_filtered_substructure=None):
"""Decides whether to perform standard dropout or recurrent dropout."""
if shallow_filtered_substructure is None:
# Put something so we traverse the entire structure; inside the
# dropout function we check to see if leafs of this are bool or not.
shallow_filtered_substructure = values
if not self._variational_recurrent:
def dropout(i, do_dropout, v):
if not isinstance(do_dropout, bool) or do_dropout:
return nn_ops.dropout(
v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values])
else:
def dropout(i, do_dropout, v, n):
if not isinstance(do_dropout, bool) or do_dropout:
return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values, recurrent_noise])
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
def _should_dropout(p):
return (not isinstance(p, float)) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, "input",
self._recurrent_input_noise,
self._input_keep_prob)
output, new_state = self._cell(inputs, state, scope=scope)
if _should_dropout(self._state_keep_prob):
# Identify which subsets of the state to perform dropout on and
# which ones to keep.
shallow_filtered_substructure = nest.get_traverse_shallow_structure(
self._dropout_state_filter, new_state)
new_state = self._dropout(new_state, "state",
self._recurrent_state_noise,
self._state_keep_prob,
shallow_filtered_substructure)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, "output",
self._recurrent_output_noise,
self._output_keep_prob)
return output, new_state
@tf_export("nn.rnn_cell.ResidualWrapper")
class ResidualWrapper(RNNCell):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell, residual_fn=None):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
residual_fn: (Optional) The function to map raw cell inputs and raw cell
outputs to the actual cell outputs of the residual network.
Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
and outputs.
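    For example (an illustrative sketch; because the default `residual_fn`
    adds inputs to outputs elementwise, `inputs` is assumed to be a
    `[32, 128]` float tensor so its depth matches the wrapped cell's
    `output_size`):
    ```python
    cell = ResidualWrapper(GRUCell(num_units=128))
    state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
    output, new_state = cell(inputs, state)  # output = inputs + gru_output
    ```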
"""
super(ResidualWrapper, self).__init__()
self._cell = cell
if isinstance(cell, checkpointable.CheckpointableBase):
self._track_checkpointable(self._cell, name="cell")
self._residual_fn = residual_fn
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell and then apply the residual_fn on its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
def default_residual_fn(inputs, outputs):
nest.assert_same_structure(inputs, outputs)
nest.map_structure(assert_shape_match, inputs, outputs)
return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
return (res_outputs, new_state)
@tf_export("nn.rnn_cell.DeviceWrapper")
class DeviceWrapper(RNNCell):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
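    For example (an illustrative sketch; `inputs` is assumed to be a 2-D float
    tensor and the device string follows the usual TensorFlow naming scheme):
    ```python
    cell = DeviceWrapper(GRUCell(num_units=128), "/device:GPU:0")
    state = cell.zero_state(batch_size=32, dtype=dtypes.float32)
    output, new_state = cell(inputs, state)  # ops placed on GPU 0
    ```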
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
"""
super(DeviceWrapper, self).__init__()
self._cell = cell
if isinstance(cell, checkpointable.CheckpointableBase):
self._track_checkpointable(self._cell, name="cell")
self._device = device
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
with ops.device(self._device):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell on specified device."""
with ops.device(self._device):
return self._cell(inputs, state, scope=scope)
@tf_export(v1=["nn.rnn_cell.MultiRNNCell"])
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells.
Example:
```python
num_units = [128, 64]
cells = [BasicLSTMCell(num_units=n) for n in num_units]
stacked_rnn_cell = MultiRNNCell(cells)
```
"""
  @deprecated(None, "This class is equivalent to "
              "tf.keras.layers.StackedRNNCells, and will be replaced by "
              "that in TensorFlow 2.0.")
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
super(MultiRNNCell, self).__init__()
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
if not nest.is_sequence(cells):
raise TypeError(
"cells must be a list or tuple, but saw: %s." % cells)
if len(set([id(cell) for cell in cells])) < len(cells):
logging.log_first_n(logging.WARN,
"At least two cells provided to MultiRNNCell "
"are the same object and will share weights.", 1)
self._cells = cells
for cell_number, cell in enumerate(self._cells):
# Add Checkpointable dependencies on these cells so their variables get
# saved with this object when using object-based saving.
if isinstance(cell, checkpointable.CheckpointableBase):
# TODO(allenl): Track down non-Checkpointable callers.
self._track_checkpointable(cell, name="cell-%d" % (cell_number,))
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum(cell.state_size for cell in self._cells)
@property
def output_size(self):
return self._cells[-1].output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._state_is_tuple:
return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
else:
# We know here that state_size of each cell is not a tuple and
# presumably does not contain TensorArrays or anything else fancy
return super(MultiRNNCell, self).zero_state(batch_size, dtype)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self._cells:
if isinstance(cell, base_layer.Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self._cells:
if isinstance(cell, base_layer.Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self._cells:
if isinstance(cell, base_layer.Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def call(self, inputs, state):
"""Run this multi-layer cell on inputs, starting from state."""
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s" %
(len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(state, [0, cur_state_pos],
[-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
array_ops.concat(new_states, 1))
return cur_inp, new_states
| asimshankar/tensorflow | tensorflow/python/ops/rnn_cell_impl.py | Python | apache-2.0 | 61,366 |