#------------------------------------------------------------------------------
#
#  Copyright (c) 2005, Enthought, Inc.
#  All rights reserved.
#
#  This software is provided without warranty under the terms of the BSD
#  license included in enthought/LICENSE.txt and may be redistributed only
#  under the conditions described in the aforementioned license.  The license
#  is also available online at http://www.enthought.com/licenses/BSD.txt
#
#  Thanks for using Enthought open source!
#
#  Author: Enthought, Inc.
#
#------------------------------------------------------------------------------
""" Enthought pyface package component
"""

# Standard library imports.
import os
import tempfile

from six.moves import cStringIO as StringIO

# Major package imports.
import wx

# Enthought library imports.
from pyface.resource.api import ResourceFactory
from traits.api import Undefined


class PyfaceResourceFactory(ResourceFactory):
    """ The implementation of a shared resource manager. """

    ###########################################################################
    # 'ResourceFactory' toolkit interface.
    ###########################################################################

    def image_from_file(self, filename):
        """ Creates an image from the data in the specified filename. """

        # N.B. 'wx.BITMAP_TYPE_ANY' tells wxPython to attempt to autodetect the
        # --- image format.
        return wx.Image(filename, wx.BITMAP_TYPE_ANY)

    def image_from_data(self, data, filename=None):
        """ Creates an image from the specified data. """

        try:
            return wx.ImageFromStream(StringIO(data))
        except:
            # wx.ImageFromStream is only in wx 2.8 or later(?)
            if filename is Undefined:
                return None

        handle = None
        if filename is None:
            # If there is currently no way in wx to create an image from data,
            # we have to write it out to a temporary file and then read it
            # back in:
            handle, filename = tempfile.mkstemp()

        # Write it out...
        tf = open(filename, 'wb')
        tf.write(data)
        tf.close()

        # ... and read it back in!  Lovely 8^()
        image = wx.Image(filename, wx.BITMAP_TYPE_ANY)

        # Remove the temporary file.
        if handle is not None:
            os.close(handle)
            os.unlink(filename)

        return image

#### EOF ######################################################################
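# A minimal usage sketch of the factory above (not part of the original
# module). The filename is hypothetical, and a wx.App must exist before
# wxPython can load images.
app = wx.App()

factory = PyfaceResourceFactory()

# From a file on disk (the format is auto-detected)...
image = factory.image_from_file('splash.png')

# ...or from raw bytes, e.g. previously read from a resource bundle.
with open('splash.png', 'rb') as f:
    image = factory.image_from_data(f.read())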
Prince Albert made a public apology for the role of the Monegasque police in the Second World War during a visit to a Monaco cemetery on Thursday. There, Albert unveiled a memorial to those who were deported from Monaco to Nazi concentration camps during WWII. During a speech at the unveiling, the Prince took the opportunity to apologise on behalf of Monaco for its involvement in the war. “We committed the irreparable in handing over to the neighbouring authorities women, men and a child who had taken refuge with us to escape the persecutions they had suffered in France. In distress, they came specifically to take shelter with us thinking they would find neutrality.”

Throughout the war, Monaco remained politically neutral but was pressured by neighbouring Italy. This led the Monegasque police to round up those who had escaped France hoping to find safety in Monaco, and send them to concentration camps. A total of 90 people were deported, including at least 66 Jews. Only nine survived their time in the camps.

Thursday’s speech marks the first time that the small Principality has publicly acknowledged its actions in the persecution of Jews during the war. Albert also announced the Monaco government’s plans to provide compensation for the property that had been seized from some of the deportees.

The Prince’s sentiments were welcomed by Dr. Moshe Kantor, president of the European Jewish Congress, who said in a statement to The Associated Press, “there is no time limit on true introspection and regret.”

Photo: Eirik Solheim
def button_empty(self, timeout=5):
    """Drain any pending button reports from the device."""
    if not self.dev_mode:
        # Keep reading until the device returns no data; read() yields an
        # empty list once the report queue is drained.
        state = [0]
        while len(state) > 0:
            state = self.device.read(NUM_KEYS + 1, timeout_ms=timeout)
    else:
        # In dev mode, delegate to the development stub instead.
        self.dev_button_empty()
package com.cryptape.cita_wallet.event;

import com.cryptape.cita_wallet.item.App;

public class AppHistoryEvent {
    public App app;

    public AppHistoryEvent(App app, long timestamp) {
        this.app = app;
        this.app.timestamp = timestamp;
    }
}
//
//  Load up the unit list from the current config, optionally selecting an initial
//  unit id.
//
tCIDLib::TVoid TZWaveLevi2CWnd::LoadUnits()
{
    tCIDLib::TStrList colCols(m_c4ColInd_Name + 1);
    for (tCIDLib::TCard4 c4Index = 0; c4Index <= m_c4ColInd_Name; c4Index++)
        colCols.objAdd(TString::strEmpty());

    TWndPaintJanitor janPaint(m_pwndUnits);

    m_pwndUnits->RemoveAll();

    TString strFmt;
    const tCIDLib::TCard4 c4UnitCnt = m_dcfgCur.c4UnitCnt();
    if (c4UnitCnt)
    {
        for (tCIDLib::TCard4 c4Index = 0; c4Index < c4UnitCnt; c4Index++)
        {
            const TZWaveUnit& unitCur = m_dcfgCur.unitAt(c4Index);

            colCols[m_c4ColInd_Enabled] = facCQCKit().strBoolYesNo(!unitCur.bDisabled());

            // Format the id as a two digit hex value, padding single-digit
            // hex ids (below 0x10) with a leading zero.
            colCols[m_c4ColInd_Id] = L"0x";
            if (unitCur.c4Id() < 0x10)
                colCols[m_c4ColInd_Id].Append(L'0');
            colCols[m_c4ColInd_Id].AppendFormatted(unitCur.c4Id(), tCIDLib::ERadices::Hex);

            if (unitCur.bMissing())
                colCols[m_c4ColInd_Status] = L"Missing";
            else if (unitCur.bFailed())
                colCols[m_c4ColInd_Status] = L"Failed";
            else
                colCols[m_c4ColInd_Status] = L"Online";

            colCols[m_c4ColInd_BasicType] = tZWaveLevi2Sh::strXlatEGenTypes(unitCur.eGenType());

            strFmt = unitCur.strMake();
            if (!strFmt.bIsEmpty())
            {
                strFmt.Append(kCIDLib::chForwardSlash);
                strFmt.Append(unitCur.strModel());
            }
            colCols[m_c4ColInd_MakeModel] = strFmt;

            colCols[m_c4ColInd_Name] = unitCur.strName();

            m_pwndUnits->c4AddItem(colCols, unitCur.c4Id());
        }
        m_pwndUnits->SelectByIndex(0, kCIDLib::True);
    }
}
import mock import os import pandas as pd from datetime import datetime from flexmock import flexmock from sportsreference import utils from sportsreference.constants import HOME from sportsreference.nba.constants import BOXSCORE_URL, BOXSCORES_URL from sportsreference.nba.boxscore import Boxscore, Boxscores MONTH = 10 YEAR = 2017 BOXSCORE = '201710310LAL' def read_file(filename): filepath = os.path.join(os.path.dirname(__file__), 'nba', filename) return open('%s' % filepath, 'r', encoding='utf8').read() def mock_pyquery(url): class MockPQ: def __init__(self, html_contents): self.status_code = 200 self.html_contents = html_contents self.text = html_contents if url == BOXSCORES_URL % (2, 4, YEAR): return MockPQ(read_file('boxscores-2-4-2017.html')) if url == BOXSCORES_URL % (2, 5, YEAR): return MockPQ(read_file('boxscores-2-5-2017.html')) boxscore = read_file('%s.html' % BOXSCORE) return MockPQ(boxscore) class MockDateTime: def __init__(self, year, month): self.year = year self.month = month class TestNBABoxscore: @mock.patch('requests.get', side_effect=mock_pyquery) def setup_method(self, *args, **kwargs): self.results = { 'date': '10:30 PM, October 31, 2017', 'location': 'STAPLES Center, Los Angeles, California', 'winner': HOME, 'winning_name': 'Los Angeles Lakers', 'winning_abbr': 'LAL', 'losing_name': '<NAME>', 'losing_abbr': 'DET', 'pace': 97.4, 'away_wins': 5, 'away_losses': 3, 'away_minutes_played': 240, 'away_field_goals': 41, 'away_field_goal_attempts': 94, 'away_field_goal_percentage': .436, 'away_two_point_field_goals': 31, 'away_two_point_field_goal_attempts': 61, 'away_two_point_field_goal_percentage': .508, 'away_three_point_field_goals': 10, 'away_three_point_field_goal_attempts': 33, 'away_three_point_field_goal_percentage': .303, 'away_free_throws': 1, 'away_free_throw_attempts': 3, 'away_free_throw_percentage': .333, 'away_offensive_rebounds': 10, 'away_defensive_rebounds': 34, 'away_total_rebounds': 44, 'away_assists': 21, 'away_steals': 7, 'away_blocks': 3, 'away_turnovers': 12, 'away_personal_fouls': 11, 'away_points': 93, 'away_true_shooting_percentage': .488, 'away_effective_field_goal_percentage': .489, 'away_three_point_attempt_rate': .351, 'away_free_throw_attempt_rate': .032, 'away_offensive_rebound_percentage': 19.2, 'away_defensive_rebound_percentage': 75.6, 'away_total_rebound_percentage': 45.4, 'away_assist_percentage': 51.2, 'away_steal_percentage': 7.2, 'away_block_percentage': 4.6, 'away_turnover_percentage': 11.2, 'away_offensive_rating': 95.5, 'away_defensive_rating': 116.0, 'home_wins': 3, 'home_losses': 4, 'home_minutes_played': 240, 'home_field_goals': 45, 'home_field_goal_attempts': 91, 'home_field_goal_percentage': .495, 'home_two_point_field_goals': 33, 'home_two_point_field_goal_attempts': 65, 'home_two_point_field_goal_percentage': .508, 'home_three_point_field_goals': 12, 'home_three_point_field_goal_attempts': 26, 'home_three_point_field_goal_percentage': .462, 'home_free_throws': 11, 'home_free_throw_attempts': 14, 'home_free_throw_percentage': .786, 'home_offensive_rebounds': 11, 'home_defensive_rebounds': 42, 'home_total_rebounds': 53, 'home_assists': 30, 'home_steals': 9, 'home_blocks': 5, 'home_turnovers': 14, 'home_personal_fouls': 14, 'home_points': 113, 'home_true_shooting_percentage': .582, 'home_effective_field_goal_percentage': .560, 'home_three_point_attempt_rate': .286, 'home_free_throw_attempt_rate': .154, 'home_offensive_rebound_percentage': 24.4, 'home_defensive_rebound_percentage': 80.8, 'home_total_rebound_percentage': 54.6, 
'home_assist_percentage': 66.7, 'home_steal_percentage': 9.2, 'home_block_percentage': 8.2, 'home_turnover_percentage': 12.6, 'home_offensive_rating': 116.0, 'home_defensive_rating': 95.5 } flexmock(utils) \ .should_receive('_todays_date') \ .and_return(MockDateTime(YEAR, MONTH)) self.boxscore = Boxscore(BOXSCORE) def test_nba_boxscore_returns_requested_boxscore(self): for attribute, value in self.results.items(): assert getattr(self.boxscore, attribute) == value assert getattr(self.boxscore, 'summary') == { 'away': [22, 23, 27, 21], 'home': [25, 33, 29, 26] } def test_invalid_url_yields_empty_class(self): flexmock(Boxscore) \ .should_receive('_retrieve_html_page') \ .and_return(None) boxscore = Boxscore(BOXSCORE) for key, value in boxscore.__dict__.items(): if key == '_uri': continue assert value is None def test_nba_boxscore_dataframe_returns_dataframe_of_all_values(self): df = pd.DataFrame([self.results], index=[BOXSCORE]) # Pandas doesn't natively allow comparisons of DataFrames. # Concatenating the two DataFrames (the one generated during the test # and the expected one above) and dropping duplicate rows leaves only # the rows that are unique between the two frames. This allows a quick # check of the DataFrame to see if it is empty - if so, all rows are # duplicates, and they are equal. frames = [df, self.boxscore.dataframe] df1 = pd.concat(frames).drop_duplicates(keep=False) assert df1.empty def test_nba_boxscore_players(self): assert len(self.boxscore.home_players) == 13 assert len(self.boxscore.away_players) == 13 for player in self.boxscore.home_players: assert not player.dataframe.empty for player in self.boxscore.away_players: assert not player.dataframe.empty def test_nba_boxscore_string_representation(self): expected = ('Boxscore for Detroit Pistons at Los Angeles Lakers ' '(10:30 PM, October 31, 2017)') boxscore = Boxscore(BOXSCORE) assert boxscore.__repr__() == expected class TestNBABoxscores: def setup_method(self): self.expected = { '2-4-2017': [ {'home_name': 'Atlanta', 'home_abbr': 'ATL', 'home_score': 113, 'boxscore': '201702040ATL', 'away_name': 'Orlando', 'away_abbr': 'ORL', 'away_score': 86, 'winning_name': 'Atlanta', 'winning_abbr': 'ATL', 'losing_name': 'Orlando', 'losing_abbr': 'ORL'}, {'home_name': 'Indiana', 'home_abbr': 'IND', 'home_score': 105, 'boxscore': '201702040IND', 'away_name': 'Detroit', 'away_abbr': 'DET', 'away_score': 84, 'winning_name': 'Indiana', 'winning_abbr': 'IND', 'losing_name': 'Detroit', 'losing_abbr': 'DET'}, {'home_name': 'Miami', 'home_abbr': 'MIA', 'home_score': 125, 'boxscore': '201702040MIA', 'away_name': 'Philadelphia', 'away_abbr': 'PHI', 'away_score': 102, 'winning_name': 'Miami', 'winning_abbr': 'MIA', 'losing_name': 'Philadelphia', 'losing_abbr': 'PHI'}, {'home_name': 'Minnesota', 'home_abbr': 'MIN', 'home_score': 99, 'boxscore': '201702040MIN', 'away_name': 'Memphis', 'away_abbr': 'MEM', 'away_score': 107, 'winning_name': 'Memphis', 'winning_abbr': 'MEM', 'losing_name': 'Minnesota', 'losing_abbr': 'MIN'}, {'home_name': 'New York', 'home_abbr': 'NYK', 'home_score': 104, 'boxscore': '201702040NYK', 'away_name': 'Cleveland', 'away_abbr': 'CLE', 'away_score': 111, 'winning_name': 'Cleveland', 'winning_abbr': 'CLE', 'losing_name': 'New York', 'losing_abbr': 'NYK'}, {'home_name': 'Phoenix', 'home_abbr': 'PHO', 'home_score': 112, 'boxscore': '201702040PHO', 'away_name': 'Milwaukee', 'away_abbr': 'MIL', 'away_score': 137, 'winning_name': 'Milwaukee', 'winning_abbr': 'MIL', 'losing_name': 'Phoenix', 'losing_abbr': 'PHO'}, {'home_name': 
'Sacramento', 'home_abbr': 'SAC', 'home_score': 109, 'boxscore': '201702040SAC', 'away_name': '<NAME>', 'away_abbr': 'GSW', 'away_score': 106, 'winning_name': 'Sacramento', 'winning_abbr': 'SAC', 'losing_name': 'Golden State', 'losing_abbr': 'GSW'}, {'home_name': '<NAME>', 'home_abbr': 'SAS', 'home_score': 121, 'boxscore': '201702040SAS', 'away_name': 'Denver', 'away_abbr': 'DEN', 'away_score': 97, 'winning_name': '<NAME>', 'winning_abbr': 'SAS', 'losing_name': 'Denver', 'losing_abbr': 'DEN'}, {'home_name': 'Utah', 'home_abbr': 'UTA', 'home_score': 105, 'boxscore': '201702040UTA', 'away_name': 'Charlotte', 'away_abbr': 'CHO', 'away_score': 98, 'winning_name': 'Utah', 'winning_abbr': 'UTA', 'losing_name': 'Charlotte', 'losing_abbr': 'CHO'}, {'home_name': 'Washington', 'home_abbr': 'WAS', 'home_score': 105, 'boxscore': '201702040WAS', 'away_name': '<NAME>', 'away_abbr': 'NOP', 'away_score': 91, 'winning_name': 'Washington', 'winning_abbr': 'WAS', 'losing_name': '<NAME>', 'losing_abbr': 'NOP'}, ] } @mock.patch('requests.get', side_effect=mock_pyquery) def test_boxscores_search(self, *args, **kwargs): result = Boxscores(datetime(2017, 2, 4)).games assert result == self.expected @mock.patch('requests.get', side_effect=mock_pyquery) def test_boxscores_search_invalid_end(self, *args, **kwargs): result = Boxscores(datetime(2017, 2, 4), datetime(2017, 2, 3)).games assert result == self.expected @mock.patch('requests.get', side_effect=mock_pyquery) def test_boxscores_search_multiple_days(self, *args, **kwargs): expected = { '2-4-2017': [ {'boxscore': '201702040ATL', 'away_name': 'Orlando', 'away_abbr': 'ORL', 'away_score': 86, 'home_name': 'Atlanta', 'home_abbr': 'ATL', 'home_score': 113, 'winning_name': 'Atlanta', 'winning_abbr': 'ATL', 'losing_name': 'Orlando', 'losing_abbr': 'ORL'}, {'boxscore': '201702040IND', 'away_name': 'Detroit', 'away_abbr': 'DET', 'away_score': 84, 'home_name': 'Indiana', 'home_abbr': 'IND', 'home_score': 105, 'winning_name': 'Indiana', 'winning_abbr': 'IND', 'losing_name': 'Detroit', 'losing_abbr': 'DET'}, {'boxscore': '201702040MIA', 'away_name': 'Philadelphia', 'away_abbr': 'PHI', 'away_score': 102, 'home_name': 'Miami', 'home_abbr': 'MIA', 'home_score': 125, 'winning_name': 'Miami', 'winning_abbr': 'MIA', 'losing_name': 'Philadelphia', 'losing_abbr': 'PHI'}, {'boxscore': '201702040MIN', 'away_name': 'Memphis', 'away_abbr': 'MEM', 'away_score': 107, 'home_name': 'Minnesota', 'home_abbr': 'MIN', 'home_score': 99, 'winning_name': 'Memphis', 'winning_abbr': 'MEM', 'losing_name': 'Minnesota', 'losing_abbr': 'MIN'}, {'boxscore': '201702040NYK', 'away_name': 'Cleveland', 'away_abbr': 'CLE', 'away_score': 111, 'home_name': 'New York', 'home_abbr': 'NYK', 'home_score': 104, 'winning_name': 'Cleveland', 'winning_abbr': 'CLE', 'losing_name': 'New York', 'losing_abbr': 'NYK'}, {'boxscore': '201702040PHO', 'away_name': 'Milwaukee', 'away_abbr': 'MIL', 'away_score': 137, 'home_name': 'Phoenix', 'home_abbr': 'PHO', 'home_score': 112, 'winning_name': 'Milwaukee', 'winning_abbr': 'MIL', 'losing_name': 'Phoenix', 'losing_abbr': 'PHO'}, {'boxscore': '201702040SAC', 'away_name': '<NAME>', 'away_abbr': 'GSW', 'away_score': 106, 'home_name': 'Sacramento', 'home_abbr': 'SAC', 'home_score': 109, 'winning_name': 'Sacramento', 'winning_abbr': 'SAC', 'losing_name': '<NAME>', 'losing_abbr': 'GSW'}, {'boxscore': '201702040SAS', 'away_name': 'Denver', 'away_abbr': 'DEN', 'away_score': 97, 'home_name': '<NAME>', 'home_abbr': 'SAS', 'home_score': 121, 'winning_name': '<NAME>', 'winning_abbr': 'SAS', 
'losing_name': 'Denver', 'losing_abbr': 'DEN'}, {'boxscore': '201702040UTA', 'away_name': 'Charlotte', 'away_abbr': 'CHO', 'away_score': 98, 'home_name': 'Utah', 'home_abbr': 'UTA', 'home_score': 105, 'winning_name': 'Utah', 'winning_abbr': 'UTA', 'losing_name': 'Charlotte', 'losing_abbr': 'CHO'}, {'boxscore': '201702040WAS', 'away_name': '<NAME>', 'away_abbr': 'NOP', 'away_score': 91, 'home_name': 'Washington', 'home_abbr': 'WAS', 'home_score': 105, 'winning_name': 'Washington', 'winning_abbr': 'WAS', 'losing_name': '<NAME>', 'losing_abbr': 'NOP'} ], '2-5-2017': [ {'boxscore': '201702050BOS', 'away_name': '<NAME>', 'away_abbr': 'LAC', 'away_score': 102, 'home_name': 'Boston', 'home_abbr': 'BOS', 'home_score': 107, 'winning_name': 'Boston', 'winning_abbr': 'BOS', 'losing_name': '<NAME>', 'losing_abbr': 'LAC'}, {'boxscore': '201702050BRK', 'away_name': 'Toronto', 'away_abbr': 'TOR', 'away_score': 103, 'home_name': 'Brooklyn', 'home_abbr': 'BRK', 'home_score': 95, 'winning_name': 'Toronto', 'winning_abbr': 'TOR', 'losing_name': 'Brooklyn', 'losing_abbr': 'BRK'}, {'boxscore': '201702050OKC', 'away_name': 'Portland', 'away_abbr': 'POR', 'away_score': 99, 'home_name': 'Oklahoma City', 'home_abbr': 'OKC', 'home_score': 105, 'winning_name': 'Oklahoma City', 'winning_abbr': 'OKC', 'losing_name': 'Portland', 'losing_abbr': 'POR'} ] } result = Boxscores(datetime(2017, 2, 4), datetime(2017, 2, 5)).games assert result == expected @mock.patch('requests.get', side_effect=mock_pyquery) def test_boxscores_search_string_representation(self, *args, **kwargs): result = Boxscores(datetime(2017, 2, 4)) assert result.__repr__() == 'NBA games for 2-4-2017'
package octopus import ( "testing" ) /* * Integration tests */ // Get machine by Id (successful). func Test_Client_GetMachine_Success(test *testing.T) { testClientRequest(test, &ClientTest{ APIKey: "my-test-api-key", Request: func(test *testing.T, client *Client) { machine, err := client.GetMachine("Machines-1") if err != nil { test.Fatal(err) } verifyGetMachineTestResponse(test, machine) }, Respond: testRespondOK(getMachineTestResponse), }) } /* * Test responses. */ const getMachineTestResponse = ` { "Id": "Machines-1", "Name": "my-server.lab.au.test.cloud", "Thumbprint": "B3092FEA722388E326CFF4D2F6E124B5727AEEDC", "Uri": "https://10.110.21.15:10933/", "IsDisabled": false, "EnvironmentIds": [ "Environments-1", "Environments-2" ], "Roles": [ "auditing-db", "identity-db", "provisioning-db", "semantic-logger" ], "Status": "NeedsUpgrade", "HasLatestCalamari": true, "StatusSummary": "This machine is running an old version of Tentacle (3.2.19).", "Endpoint": { "CommunicationStyle": "TentaclePassive", "Uri": "https://10.110.21.15:10933/", "Thumbprint": "B3092FEA722388E326CFF4D2F6E124B5727AEEDC", "TentacleVersionDetails": { "UpgradeLocked": false, "Version": "3.2.19", "UpgradeSuggested": true, "UpgradeRequired": false }, "Id": null, "LastModifiedOn": null, "LastModifiedBy": null, "Links": {} }, "Links": { "Self": "/api/machines/Machines-1", "Connection": "/api/machines/Machines-1/connection" } } ` func verifyGetMachineTestResponse(test *testing.T, machine *Machine) { expect := expect(test) expect.NotNil("Machine", machine) expect.EqualsString("Machine.ID", "Machines-1", machine.ID) expect.EqualsString("Machine.URI", "https://10.110.21.15:10933/", machine.URI) expect.EqualsString("Machine.Thumbprint", "B3092FEA722388E326CFF4D2F6E124B5727AEEDC", machine.Thumbprint) }
/**
 * @Author: Kooo
 * @Date: Created in 2018/9/2
 * @Modified By:
 * @Description:
 */
@SpringBootApplication
@EnableZuulProxy
@EnableEurekaClient
@EnableDiscoveryClient
public class ApigatewayApplication {

    public static void main(String[] args) {
        SpringApplication.run(ApigatewayApplication.class, args);
    }
}
def plot_equity_and_debt(self, additional_monthly_payment):
    results, _ = self.calculate_all(additional_monthly_payment)

    fig, ax = plt.subplots(figsize=(10, 5))
    years = [month / 12.0 for month in results['months']]

    ax.plot(years, results['debts'], label='Debt', color='r')
    ax.plot(years, results['values'], label='Property value', color='b')
    ax.plot(years, results['equities'], label='Equity', color='g')

    ax.fill_between(
        years, scipy.zeros(len(results['debts'])), results['debts'],
        facecolor='r', alpha=0.3)
    ax.fill_between(
        years, scipy.zeros(len(results['values'])), results['values'],
        facecolor='b', alpha=0.15)
    ax.fill_between(
        years, scipy.zeros(len(results['equities'])), results['equities'],
        facecolor='g', alpha=0.3)

    ax.legend(loc='upper left')
    ax.set(xlabel='Years', ylabel='Value [$]', title='Equity and debt by month')
    ax.grid()
    plt.show()
import { AzureRequest } from './azure-request'; import { AzureReply } from './azure-reply'; export function createHandlerAdapter(handler) { return context => { context.res = context.res || {}; const req = new AzureRequest(context); const res = new AzureReply(context); handler(req, res); }; }
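// A sketch of wiring the adapter above into an Azure Functions entry point.
// The handler body and export shape are assumptions, not part of the module
// above; what `req` and `res` expose depends on AzureRequest and AzureReply.
const handler = (req: AzureRequest, res: AzureReply) => {
  // ... read from req, write the response through res ...
};

export default createHandlerAdapter(handler);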
from distutils.core import setup

setup(name='kanji_stroke_scraper',
      version='0.3.01',
      #description='Kao Tessur Deck Package',
      author='<NAME>',
      author_email='<EMAIL>',
      #url='http://www.python.org/sigs/distutils-sig/',
      packages=['kanji_stroke_scraper'],
      install_requires=['argparse', 'backoff', 'pyperclip', 'requests-html',
                        'cached-property'],
      scripts=['scripts/scrapekanjidiagram'])
class OmnivoxSemester:
    """
    Represents a semester.
    """

    def __init__(self, semester_id: str, semester_name: str, current: bool):
        """
        Initializes a semester instance.

        :param semester_id: The ID of the semester. The format is usually Year(+)Index, e.g. 20181
        :param semester_name: The name of the semester. Example: Fall 2018
        :param current: Whether this semester is the current semester.
        """
        self.id = semester_id
        self.name = semester_name
        self.current = current

    def __repr__(self) -> str:
        return f"Semester(id={self.id}, name={self.name}, current={self.current})"
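# A quick usage sketch with made-up values, showing the __repr__ output:
fall = OmnivoxSemester("20181", "Fall 2018", current=True)
print(fall)  # Semester(id=20181, name=Fall 2018, current=True)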
Transcatheter closure of very large secundum atrial septal defects: long-term follow-up study Aims To evaluate the efficacy and safety of transcatheter closure of very large secundum atrial septal defects (ASD), with long-term follow-up results. Methods and results From June 2001 to June 2008, 74 patients diagnosed with a secundum atrial septal defect with a diameter of more than 30 mm were enrolled in the study. Percutaneous closure of the ASDs was attempted using the domestic Shanghai ASD-O device, guided by transthoracic echocardiography (TTE). Of the 74 patients, 68 were adults. Closure was successful in 71 of 74 patients (96%). The mean diameter of the device was 38.2±2.73 mm. During follow-up, complete closure was achieved in 100% of patients. Seven adverse events (9.9%) were reported; most were categorised as minor, and there was one major adverse event (1.4%). Conclusion Transcatheter device closure is an effective management option for patients with very large ASDs. Serious complications were rare.
/// Enable or disable the memory pattern optimization
///
/// The idea is that if the input shapes are the same, we can trace the internal
/// memory allocations and generate a memory pattern for future requests, so that
/// the next run can be served with a single allocation of one big chunk covering
/// all of the internal memory needed.
///
/// Note: memory pattern optimization is only available when sequential execution
/// mode is enabled.
pub fn mem_pattern_enabled(self, mem_pattern_enabled: bool) -> Result<SessionBuilder> {
    if mem_pattern_enabled {
        let status = call_ort!(EnableMemPattern, self.session_options_ptr);
        check_status(status)?;
    } else {
        // Explicitly turn the optimization off so that passing `false` takes
        // effect; the ONNX Runtime C API provides DisableMemPattern alongside
        // EnableMemPattern.
        let status = call_ort!(DisableMemPattern, self.session_options_ptr);
        check_status(status)?;
    }
    Ok(self)
}
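// A minimal usage sketch. The environment/builder construction and
// `with_model_from_file` are assumptions about the surrounding crate's API,
// shown only to illustrate where this option fits.
let session = environment
    .new_session_builder()?
    .mem_pattern_enabled(true)? // trace allocations for repeated input shapes
    .with_model_from_file("model.onnx")?;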
from gyb_syntax_support import SYNTAX_TOKEN_MAP, create_node_map, SYNTAX_NODES from gyb_syntax_support.kinds import SYNTAX_BASE_KINDS from gyb_syntax_support.kinds import lowercase_first_word from ExpressibleAsConformances import SYNTAX_BUILDABLE_EXPRESSIBLE_AS_CONFORMANCES from utils import flat_documentation class SyntaxBuildableChild: """ Wrapper around the `Child` type defined in `gyb_syntax_support` to provide functionality specific to SwiftSyntaxBuilder. """ def __init__(self, child_node): self.child = child_node def name(self): """ The lowercase name of the child. Can be used for variable names. """ return self.child.swift_name def type(self): """ The type of this child, represented by a `SyntaxBuildableType`, which can be used to create the corresponding `Buildable` and `ExpressibleAs` types. """ return SyntaxBuildableType(self.child.syntax_kind, self.child.is_optional) def generate_expr_build_syntax_node(self, var_name, format_name): """ Generate a Swift expression that creates a proper SwiftSyntax node of type `self.type().syntax()` from a variable named `var_name` of type `self.type().buildable()` that represents this child node. """ if self.type().is_token(): if self.child.requires_leading_newline: return var_name + '.withLeadingTrivia(.newlines(1) + ' + format_name + '._makeIndent() + (' + var_name + '.leadingTrivia ?? []))' else: return var_name else: format = format_name if self.child.is_indented: format += '._indented()' expr = var_name if self.type().is_optional: expr = expr + '?' return expr + '.build' + self.type().base_name() + '(format: ' + format + ', leadingTrivia: nil)' def generate_assert_stmt_text_choices(self, var_name): """ If this node is a token that can't contain arbitrary text, generate a Swift `assert` statement that verifies the variable with name var_name and of type `TokenSyntax` contains one of the supported text options. Otherwise return `None`. """ if not self.type().is_token(): return None if self.child.text_choices: text_choices = self.child.text_choices elif self.child.token_choices: text_choices = [SYNTAX_TOKEN_MAP.get(token_choice.name + 'Token').text for token_choice in self.child.token_choices] else: return None if None in text_choices: # If None is in the text choices, one of the token options can contain arbitrary text. # Don't generate an assert statement. return None assert_choices = [] if self.type().is_optional: assert_choices.append('%s == nil' % var_name) unwrap = '!' if self.type().is_optional else '' for text_choice in text_choices: assert_choices.append('%s%s.text == "%s"' % (var_name, unwrap, text_choice)) return 'assert(%s)' % ' || '.join(assert_choices) def documentation(self): """ If the child node has documentation associated with it, return it as a single-line string. Otherwise return an empty string. """ return flat_documentation(self.child.description) class SyntaxBuildableNode: """ Wrapper around the `Node` type defined in `gyb_syntax_support` to provide functionality specific to SwiftSyntaxBuilder. """ def __init__(self, node): """ Create a SyntaxBuildableNode, either from - a `Node` type defined in `gyb_syntax_support` or - a `SwiftBuildableType`, assuming that that type doesn't represent one of the base kinds """ if isinstance(node, SyntaxBuildableType): assert node.base_name() not in SYNTAX_BASE_KINDS, "Syntax base kinds are not represented by Nodes" self.node = create_node_map()[node.base_name()] else: self.node = node def children(self): """ Returns the children of this node as `SyntaxBuildableChild`ren. 
""" return [SyntaxBuildableChild(child) for child in self.node.children] def documentation(self): """ If documentation exists for this node, return it as a single-line string. Otherwise return an empty string. """ if not self.node.description and self.node.is_syntax_collection(): # Automatically generate documentation for syntax collections. return '`%s` represents a collection of `%s`s.' % (self.node.syntax_kind, self.collection_element_type().buildable()) return flat_documentation(self.node.description) def type(self): return SyntaxBuildableType(self.node.syntax_kind) def base_type(self): """ Returns the base type of this node, e.g. the `Expr` type for `IdentiferExpr`. """ return SyntaxBuildableType(self.node.base_kind) def collection_element_type(self): """ Assuming that this node is a syntax collection, return the type of its elements. """ assert self.node.is_syntax_collection() return SyntaxBuildableType(self.node.collection_element) def elements_separated_by_newline(self): """ Assuming that this node is a syntax colleciton, return whether the elements should be separated by newlines when the syntax tree is printed. """ assert self.node.is_syntax_collection() return self.node.elements_separated_by_newline def single_non_defaulted_child(self): """ Assuming that this node has a single child without a default value, return that child. """ non_defaulted_params = [child for child in self.children() if not child.type().default_initialization()] assert len(non_defaulted_params) == 1 return non_defaulted_params[0] class SyntaxBuildableType: """ Wrapper around the syntax_kind strings in `gyb_syntax_support` to provide functionality specific to SwiftSyntaxBuilder. In particular, this includes the functionality to create the `*Buildable`, `ExpressibleAs*` and `*Syntax` Swift types from the syntax kind. """ def __init__(self, syntax_kind, is_optional = False): self.is_optional = is_optional if syntax_kind.endswith('Token'): # There are different token kinds but all of them are represented by `Token` in the Swift source (see `kind_to_type` in `gyb_syntax_support`). self.syntax_kind = 'Token' self.token_kind = syntax_kind else: self.syntax_kind = syntax_kind self.token_kind = None def __eq__(self, other): return self.syntax_kind == other.syntax_kind and \ self.is_optional == other.is_optional and \ self.token_kind == other.token_kind def _optional_question_mark(self): if self.is_optional: return '?' else: return '' def non_optional(self): """ Returns this type with `is_optional` set to `False`. """ if self.is_token(): return SyntaxBuildableType(self.token_kind) else: return SyntaxBuildableType(self.syntax_kind) def is_token(self): return self.syntax_kind == 'Token' def token(self): return SYNTAX_TOKEN_MAP.get(self.token_kind) def default_initialization(self): """ If the type has a default value (because it is optional or because it is a token with a fixed text), return an expression of the form ` = default_value` that can be used as the default value to for a function parameter. Otherwise, return an empty string. """ if self.is_optional: return ' = nil' elif self.is_token(): token = self.token() if token and token.text: return ' = TokenSyntax.`%s`' % lowercase_first_word(token.name) else: return '' else: return '' def is_syntax_collection(self): if self.syntax_kind == 'SyntaxCollection': return True if self.base_type(): return self.base_type().is_syntax_collection() else: return False def base_name(self): """ Returns the raw base name of this kind. 
Used for the `build*` methods defined in the buildable types. """ return self.syntax_kind def buildable(self): """ Return the name of the `Buildable` type that is the main entry point for building SwiftSyntax trees using `SwiftSyntaxBuilder`. These names look as follows: - For nodes: The node name, e.g. `IdentifierExpr` (these are implemented as structs) - For base kinds: `<BaseKind>Buildable`, e.g. `ExprBuildable` (these are implemented as protocols) - For token: `TokenSyntax` (tokens don't have a dedicated type in SwiftSyntaxBuilder) If the type is optional, this terminates with a '?'. """ if self.is_token(): # Tokens don't have a dedicated buildable type. return 'TokenSyntax' + self._optional_question_mark() elif self.syntax_kind in SYNTAX_BASE_KINDS: return self.syntax_kind + 'Buildable' + self._optional_question_mark() else: return self.syntax_kind + self._optional_question_mark() def buildable_base_name(self): """ Returns the type from `buildable()` without any question marks attached. This is used for the `create*` methods defined in the `ExpressibleAs*` protocols. """ return self.non_optional().buildable() def expressible_as(self): """ Return the `ExpressibleAs*` Swift type for this syntax kind. Tokens don't have an `ExpressibleAs*` type, so for those this method just returns `TokenSyntax`. If the type is optional, this terminates with a '?'. """ if self.is_token(): # Tokens don't have a dedicated ExpressibleAs type. return self.buildable() else: return 'ExpressibleAs' + self.buildable() def syntax(self): """ Returns the corresponding `*Syntax` type defined in the `SwiftSyntax` module, which will eventually get built from `SwiftSyntaxBuilder`. If the type is optional, this terminates with a '?'. """ if self.syntax_kind == 'Syntax': return 'Syntax' + self._optional_question_mark() else: return self.syntax_kind + 'Syntax' + self._optional_question_mark() def list_buildable(self): """ Assuming that this is a base kind, return the corresponding `*ListBuildable` type. """ assert self.syntax_kind in SYNTAX_BASE_KINDS, "ListBuildable types only exist for syntax base kinds" return self.syntax_kind + 'ListBuildable' + self._optional_question_mark() def result_builder(self): """ Assuming that this is a collection type, the type of the result builder that can be used to build the collection. """ return self.syntax_kind + 'Builder' + self._optional_question_mark() def element_in_collections(self): """ Return the collection types in which this type occurs as an element. We automatically make the `ExpressibleAs*` protocols conform to the `ExpressibleAs*` protocol of the collection they occur in. """ result = [] for node in [SyntaxBuildableNode(node) for node in SYNTAX_NODES if node.is_syntax_collection()]: if node.collection_element_type() == self: result.append(node.type()) return result def convertible_to_types(self): """ Types that take a single non-optional parameter of this types and to which this type is thus convertible. We automatically make the `ExpressibleAs*` of this type conform to the `ExpressibleAs*` protocol of the convertible types. """ expressible_as = SYNTAX_BUILDABLE_EXPRESSIBLE_AS_CONFORMANCES.get(self.buildable(), []) return [SyntaxBuildableType(type) for type in expressible_as] def base_type(self): """ If this type is not a base kind, return its base type (see `SyntaxBuildableNode.base_type()`), otherwise return `None`. 
""" if self.syntax_kind not in SYNTAX_BASE_KINDS and not self.is_token(): return SyntaxBuildableNode(self).base_type() else: return None def generated_expressible_as_conformances(self): """ The types to which this `ExpressibleAs*` type conforms to via automatically generated conformances. """ conformances = self.element_in_collections() + self.convertible_to_types() if self.base_type() and self.base_type().base_name() != 'SyntaxCollection': conformances.append(self.base_type()) return conformances def transitive_expressible_as_conformances(self): """ The types to which this `ExpressibleAs*` type conforms to via automatically generated conformances, including transitive conformances. """ result = [] for conformance in self.generated_expressible_as_conformances(): result += [conformance] + conformance.transitive_expressible_as_conformances() return result def implied_expressible_as_conformances(self): """ The types to which this `ExpressibleAs*` type implicitly conforms to via transitive conformances. These conformances don't need to be spelled out explicitly in the source code. """ result = [] for conformance in self.generated_expressible_as_conformances(): result += conformance.transitive_expressible_as_conformances() return result def generate_expr_convert_param_type_to_storage_type(self, var_name): """ Generate an expression that converts a variable named `var_name` which is of `expressible_as()` type to an object of type `buildable()`. """ if self.syntax_kind == 'Token': return var_name expr = var_name if self.is_optional: expr += '?' expr += '.' expr += 'create%s()' % self.buildable_base_name() return expr
/** * Background dialog * @author Jacek Furmankiewicz */ public class BackgroundDialog { private Shell shell; @SuppressWarnings("unused") private Label progressLabel; @SuppressWarnings("unused") private ProgressBar progressBar; private BackgroundEvent event = null; /** * Constructor * @throws BuildException * @throws IOException */ public BackgroundDialog(BackgroundEvent event) throws IOException, BuildException { BuilderUtils.validateNotNullAndNotEmpty("event", event); this.event = event; this.event.addPropertyChangeListener(new PropertyChangeListener() { public void propertyChange(PropertyChangeEvent evt) { if (evt.getPropertyName().equals("progressMessage")) { progressLabel.setText((String) evt.getNewValue()); } } }); SwtJavaBuilder.build(this, Builder.getResourceBundle()); } /** * @return The current background event */ public BackgroundEvent getEvent() { return event; } /** * Requests task to be cancelled. */ @SuppressWarnings("unused") private void requestCancel() { boolean answer = MessageDialog.openQuestion(null, Builder.getResourceBundle().getString("message.cancelConfirm"), Builder.getResourceBundle().getString("title.cancelTask")); if (answer) { getEvent().setCancelStatus(CancelStatus.REQUESTED); } } /** * @return Created dialog */ public Shell getShell() { return shell; } }
/**
 * Adds updated values from the sound settings menu into preferences while
 * updating them in-game.
 *
 * @param musicVolume
 *            0 to 100 maximum
 * @param effectsVolume
 *            0 to 100 maximum
 */
public void updateVolume(float musicVolume, float effectsVolume) {
    Preferences pref = Gdx.app.getPreferences(GameCore.TITLE);
    pref.putFloat(PreferenceStrings.MUSIC_VOLUME, musicVolume);
    pref.putFloat(PreferenceStrings.EFFECTS_VOLUME, effectsVolume);
    // libGDX preferences are only persisted once flush() is called; without
    // it the values above are lost when the application exits.
    pref.flush();

    // Convert the 0-100 menu scale to the 0.0-1.0 scale the audio layer uses.
    mAmbience.setVolume(musicVolume / 100.0f);
    this.mEffectsVolume = effectsVolume / 100.0f;
}
def click_record_link(self, get_url: str, component: Dict[str, Any], context: Dict[str, Any], label: str = None, headers: Dict[str, Any] = None, locust_label: str = "") -> Dict[str, Any]: if '_recordRef' not in component: component = component.get('link', "") record_ref = component.get('_recordRef', "") dashboard = component.get('dashboard', "") if not dashboard: dashboard = "summary" record_view_url_stub = f"/view/{dashboard}" if not record_ref: e = Exception("Cannot find _recordRef attribute in RecordLink component.") log_locust_error(e, raise_error=True) record_link_url_suffix = record_ref + record_view_url_stub if "tempo" in get_url: record_link_url = "/suite/tempo/records/item/" + record_link_url_suffix elif "sites" in get_url and "/record/" in get_url: parse_pattern = "/record/" record_link_url = get_url[:get_url.index(parse_pattern) + len(parse_pattern)] + record_link_url_suffix elif match(r'.*\/page\/\w+$', get_url): record_link_url = get_url + "/record/" + record_link_url_suffix elif "sites" in get_url and "/report" in get_url and "/pages/" in get_url: page_search = search(r'(?<=\/pages\/)\w+', get_url) if page_search: page_name = page_search.group() else: e = Exception("Unexpected record link URL - couldn't find page name after /pages/") log_locust_error(e, raise_error=True) parse_pattern = page_name + "/report" url_prefix_index = get_url.index(parse_pattern) + len(page_name) record_link_url = get_url[:url_prefix_index].replace("/pages/", "/page/") + "/record/" + record_link_url_suffix elif "record" in get_url: site_name = component.get('siteUrlStub', "") page_name = component.get('pageUrlStub', "") record_link_url = f"/suite/rest/a/sites/latest/{site_name}/page/{page_name}/record/{record_link_url_suffix}" else: e = Exception("Unexpected record link URL") log_locust_error(e, raise_error=True) if not get_url or not record_link_url: e = Exception("Cannot make Record Link request.") log_locust_error(e, raise_error=True) headers = self.setup_feed_headers() locust_label = locust_label or "Clicking RecordLink: " + component["label"] resp = self.get_page( self.host + record_link_url, headers=headers, label=locust_label ) return resp.json()
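# To make the URL branching above concrete, here is a small runnable sketch
# (all identifiers made up) of what two of the branches produce:
record_ref = "lQB0K7YxC0UQ2Fhx4hicRw"   # hypothetical _recordRef value
suffix = record_ref + "/view/summary"   # record_ref + record_view_url_stub

# Tempo branch: anything served under /suite/tempo/ uses the tempo item path.
print("/suite/tempo/records/item/" + suffix)

# Sites branch: a URL already containing "/record/" is truncated just past it.
get_url = "/suite/rest/a/sites/latest/orders/page/dashboard/record/abc123"
parse_pattern = "/record/"
print(get_url[:get_url.index(parse_pattern) + len(parse_pattern)] + suffix)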
<reponame>openharmony-gitee-mirror/account_os_account /* * Copyright (c) 2021 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "account_stub.h" #include <dlfcn.h> #include <ipc_types.h> #include "account_error_no.h" #include "account_helper_data.h" #include "account_info.h" #include "account_log_wrapper.h" #include "account_mgr_service.h" #include "bundlemgr/bundle_mgr_interface.h" #include "if_system_ability_manager.h" #include "ipc_skeleton.h" #include "iservice_registry.h" #include "ohos_account_kits.h" #include "permission/permission_kit.h" #include "string_ex.h" #include "system_ability_definition.h" namespace OHOS { namespace AccountSA { namespace { const std::string OHOS_ACCOUNT_QUIT_TIPS_TITLE = ""; const std::string OHOS_ACCOUNT_QUIT_TIPS_CONTENT = ""; const std::string PERMISSION_MANAGE_USERS = "ohos.permission.MANAGE_LOCAL_ACCOUNTS"; const std::string PERMISSION_INTERACT_ACROSS_USERS = "ohos.permission.INTERACT_ACROSS_LOCAL_ACCOUNTS"; const std::string PERMISSION_INTERACT_ACROSS_USERS_FULL = "ohos.permission.INTERACT_ACROSS_LOCAL_ACCOUNTS_EXTENSION"; const std::string PERMISSION_DISTRIBUTED_DATASYNC = "ohos.permission.DISTRIBUTED_DATASYNC"; const std::string DEFAULT_ACCOUNT_NAME = "no_<PASSWORD>"; constexpr std::int32_t SYSTEM_UID = 1000; constexpr std::int32_t ROOT_UID = 0; std::int32_t GetBundleNamesForUid(std::int32_t uid, std::string &bundleName) { sptr<ISystemAbilityManager> systemMgr = SystemAbilityManagerClient::GetInstance().GetSystemAbilityManager(); if (systemMgr == nullptr) { ACCOUNT_LOGE("Fail to get system ability mgr"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } sptr<IRemoteObject> remoteObject = systemMgr->GetSystemAbility(BUNDLE_MGR_SERVICE_SYS_ABILITY_ID); if (remoteObject == nullptr) { ACCOUNT_LOGE("Fail to get bundle manager proxy"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } sptr<OHOS::AppExecFwk::IBundleMgr> bundleMgrProxy = iface_cast<OHOS::AppExecFwk::IBundleMgr>(remoteObject); if (bundleMgrProxy == nullptr) { ACCOUNT_LOGE("Bundle mgr proxy is nullptr"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } if (!bundleMgrProxy->GetBundleNameForUid(uid, bundleName)) { ACCOUNT_LOGE("Get bundle name failed"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } return ERR_OK; } } // namespace const std::map<std::uint32_t, AccountStubFunc> AccountStub::stubFuncMap_{ std::make_pair(UPDATE_OHOS_ACCOUNT_INFO, &AccountStub::CmdUpdateOhosAccountInfo), std::make_pair(QUERY_OHOS_ACCOUNT_INFO, &AccountStub::CmdQueryOhosAccountInfo), std::make_pair(QUERY_OHOS_ACCOUNT_QUIT_TIPS, &AccountStub::CmdQueryOhosQuitTips), std::make_pair(QUERY_DEVICE_ACCOUNT_ID, &AccountStub::CmdQueryDeviceAccountId), std::make_pair(QUERY_DEVICE_ACCOUNT_ID_FROM_UID, &AccountStub::CmdQueryDeviceAccountIdFromUid), std::make_pair(GET_APP_ACCOUNT_SERVICE, &AccountStub::CmdGetAppAccountService), }; std::int32_t AccountStub::CmdUpdateOhosAccountInfo(MessageParcel &data, MessageParcel &reply) { if (!IsRootOrSystemAccount() && !HasAccountRequestPermission(PERMISSION_MANAGE_USERS)) { 
ACCOUNT_LOGE("Check permission failed"); return ERR_ACCOUNT_ZIDL_CHECK_PERMISSION_ERROR; } // ignore the real account name Str16ToStr8(data.ReadString16()); const std::string accountName = DEFAULT_ACCOUNT_NAME; const std::string uid = Str16ToStr8(data.ReadString16()); if (uid.empty()) { ACCOUNT_LOGE("invalid user id"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } const std::string eventStr = Str16ToStr8(data.ReadString16()); ACCOUNT_LOGI("CmdUpdateOhosAccountInfo eventStr: %s", eventStr.c_str()); std::int32_t ret = ERR_OK; bool result = UpdateOhosAccountInfo(accountName, uid, eventStr); if (!result) { ACCOUNT_LOGE("Update ohos account info failed"); ret = ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } if (!reply.WriteInt32(ret)) { ACCOUNT_LOGE("Write result data failed"); ret = ERR_ACCOUNT_ZIDL_WRITE_RESULT_ERROR; } return ret; } std::int32_t AccountStub::CmdQueryOhosAccountInfo(MessageParcel &data, MessageParcel &reply) { if (!IsRootOrSystemAccount() && !HasAccountRequestPermission(PERMISSION_MANAGE_USERS)) { ACCOUNT_LOGE("Check permission failed"); return ERR_ACCOUNT_ZIDL_CHECK_PERMISSION_ERROR; } std::pair<bool, OhosAccountInfo> info = QueryOhosAccountInfo(); if (!info.first) { ACCOUNT_LOGE("Query ohos account info failed"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } std::string name = info.second.name_; std::string id = info.second.uid_; if (!reply.WriteString16(Str8ToStr16(name))) { ACCOUNT_LOGE("Write name data failed"); return ERR_ACCOUNT_ZIDL_WRITE_NAME_ERROR; } if (!reply.WriteString16(Str8ToStr16(id))) { ACCOUNT_LOGE("Write id data failed"); return ERR_ACCOUNT_ZIDL_WRITE_UID_ERROR; } if (!reply.WriteInt32(info.second.status_)) { ACCOUNT_LOGE("Write status data failed"); return ERR_ACCOUNT_ZIDL_WRITE_ACCOUNT_STATUS_ERROR; } return ERR_OK; } std::int32_t AccountStub::CmdQueryOhosQuitTips(MessageParcel &data, MessageParcel &reply) { if (!IsRootOrSystemAccount() && !HasAccountRequestPermission(PERMISSION_MANAGE_USERS)) { ACCOUNT_LOGE("Check permission failed"); return ERR_ACCOUNT_ZIDL_CHECK_PERMISSION_ERROR; } if (!reply.WriteString16(Str8ToStr16(OHOS_ACCOUNT_QUIT_TIPS_TITLE))) { ACCOUNT_LOGE("Write quit tips title failed"); return ERR_ACCOUNT_ZIDL_WRITE_RESULT_ERROR; } if (!reply.WriteString16(Str8ToStr16(OHOS_ACCOUNT_QUIT_TIPS_CONTENT))) { ACCOUNT_LOGE("Write quit tips content failed"); return ERR_ACCOUNT_ZIDL_WRITE_RESULT_ERROR; } ACCOUNT_LOGI("CmdQueryOhosQuitTips exit"); return ERR_OK; } std::int32_t AccountStub::CmdQueryDeviceAccountId(MessageParcel &data, MessageParcel &reply) { if (!IsRootOrSystemAccount()) { ACCOUNT_LOGE("Check permission failed"); return ERR_ACCOUNT_ZIDL_CHECK_PERMISSION_ERROR; } std::int32_t id; auto ret = QueryDeviceAccountId(id); if (ret != ERR_OK) { ACCOUNT_LOGE("QueryDevice AccountId failed: %d", ret); return ret; } if (!reply.WriteInt32(id)) { ACCOUNT_LOGE("Write result data failed"); return ERR_ACCOUNT_ZIDL_WRITE_RESULT_ERROR; } return ERR_OK; } std::int32_t AccountStub::CmdQueryDeviceAccountIdFromUid(MessageParcel &data, MessageParcel &reply) { std::int32_t uid = data.ReadInt32(); auto ret = QueryDeviceAccountIdFromUid(uid); if (ret < 0) { ACCOUNT_LOGE("QueryDevice accountid from uid failed: %d", ret); return ret; } if (!reply.WriteInt32(ret)) { ACCOUNT_LOGE("Write result data failed"); return ERR_ACCOUNT_ZIDL_WRITE_RESULT_ERROR; } return ERR_OK; } std::int32_t AccountStub::CmdGetAppAccountService(MessageParcel &data, MessageParcel &reply) { ACCOUNT_LOGI("enter"); auto remoteObject = GetAppAccountService(); if (!reply.WriteParcelable(remoteObject)) { 
ACCOUNT_LOGE("Write result data failed"); return ERR_ACCOUNT_ZIDL_WRITE_RESULT_ERROR; } return ERR_OK; } std::int32_t AccountStub::OnRemoteRequest( std::uint32_t code, MessageParcel &data, MessageParcel &reply, MessageOption &option) { ACCOUNT_LOGI("Received stub message: %{public}d", code); if (!IsServiceStarted()) { ACCOUNT_LOGE("account mgr not ready"); return ERR_ACCOUNT_ZIDL_MGR_NOT_READY_ERROR; } if (!CheckCallerForTrustList()) { const std::u16string descriptor = AccountStub::GetDescriptor(); const std::u16string remoteDescriptor = data.ReadInterfaceToken(); if (descriptor != remoteDescriptor) { ACCOUNT_LOGE("Check remote descriptor failed"); return ERR_ACCOUNT_ZIDL_ACCOUNT_STUB_ERROR; } } const auto &itFunc = stubFuncMap_.find(code); if (itFunc != stubFuncMap_.end()) { return (this->*(itFunc->second))(data, reply); } ACCOUNT_LOGW("remote request unhandled: %{public}d", code); return IPCObjectStub::OnRemoteRequest(code, data, reply, option); } bool AccountStub::HasAccountRequestPermission(const std::string &permissionName) { if (permissionName.empty()) { return false; } if (!IsServiceStarted()) { ACCOUNT_LOGE("account mgr not ready"); return false; } const std::int32_t uid = IPCSkeleton::GetCallingUid(); if (uid == ROOT_UID || uid == SYSTEM_UID) { return true; } std::string bundleName; if (GetBundleNamesForUid(uid, bundleName) != ERR_OK) { return false; } ACCOUNT_LOGI("Check permission: %{public}s", permissionName.c_str()); const std::int32_t userId = QueryDeviceAccountIdFromUid(uid); return (Security::Permission::PermissionKit::VerifyPermission(bundleName, permissionName, userId) == Security::Permission::PermissionState::PERMISSION_GRANTED); } bool AccountStub::IsRootOrSystemAccount() { const auto id = IPCSkeleton::GetCallingUid(); return (id == ROOT_UID || id == SYSTEM_UID); } bool AccountStub::CheckCallerForTrustList() { if (!IsServiceStarted()) { ACCOUNT_LOGE("account mgr not ready"); return false; } const std::int32_t uid = IPCSkeleton::GetCallingUid(); if (uid == ROOT_UID || uid == SYSTEM_UID) { return false; } std::string bundleName; if (GetBundleNamesForUid(uid, bundleName) != ERR_OK) { return false; } std::vector<std::string> trustList = AccountHelperData::GetBundleNameTrustList(); if (std::find(trustList.begin(), trustList.end(), bundleName) == trustList.end()) { return false; } return true; } } // namespace AccountSA } // namespace OHOS
<gh_stars>0 // Copyright 2021 Matrix Origin // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package engine import ( "bytes" "sync" catalog3 "github.com/matrixorigin/matrixone/pkg/catalog" "github.com/matrixorigin/matrixone/pkg/container/batch" "github.com/matrixorigin/matrixone/pkg/vm/engine" "github.com/matrixorigin/matrixone/pkg/vm/engine/aoe" "github.com/matrixorigin/matrixone/pkg/vm/engine/aoe/storage/aoedb/v1" ) // aoe engine type aoeEngine struct { catalog *catalog3.Catalog config *EngineConfig } type EngineConfig struct { ReaderBufferCount uint64 `toml:"reader_buffer_count"` // The number of buffers allocated by each reader QueueMaxReaderCount uint64 `toml:"queue_max_reader_count"` // The number of readers allocated per queue } type SegmentInfo struct { Version uint64 Id string GroupId string TabletId string Node engine.Node } type filterExtent struct { filterType int32 attr string param1 interface{} param2 interface{} } type filterContext struct { extent []filterExtent blocks []aoe.Block } type aoeReader struct { reader *store id int32 prv *batData dequeue int64 enqueue int64 workerid int32 filter []filterContext } type store struct { rel *relation readers []engine.Reader rhs []chan *batData chs []chan *batData blocks []aoe.Block start bool mutex sync.RWMutex iodepth int } type batData struct { bat *batch.Batch cds []*bytes.Buffer dds []*bytes.Buffer use bool id int8 zs []int64 } type worker struct { id int32 bufferCount int batDatas []*batData blocks []aoe.Block storeReader *store enqueue int64 allocLatency int64 readLatency int64 } type AoeSparseFilter struct { storeReader *store reader *aoeReader } func (a AoeSparseFilter) Eq(s string, i interface{}) (engine.Reader, error) { panic("implement me") } func (a AoeSparseFilter) Ne(s string, i interface{}) (engine.Reader, error) { panic("implement me") } func (a AoeSparseFilter) Lt(s string, i interface{}) (engine.Reader, error) { panic("implement me") } func (a AoeSparseFilter) Le(s string, i interface{}) (engine.Reader, error) { panic("implement me") } func (a AoeSparseFilter) Gt(s string, i interface{}) (engine.Reader, error) { panic("implement me") } func (a AoeSparseFilter) Ge(s string, i interface{}) (engine.Reader, error) { panic("implement me") } func (a AoeSparseFilter) Btw(s string, i interface{}, i2 interface{}) (engine.Reader, error) { panic("implement me") } type database struct { id uint64 //id of the database typ int //type of the database catalog *catalog3.Catalog //the catalog of the aoeEngine cfg *EngineConfig } type relation struct { mu sync.Mutex pid uint64 //database id tbl *aoe.TableInfo //table of the tablets catalog *catalog3.Catalog //the catalog nodes engine.Nodes segments []SegmentInfo //segments of the table tablets []aoe.TabletInfo //tablets of the table mp map[string]*aoedb.Relation //a map of each tablet and its relation reader *store cfg *EngineConfig }
/**
 * Runs `action` and reports its outcome either promise-style (no callback:
 * return the result or rethrow the error) or callback-style (callback given:
 * deliver `(err, result)` on the next tick).
 */
export async function promiseOrCallback<T = void>(
  action: () => Promise<T>,
  cb?: (err?: Error, result?: T) => void,
): Promise<T | void> {
  let caught: Error;
  let res: T;
  try {
    res = await action();
    if (!cb) {
      return res;
    }
  } catch (err) {
    caught = err;
    if (!cb) {
      throw err;
    }
  } finally {
    if (cb) {
      // Defer so the callback never runs synchronously within this call.
      process.nextTick(() => cb(caught, res));
    }
  }
}
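// A usage sketch of the dual-mode helper above; `fetchUser` is hypothetical.
const fetchUser = async (id: number) => ({ id, name: 'Ada' });

async function demo() {
  // Promise style: no callback, so the result comes back (or the error throws).
  const user = await promiseOrCallback(() => fetchUser(1));

  // Callback style: the outcome is delivered via cb on the next tick instead.
  promiseOrCallback(() => fetchUser(1), (err, result) => {
    if (err) console.error(err);
    else console.log(result);
  });
}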
To help, local comedians are coming together to throw a benefit party. The event will feature a star-studded lineup, including Louie Anderson, Mystery Science Theater 3000 star Joel Hodgson, Mary Jo Pehl, Alex Jackson, Tom Baumgartner, and Kristin Andersen. "Joe Minjares has been a good friend and creative presence in the Minneapolis standup and theater scene for almost 40 years," says Hodgson. "Joe also gave me free Mexican food when I really needed it. So yeah, performing at his benefit is the least I can do." "I used to eat at Pepito's before I was even thinking of becoming a comedian," says Anderson. "Then I became a comedian and moved away to Los Angeles. I always missed Pepito's and thought about that cheese dip often: There are many imitators but they have never even come close." The benefit show will take place Saturday, November 25, at 8 p.m. at the Parkway. Tickets are $20 in advance from vitalculture.com and $25 at the door. The event is 21+.
Hibiki Kono had a dream — he wanted to be like his hero, Spiderman. Most little kids would have their parents buy them a costume that they could wear to school. Not Kono — the 13-year-old set to work making his dream a reality. He used two 1,400-watt recycled vacuum cleaners and a little bit of elbow grease to make a machine that allows him to scale walls — just like his spindly hero!

This DIY genius was doubted at first: when he announced his plans, his teachers and parents were sceptical that he’d succeed. Kono’s design technology teacher, Angus Gent, told reporters, “I’m hugely proud of him. When he came to me with the idea at the beginning I had my doubts.” But to the surprise and delight of his community, the 13-year-old’s design was successful, and Kono says he completely trusts the machine to hold him up on any wall. However, his mom’s not so sure: she won’t let him climb the walls in his bedroom for fear that he “may pull down the ceiling.”

Kono thinks the machine could be helpful for window washers, and in some of his demonstrations he shows how one could easily use one hand for support on his DIY machine and the other to clean the surface being climbed. Kono showed his nifty technology off at a school assembly, but unlike Spiderman, Spiderboy has limitations: his mom won’t let him climb higher than the vacuum cleaners’ power cords will reach. You see, he’s got to keep his machine plugged in for it to work.

Via DVICE
<gh_stars>1-10 /* * Copyright 2019 The gRPC Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.grpc.internal; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.fail; import io.grpc.internal.ServiceConfigUtil.LbConfig; import java.util.List; import java.util.Map; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** * Unit test for {@link ServiceConfigUtil}. */ @RunWith(JUnit4.class) public class ServiceConfigUtilTest { @Test public void getChildPolicyFromXdsConfig() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" + "}"; LbConfig expectedChildPolicy1 = ServiceConfigUtil.unwrapLoadBalancingConfig( checkObject(JsonParser.parse("{\"round_robin\" : {}}"))); LbConfig expectedChildPolicy2 = ServiceConfigUtil.unwrapLoadBalancingConfig( checkObject(JsonParser.parse("{\"lbPolicy2\" : {\"key\" : \"val\"}}"))); List<LbConfig> childPolicies = ServiceConfigUtil.getChildPolicyFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(childPolicies).containsExactly(expectedChildPolicy1, expectedChildPolicy2); } @Test public void getChildPolicyFromXdsConfig_null() throws Exception { String rawLbConfig = "{" + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" + "}"; List<LbConfig> childPolicies = ServiceConfigUtil.getChildPolicyFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(childPolicies).isNull(); } @Test public void getFallbackPolicyFromXdsConfig() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" + "}"; LbConfig expectedFallbackPolicy1 = ServiceConfigUtil.unwrapLoadBalancingConfig( checkObject(JsonParser.parse("{\"lbPolicy3\" : {\"key\" : \"val\"}}"))); LbConfig expectedFallbackPolicy2 = ServiceConfigUtil.unwrapLoadBalancingConfig( checkObject(JsonParser.parse("{\"lbPolicy4\" : {}}"))); List<LbConfig> childPolicies = ServiceConfigUtil.getFallbackPolicyFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(childPolicies).containsExactly(expectedFallbackPolicy1, expectedFallbackPolicy2); } @Test public void getFallbackPolicyFromXdsConfig_null() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]" + "}"; List<LbConfig> fallbackPolicies = ServiceConfigUtil.getFallbackPolicyFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(fallbackPolicies).isNull(); } @Test public void getEdsServiceNameFromXdsConfig() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]," + 
"\"edsServiceName\" : \"dns:///eds.service.com:8080\"" + "}"; String edsServiceName = ServiceConfigUtil.getEdsServiceNameFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(edsServiceName).isEqualTo("dns:///eds.service.com:8080"); } @Test public void getEdsServiceNameFromXdsConfig_null() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" + "}"; String edsServiceName = ServiceConfigUtil.getEdsServiceNameFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(edsServiceName).isNull(); } @Test public void getLrsServerNameFromXdsConfig() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]," + "\"lrsLoadReportingServerName\" : \"dns:///lrs.service.com:8080\"" + "}"; String lrsServerName = ServiceConfigUtil.getLrsServerNameFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(lrsServerName).isEqualTo("dns:///lrs.service.com:8080"); } @Test public void getLrsServerNameFromXdsConfig_null() throws Exception { String rawLbConfig = "{" + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]," + "\"fallbackPolicy\" : [{\"lbPolicy3\" : {\"key\" : \"val\"}}, {\"lbPolicy4\" : {}}]" + "}"; String lrsServerName = ServiceConfigUtil.getLrsServerNameFromXdsConfig( checkObject(JsonParser.parse(rawLbConfig))); assertThat(lrsServerName).isNull(); } @Test public void unwrapLoadBalancingConfig() throws Exception { String lbConfig = "{\"xds_experimental\" : { " + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]" + "}}"; LbConfig config = ServiceConfigUtil.unwrapLoadBalancingConfig(checkObject(JsonParser.parse(lbConfig))); assertThat(config.getPolicyName()).isEqualTo("xds_experimental"); assertThat(config.getRawConfigValue()).isEqualTo(JsonParser.parse( "{\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]" + "}")); } @Test public void unwrapLoadBalancingConfig_failOnTooManyFields() throws Exception { // A LoadBalancingConfig should not have more than one field. String lbConfig = "{\"xds_experimental\" : { " + "\"childPolicy\" : [{\"round_robin\" : {}}, {\"lbPolicy2\" : {\"key\" : \"val\"}}]" + "}," + "\"grpclb\" : {} }"; try { ServiceConfigUtil.unwrapLoadBalancingConfig(checkObject(JsonParser.parse(lbConfig))); fail("Should throw"); } catch (Exception e) { assertThat(e).hasMessageThat().contains("There are 2 fields"); } } @Test public void unwrapLoadBalancingConfig_failOnEmptyObject() throws Exception { // A LoadBalancingConfig should not exactly one field. 
String lbConfig = "{}"; try { ServiceConfigUtil.unwrapLoadBalancingConfig(checkObject(JsonParser.parse(lbConfig))); fail("Should throw"); } catch (Exception e) { assertThat(e).hasMessageThat().contains("There are 0 fields"); } } @Test public void unwrapLoadBalancingConfig_failWhenConfigIsString() throws Exception { // The value of the config should be a JSON dictionary (map) String lbConfig = "{ \"xds\" : \"I thought I was a config.\" }"; try { ServiceConfigUtil.unwrapLoadBalancingConfig(checkObject(JsonParser.parse(lbConfig))); fail("Should throw"); } catch (Exception e) { assertThat(e).hasMessageThat().contains("is not object"); } } @Test public void unwrapLoadBalancingConfigList() throws Exception { String lbConfig = "[ " + "{\"xds_experimental\" : {\"unknown_field\" : \"dns:///balancer.example.com:8080\"} }," + "{\"grpclb\" : {} } ]"; List<LbConfig> configs = ServiceConfigUtil.unwrapLoadBalancingConfigList( checkObjectList(JsonParser.parse(lbConfig))); assertThat(configs).containsExactly( ServiceConfigUtil.unwrapLoadBalancingConfig(checkObject(JsonParser.parse( "{\"xds_experimental\" : " + "{\"unknown_field\" : \"dns:///balancer.example.com:8080\"} }"))), ServiceConfigUtil.unwrapLoadBalancingConfig(checkObject(JsonParser.parse( "{\"grpclb\" : {} }")))).inOrder(); } @Test public void unwrapLoadBalancingConfigList_failOnMalformedConfig() throws Exception { String lbConfig = "[ " + "{\"xds_experimental\" : \"I thought I was a config\" }," + "{\"grpclb\" : {} } ]"; try { ServiceConfigUtil.unwrapLoadBalancingConfigList(checkObjectList(JsonParser.parse(lbConfig))); fail("Should throw"); } catch (Exception e) { assertThat(e).hasMessageThat().contains("is not object"); } } @SuppressWarnings("unchecked") private static List<Map<String, ?>> checkObjectList(Object o) { return (List<Map<String, ?>>) o; } @SuppressWarnings("unchecked") private static Map<String, ?> checkObject(Object o) { return (Map<String, ?>) o; } }
package ie.tudublin;

import java.util.ArrayList;

import processing.core.PApplet;

public class ScoreDisplay extends PApplet
{
    String score = "DEFGABcd";
    //String score = "D2E2F2G2A2B2c2d2";
    //String score = "DEF2F2F2EFA2A2B2AFD2E2D2D2D2";

    ArrayList<Note> notes = new ArrayList<Note>();

    // Margin around the stave. Computed in setup() because width is not
    // valid until size() has run (a field initialiser would see width == 0).
    float border;

    public void settings()
    {
        size(1000, 500);

        // How to convert a character to a number
        char c = '7';       // c holds the character '7' (ASCII 55)
        int i = c - '0';    // i holds the number 7 (55 - 48)
        println(i);
    }

    public void setup()
    {
        // Assumption: border is meant as a 10% margin; a value of
        // width * 0.9f would place the stave lines off-screen.
        border = width * 0.1f;
        loadScore();
        printScore();
    }

    public void loadScore()
    {
        // Populate the array list, one Note per character of the score.
        for (int i = 0; i < score.length(); i++)
        {
            char c = score.charAt(i);
            int cDuration = 1;
            Note n = new Note(c, cDuration);
            notes.add(n);
        }
    }

    public void printScore()
    {
        for (Note n : notes)
        {
            if (n.getDuration() == 1)
            {
                println(n.getNote() + "\t" + n.getDuration() + "\t" + "quaver");
            }
            if (n.getDuration() == 2)
            {
                println(n.getNote() + "\t" + n.getDuration() + "\t" + "crotchet");
            }
        }
    }

    // Draws the five stave lines and the note letters.
    void drawStave()
    {
        stroke(0);
        strokeWeight(3);
        line(border, border * 2, width - border, border * 2);
        line(border, border * 2 + 25, width - border, border * 2 + 25);
        line(border, border * 2 + 50, width - border, border * 2 + 50);
        line(border, border * 2 + 75, width - border, border * 2 + 75);
        line(border, border * 2 + 100, width - border, border * 2 + 100);

        fill(0);
        textSize(20);
        for (int i = 0; i < notes.size(); i++)
        {
            float x = map(i, 0, notes.size(), border * 2.5f, width - border);
            Note note = notes.get(i);
            text(note.getNote(), x, border * 2.5f);
        }
    }

    void drawnotes()
    {
        for (int i = 0; i < notes.size(); i++)
        {
            float x = map(i, 0, notes.size(), border * 2, width - border);
            float y = map(i, 0, notes.size() + 10, border * 2 + 20, height - border);
            // Highlight the note under the mouse. A small tolerance is
            // assumed here, since an exact mouseX == x float match would
            // almost never occur.
            if (abs(mouseX - x) < 12)
            {
                fill(255, 0, 0);
            }
            else
            {
                fill(0);
            }
            circle(x, height - y, 25);
            //line(x, y + 250, x, y);
        }
    }

    public void draw()
    {
        // Clear the frame first, then draw the stave and the notes.
        background(255);
        drawStave();
        drawnotes();
    }
}
"""
Custom database fields to use on EDD models.
"""

from django import forms
from django.db import models


class VarCharField(models.TextField):
    """
    Take advantage of postgres VARCHAR = TEXT, to have an unlimited CharField,
    using the TextInput widget (<input type="text"> instead of <textarea>).
    """

    def formfield(self, **kwargs):
        defaults = {"widget": forms.TextInput}
        defaults.update(kwargs)
        return super().formfield(**defaults)


class FileField(models.FileField):
    """
    Django's default FileField sets a max_length of 100 if none is otherwise
    set. This is not what we want with a Postgres database, where `varchar`
    is preferred over `varchar(100)`.
    """

    def __init__(self, **kwargs):
        # in parent __init__()
        # kwargs.setdefault("max_length", 100) is called
        # which then sets self.max_length
        explicit_max = "max_length" in kwargs
        super().__init__(**kwargs)
        # unless a max_length was explicitly provided,
        # remove it, so Postgres can use varchar over varchar(100)
        if not explicit_max:
            self.max_length = None
import java.util.HashSet;
import java.util.Set;
import java.util.Stack;

/**
 * The parentheses in a string are balanced if and only if these 2 conditions are met:
 * 1. There are the same number of "(" and ")" in the string.
 * 2. Scanning through the string from left to right and counting how many "(" and ")"
 *    there are so far, there should never be a time where there are more ")" than "(".
 *    We call count("(") - count(")") the balance of the string.
 *
 * So, here we use a set to store the indexes of the characters we plan to remove (we
 * cannot remove the characters directly, because if the length of the string changed,
 * the indexes would change too).
 * If we encounter a '(', we push its index onto the stack.
 * Whenever we encounter a ')', we pop a '(' index off the stack.
 * If the stack is empty when we want to pop, this ')' is invalid, so we put its index
 * into the indexToRemove set.
 * After going through the whole string, if there are still '(' indexes on the stack,
 * removing those '(' would make the string valid, so we add them to the set as well.
 *
 * We use another for-loop to go through the string again; if a character's index is
 * not in the indexToRemove set, we append it to the result StringBuilder.
 */
class Solution {
    public String minRemoveToMakeValid(String s) {
        Set<Integer> indexToRemove = new HashSet<>();
        Stack<Integer> stack = new Stack<>();
        for (int i = 0; i < s.length(); ++i) {
            char c = s.charAt(i);
            if (c == '(') {
                stack.push(i);
            } else if (c == ')') {
                if (!stack.isEmpty()) {
                    stack.pop();
                } else {
                    indexToRemove.add(i);
                }
            }
        }
        while (!stack.isEmpty()) {
            indexToRemove.add(stack.pop());
        }
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < s.length(); ++i) {
            if (!indexToRemove.contains(i)) {
                sb.append(s.charAt(i));
            }
        }
        return sb.toString();
    }
}
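A minimal usage sketch for the Solution class above; the expected outputs follow directly from the algorithm described in the comment (the MinRemoveDemo wrapper class exists only for illustration).

public class MinRemoveDemo {
    public static void main(String[] args) {
        Solution solution = new Solution();
        // "lee(t(c)o)de)" has one unmatched ')' at the end; only it is removed.
        System.out.println(solution.minRemoveToMakeValid("lee(t(c)o)de)")); // lee(t(c)o)de
        // "))((" contains no matched pair, so every parenthesis is removed.
        System.out.println(solution.minRemoveToMakeValid("))(("));          // prints an empty line
    }
}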
class ChunkWriterStdOut:
    """
    Write the generated source code chunks to stdout.
    """

    name = 'stdout'

    def write(self, source_file, chunk_iterable, mark=None):
        # CodeChunk is assumed to be defined elsewhere in this project;
        # no import for it appeared in the original snippet.
        hdr, inl, src = CodeChunk.join(chunk_iterable)
        print(hdr)
        print()
        print(inl)
        print()
        print(src)

    def outfiles(self, source_file):
        """
        Get the list of files output by this writer.

        :param source_file: the source file
        :return: the list of files (None here, since this writer only prints)
        """
        return None
The Decline of Human Capital in Ukrainian Education and Science: Experience of Reversal
Preservation and enhancement of the quality of education should be a priority in the development of any society. But the Ukrainian higher education system has ceased to be competitive, and its prestige has continued to decline. Ukrainian higher education diplomas are not recognized worldwide, in contrast to diplomas of the European and American style. The vast majority of graduates are not competitive on the European labor market, and 84% of Ukrainian students do not associate their professional future with Ukraine. Wages in the Ukrainian education system have for years been unprecedentedly low compared with other countries. Corruption and bribery have penetrated all areas of higher education in the country.
package com.lsm;

import java.util.Arrays;

public interface ListSortMerger {

    static int[] createResultArray(InsertPosition insertPosition, int[] left, int[] right) {
        if (insertPosition == InsertPosition.BACK) {
            // result array = left + right
            int[] result = Arrays.copyOf(left, left.length + right.length);
            System.arraycopy(right, 0, result, left.length, right.length);
            return result;
        }
        if (insertPosition == InsertPosition.FRONT) {
            // result array = right + left
            int[] result = Arrays.copyOf(right, left.length + right.length);
            System.arraycopy(left, 0, result, right.length, left.length);
            return result;
        }
        // result array = first half of left + right + second half of left
        assert insertPosition == InsertPosition.MIDDLE : insertPosition;
        int[] result = new int[left.length + right.length];
        int splitPos = left.length / 2;
        System.arraycopy(left, 0, result, 0, splitPos);
        System.arraycopy(right, 0, result, splitPos, right.length);
        // Copy the remaining left.length - splitPos elements (not splitPos,
        // which would drop one element whenever left.length is odd).
        System.arraycopy(left, splitPos, result, splitPos + right.length, left.length - splitPos);
        return result;
    }

    /**
     * Takes two arrays, combines and sorts them.
     * Assumption: the "left" array is already sorted.
     *
     * @param left  the "left" array
     * @param right the "right" array
     * @return left appended with right, sorted
     */
    int[] sortMerge(int[] left, int[] right);

    String getName();
}
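The interface above leaves sortMerge unimplemented. Here is a minimal sketch of one possible implementation (the class name SimpleSortMerger is hypothetical), relying on the documented assumption that "left" is already sorted: sort "right", then perform the standard two-pointer merge.

import java.util.Arrays;

// Hypothetical implementation: sorts "right", then merges it with the
// already-sorted "left" using the classic two-pointer merge.
class SimpleSortMerger implements ListSortMerger {
    @Override
    public int[] sortMerge(int[] left, int[] right) {
        int[] sortedRight = right.clone();
        Arrays.sort(sortedRight);
        int[] result = new int[left.length + sortedRight.length];
        int i = 0, j = 0, k = 0;
        while (i < left.length && j < sortedRight.length) {
            result[k++] = left[i] <= sortedRight[j] ? left[i++] : sortedRight[j++];
        }
        while (i < left.length) {
            result[k++] = left[i++];
        }
        while (j < sortedRight.length) {
            result[k++] = sortedRight[j++];
        }
        return result;
    }

    @Override
    public String getName() {
        return "simple";
    }
}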
/** * Create a custom domain for your fax services * * REST: POST /me/fax/customDomains * @param domain [required] The custom domain of your fax services */ public OvhMailDomain2Service fax_customDomains_POST(String domain) throws IOException { String qPath = "/me/fax/customDomains"; StringBuilder sb = path(qPath); HashMap<String, Object>o = new HashMap<String, Object>(); addBody(o, "domain", domain); String resp = exec(qPath, "POST", sb.toString(), o); return convertTo(resp, OvhMailDomain2Service.class); }
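A hedged usage sketch of the method above; meClient stands in for the enclosing OVH API client object, which is not shown in the snippet.

// Hypothetical usage; "meClient" is whatever client object exposes
// fax_customDomains_POST (the enclosing class is not shown above).
try {
    OvhMailDomain2Service service = meClient.fax_customDomains_POST("example.com");
    System.out.println("Created fax custom domain service: " + service);
} catch (IOException e) {
    // the method declares IOException for transport/serialization failures
    e.printStackTrace();
}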
Efficacy of Melatonin on Postoperative Outcomes after Hysterectomy: A Randomized, Double-blind, Placebo-controlled Trial

Background: Melatonin became a part of multimodal analgesia in several recent studies because of its analgesic, anxiolytic and anti-inflammatory properties. The incidence of anxiety and pain in patients undergoing hysterectomy is not low. Moreover, preoperative anxiety is related to postoperative pain. The hypothesis of this study was that melatonin could improve pain and other postoperative conditions after hysterectomy.

Methods: A randomized, double-blinded, placebo-controlled trial recruited 54 patients, aged between 18 and 65 years, planned to undergo hysterectomy, with or without oophorectomy, under spinal anesthesia. The patients were allocated to receive 4 mg prolonged-release melatonin at night before surgery and in the morning before surgery, or 2 doses of placebo at the same time points. Morphine consumption within 24 hours via a patient-controlled analgesia machine and visual analog scale (VAS) pain scores were recorded. In addition, quality of sleep, Thai standard anxiety level score, fatigue, general well-being and satisfaction scores were measured by a blinded assessor and analyzed.

Results: Mean cumulative morphine consumption in the melatonin and placebo groups was 33.04 ±10.42 and 42.63 ±8.21 mg, respectively (p < 0.001). Mean postoperative VAS pain score was lower in the melatonin group at recovery room arrival (T0) (23.41 vs 8.07, p = 0.01). However, there was no significant difference between groups at 1 (T1), 6 (T6) and 24 hours (T24) postoperatively. Fatigue and general well-being scores in the melatonin group were better than in the placebo group.

Conclusion: The prolonged-release formulation of melatonin decreased pain intensity in the post-anesthetic care room and lowered doses of postoperative morphine within 24 hours after surgery. Postoperative fatigue, general well-being and satisfaction scores were better in the treatment group. However, there was no improvement in anxiety or sleep quality. Melatonin may be an additional choice of multimodal analgesia for hysterectomy.

Background
A major problem after many surgical procedures is inadequate postoperative analgesia, even after the multimodal analgesia concept was introduced. 1 A national survey in the United States found that 80% of patients experienced acute pain after surgery. Most of these patients had moderate or severe pain. 2 Uncontrolled postoperative pain is associated with impaired respiratory function. 3 In contrast, efficient pain management is related to positive physiologic effects in specific organs, such as reduced thromboembolic or cardiac complications, and early recovery. 4 After the gate control theory was proposed, a wider understanding of pain mechanisms was achieved. The latest update of the definition of pain recognized that pain is a multidimensional and subjective distressing experience in terms of sensory, emotional, social or cognitive components. 5,6 Uterine fibroids are the most common benign pelvic problem, and hysterectomy is a treatment option according to a recent survey-based study in the United States. The leading reasons for undergoing the operation were healthcare provider recommendation, significant pain and distress. 7 Common postoperative problems after hysterectomy are not only acute postoperative pain but also high preoperative anxiety levels.
8 Diagnosis-related symptoms negatively result in physical and social activities including patients' quality of life. 9 Preoperative anxiety in hysterectomy is strongly related to postoperative pain score as well. 10,11 Benzodiazepine is a common medication as preoperative anxiolytic but may impair psychomotor performance. Melatonin (N-acetyl-5-methoxytryptamine) is a pineal hormone regulating sleep-wake cycle in mammals. More than circadian rhythm stabilizing, exogenous melatonin has been investigated for other effects such as modulation of blood pressure, body temperature and cortisol control, immune function and anti-oxidative defense. 12 Its strong chronobiotic properties and the ability to regulate circadian rhythm make melatonin a good choice for sleep disorders in the elderly. 13 The exact analgesic mechanisms of melatonin are not known but may be involved with many receptors in brain. Involvement of β-endorphins, GABA receptor, opioid receptors and nitric oxide-arginine pathway were proposed. 14 Several recent studies showed some benefits of perioperative melatonin in many aspects among different groups of patients such as quality of recovery after surgery, diminished depressive symptoms and pain score reduction. 15-17 However, melatonin's analgesic effect remains controversial in the perioperative period and requires more investigation. 8 The hypothesis of the study was whether preoperative oral melatonin can potentiate the analgesic effects of intravenous morphine and improve sleep quality and anxiety levels. The aim of the present study was to examine the analgesic effects of oral longacting melatonin premedication, compared to placebo, on morphine consumption in patients undergoing abdominal hysterectomy with or without oopherectomy. In addition, other therapeutic perspectives in clinical anesthesia such as anxiolytic effect, sleep quality, and also quality of life after hysterectomy were compared. Methods The present randomized, controlled and double-blinded study was conducted between April and December 2014 in King Chulalongkorn Memorial Hospital after approval of the Institutional Review Board of Faculty of Medicine, Chulalongkorn University (IRB No.428/56, COA No. 781/2013, Chairperson Associate Prof U. Jaisamrarn) on December 19, 2013. All written informed patient consents were obtained. Fifty-four patients, ASA physical status I-II and age 18-65 year scheduled for elective abdominal hysterectomy (with or without ovarian surgery) were enrolled into the randomised, double blind, placebo-controlled study. Patient exclusion criteria included history of heart disease, hepatic or renal failure, psychiatric disorders, sleep disorders, chronic pain syndromes, mental impairment, drug or alcohol abuse, patients receiving drugs with known analgesic and sedative properties, BMI over 30 kg/m 2 and patients who declined spinal anesthesia. The patients were randomly divided into 2 groups (27 patients each) by using a random number table (1:1) and concealed opaque envelopes. These patients received either 4 mg of prolonged-release formulation of oral melatonin (Circadin®) (M group) or placebo (P group) at the night (8 PM) before the procedure and another dose 2 hours before surgery from a pharmacist who generated the random sequence and was not involved to the study. No other preoperative medication was given. Blinding and randomization were performed by an investigator who was not involved in patient evaluation. 
Other personnel involved in the patient's care were unaware of patient group assignment. Preoperative visit was conducted the day before surgery. All patients were evaluated by the same anesthesia resident, who provided information on the preoperative course and instructed them on how to use the patientcontrol anesthesia (PCA) machine. Each patient was multidimensionally assessed; level of anxiety by the Amsterdam Preoperative Anxiety and Information Scale (APAIS) Thai version. 18 Anxiety score ranged from 4-20 (anxiety score > 13 possible to high level of anxiety up to 12 times). Sleep quality was measured by a questionnaire about subjective sleep quality using 100 mm VAS (visual analog scale; 0 = best conceivable sleep and 100 = worst conceivable sleep). Level of physical fatigue and general well-being were evaluated by using 10-point ordinal scale (1 = least fatigue feeling and 10 = most fatigue feeling and 100 mm VAS (0 = extremely well and 100 = extreme malaise), respectively, at the night before and 24 hours after the surgery. Upon arrival in the operating room, all patients underwent standard monitoring. Before spinal anesthesia, 10 ml/kg of physiologic crystalloid solution was administered intravenously. Spinal anesthesia was performed by spinal needle at lumbar segment L2/3 or L3/4 with 0.5% hyperbaric bupivacaine 16-20 mg according to attending anaesthesiologists. If any patient had anxiety or discomfort, continuous propofol 0.08-0.1 mg/kg/min was given to maintain conscious sedation during the surgery. At the end of the surgery, sedation was stopped. At recovery room, all patients received morphine via patient-controlled analgesia (PCA) machine; the PCA dose was 1 mg, a 6-min lockout and maximum dose of 30 mg within 4 hours and no basal rate was applied. In the first 2-hour postoperative period, if the patients had VAS pain score more than 40 mm after being connected to morphine PCA, morphine 0.1 mg/kg was further injected. PCA pump was continued for 24 hours after surgery. Four mg of ondansetron every 6 hours was administered for nausea/vomiting as requirement. No other analgesic was allowed. The primary outcome with respect to the efficacy of the study drug was postoperative morphine consumption in 24 hours. Secondary outcomes were postoperative pain score, anxiety, sleep quality, general well-being and satisfaction with pain treatment. Postoperative pain was assessed using 100 mm VAS (0 = no pain and 100 = worst imaginable pain) when arriving at recovery room (T0), 1(T1), 6(T6) and 24(T24) hours. Satisfaction with pain treatment and nausea/vomiting were assessed using 100 mm VAS at 24 h postoperatively. Other adverse effects, surgical and anesthetic complications were recorded. Statistical analysis The sample size of 25 patients in each group was required to detect difference between groups in reduce postoperative morphine consumption 0.1 mg/kg/min with a confidence level of 90% and a significance level of 5% according to data from the previous study. 19 Twenty-seven patients were enrolled in each group, allowing a 10% drop-out rate. Statistical analysis was calculated by using SPSS software version 17.0. Data are presented as mean ± standard deviation unless stated otherwise. Comparison of morphine consumption was analyzed using unpaired t-test. Pain score, anxiety and sleep quality were analyzed using repeated measure ANOVA. Satisfaction and nausea/ vomiting were analyzed using Chi-squared test (Fisher's exact test if appropriate). 
A pvalue < 0.05 was considered statistically significant. Results Fifty-four patients were enrolled into the study. No patient was excluded from the study after enrollment. The patient characteristics in each group, 27 patients, including diagnosis and types of operations were comparable between both groups (as shown in Table 1). Doses of bupivacaine, propofol and ephedrine were similar. There was no significant difference in anesthetic level of bupivacaine and number of patients who required sedation. There was no statistical difference in surgical variables including operation time and amount of blood loss. The number of intraoperative events such as hypotension needed treatments, bradycardia (Heart rate < 60/min). were comparable in both groups and shown in Table 2. The patients in the M and P group who required cumulative dose of morphine consumption in 24 h were 33.04 ±10.42 and 42.63 ±8.21 mg, respectively (p < 0.001). Postoperative VAS of pain was significantly lower in the M group at recovery room arrival (T0) (23.41 vs 8.07, p = 0.01). However, there was no significant difference of VAS pain score between groups at 1(T1), 6 (T6) and 24 hours (T24) postoperatively. Satisfaction with pain treatment in the M group was significantly higher than in the P group. (8.56 ± 1.25 vs 7.78 ± 1.50, p = 0.02). (Table 3) There was no significant difference between groups in preoperative and postoperative anxiety level. VAS scale for fatigue and general well-being scores in the first postoperative day were significantly lower in the M group compared with the placebo group. (3.30 ± 2.22 vs. 5.15 ± 1.85, p = 0.002 and 31.59 ± 24.14 vs. 49.78 ± 14.87, p = 0.002, respectively) However, subjective sleep quality was no significant difference between groups. Discussion The present study demonstrates that long-acting oral melatonin improved VAS pain score and reduced cumulative dose of PCA morphine consumption in 24 hours. These results were similar to the previous studies in other procedures, such as prostatectomy, 20 dental surgery, 15 hand surgery, 21 cataract surgery under topical anesthesia 22 and abdominal hysterectomy. 19,23 In contrast, some studies failed to show the effectiveness of perioperative melatonin in terms of analgesic outcomes. 24,25 The variation of dose, route and timing of melatonin administration might affect these individual results, which remain inconclusive even after systematic review were conducted. 8, 26 Caumo et al. revealed the analgesic effect of preoperative oral melatonin. Melatonin reduced pain scores on VAS scale within postoperative period of 48 hours and lowered morphine consumption for 24 hours after abdominal hysterectomy, compared to placebo. 23 Such a study proposed that postoperative anxiolytic effect of melatonin treatment led to anti-nociceptive effect. 23 Contrast to that study, this study could not show a significant difference of anxiolysis, as well as VAS pain score after immediate postoperative phase at post-anesthesia care unit arrival. The present study investigated a 4 mg of prolonged-release formulation of melatonin (Circadin®, Neurim Pharmaceuticals, Tel-Aviv, Israel). This was a lower dose than other previous studies as premedication for analgesic effect. Forms of melatonin in all previous studies might be a short acting formulation or higher doses. 
However, from general clinical practice, 2-mg dose once daily of prolonged-release melatonin showed clinical benefits in terms of sleep quality and quality of life in patients aged 55 years and older without unexpected effects. 27 The therapeutic indication of this novel formulation melatonin is primary insomnia in elderly due to long duration of action and safety profiles. 8 Exogenous melatonin modulates via activation of the MT1 and/or MT2 melatonin receptors in the central nervous system. 13,27 In addition, there were several in vitro studies which demonstrated that the anti-nociceptive effects of melatonin could be reversed by various mechanism such as flumazenil, naloxone, potassium or calcium ion-channel-blockers. 12 The present study is the first clinical study of prolonged-release formulation in perioperative period. A recent study in patients who underwent orthognathic surgery showed that prophylactic oral melatonin significantly decreased pain, numbness perception and were also correlated to lower serum hydrogen peroxide but higher antioxidant enzyme levels. 28 Patients with postoperative sleep disturbance can suffer from delirium, delayed recovery and pain. 29 Correlation between pharmacologic sleep promotion and perioperative pain control are still controversial. 30 The present study failed to demonstrate the improved postoperative sleep quality. Similar to a recent meta-analysis in cholecystectomy, melatonin interventions showed no substantial impact on sleep quality and pain score after 1 and 3 hours. 31 However, Kirksey A. et al concluded melatonin did not have effect on subjective sleep assessment but improved sleep efficiency and sleep time by actigraphy wrist bracelet measurement. 32 Acute postoperative pain after hysterectomy may be complicated by anxiety state and psychological factors. A qualitative systematic review demonstrated that anxiety was a significant predictor for postoperative pain. 33 Such result was similar to another study in patients who underwent hysterectomy, in which preoperative anxiety was a positive predictor of immediate postoperative pain, pain on wards and also pain at home. 10 Moreover, Pinto et al. showed that anxiety predicted pain intensity at 48 hours after hysterectomy and also mediated pain catastrophizing. 6 In several clinical studies and systematic reviews, the outcome of preoperative melatonin administered to reduce preoperative anxiety was still controversial among varied population and doses. 15,17,19,34 Whereas another systematic review from Cochrane database concluded melatonin can reduce preoperative anxiety at the same rate as standard medication with midazolam if it was given within appropriate timing. 35 However, the present study could not exhibit the benefit of melatonin as an anxiolytic. In addition, the concept of immune-pineal axis influencing postoperative pain in patients who underwent hysterectomy was proposed. There was an inverse correlation between tumor necrosis factor (TNF) and nocturnal melatonin level. Moreover, the lower melatonin level was accompanied by lower cortisol levels and patients required higher doses of analgesics. 36 Therefore, exogenous melatonin might play a role for perioperative period especially in hysterectomy. Fatigue has been defined as the lack of energy or exhaustion which is a complex, multifactorial symptom distinct from sleepiness or sadness. 37 The incidence of postoperative fatigue following hysterectomy was frequent regardless of general or spinal anesthesia. 
38 Intensity of postoperative fatigue was the result of many biological factors, such as surgical stress response, anemia, declined nutritional status etc, psychological and social factors. 38 Fatigue was associated to poor quality of life in cancer patients who underwent surgery. 37 From the present study, melatonin enhanced subjective fatigue, general well-being VAS pain score and satisfaction score compared to placebo. These results were different from previous studies. Ivry M. et al. revealed melatonin improved quality of recovery following bariatric surgery in terms of sleep and pain levels. 16 Although differing in definition and measurement, the present study demonstrated advantages of melatonin administration in early postoperative fatigue and recovery, but no improvement of sleep quality. This may be due to lower morphine requirement. Limitations of this study include the quality of recovery questionnaire in Thai version, which was not validated at the time the study was conducted. Likert and VAS pain score were measured to represent overall subjective recovery condition. The details of each standard domain may be inconclusive. Second, the results were focused only on perioperative and acute postoperative periods. Future studies should evaluate the effect of melatonin on chronic pain after hysterectomy. Third, the present study revealed only benefits of preoperative 2 doses of 4 mg of prolonged-release melatonin. Continuation of melatonin in postoperative period or earlier timing to load rather than one night before the surgery might be more appropriate with melatonin's pharmacokinetics and patient's metabolism. Moreover, to our knowledge, the appropriate dose and timing of oral prolonged-released melatonin was not established in perioperative period. Conclusion Preoperative orally prolonged-release melatonin had advantages in patients who underwent hysterectomy and/or oophorectomy under spinal anesthesia in terms of decreased morphine consumption, pain score in PACU. Postoperative fatigue, subjective general well-being, VAS pain score and also patients' satisfaction score in treatment group were better than placebo without adverse effects. Results showed in mean ± S.D or n(%) Results showed in mean ± S.D or n(%) Figure 1 CONSORT diagram
/**
 * True if these statistics satisfy the given threshold statistics.
 * <p>
 * Specifically, compares the values of the following attributes:
 * <ul>
 * <li>{@link #addedEdges()}
 * <li>{@link #addedEdgeWeight()}
 * <li>{@link #maxCliqueSize()}
 * <li>{@link #maxCliqueCardinality()}
 * </ul>
 * If, for each of these attributes of {@code threshold} that has a non-negative value, the
 * current object has a value that is less than or equal to the threshold value, then the
 * threshold is satisfied (a negative threshold value means that attribute is ignored).
 */
public boolean meetsThreshold(Stats threshold) {
    return (threshold._addedEdges < 0 || threshold._addedEdges >= _addedEdges)
        && (threshold._addedEdgeWeight < 0 || threshold._addedEdgeWeight >= _addedEdgeWeight)
        && (threshold._maxClique < 0 || threshold._maxClique >= _maxClique)
        && (threshold._maxCliqueCardinality < 0
            || threshold._maxCliqueCardinality >= _maxCliqueCardinality);
}
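A small illustration of the threshold semantics described in the Javadoc. The Stats constructor used here is hypothetical (the snippet does not show how the four fields are populated), but the sentinel behavior of negative values is taken directly from the method body.

// Hypothetical constructor: (addedEdges, addedEdgeWeight, maxCliqueSize, maxCliqueCardinality)
Stats current = new Stats(10, 42.0, 5, 3);
Stats threshold = new Stats(12, -1.0, 5, -1);

// addedEdges:            10 <= 12               -> passes
// addedEdgeWeight:       threshold is negative  -> ignored
// maxCliqueSize:         5 <= 5                 -> passes
// maxCliqueCardinality:  threshold is negative  -> ignored
boolean ok = current.meetsThreshold(threshold); // true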
package vn.techmaster.relation.model.manymany.noextracolumns; import java.util.ArrayList; import java.util.List; import javax.persistence.Entity; import javax.persistence.FetchType; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.ManyToMany; import javax.persistence.Table; import com.fasterxml.jackson.annotation.JsonBackReference; import lombok.Data; import lombok.NoArgsConstructor; @Entity(name = "tag") @Table(name = "tag") @Data @NoArgsConstructor public class Tag { @Id @GeneratedValue(strategy = GenerationType.AUTO) private Long id; private String name; public Tag(String name) { this.name = name; } @JsonBackReference @ManyToMany(mappedBy = "tags", fetch = FetchType.LAZY) List<Article> articles = new ArrayList<>(); }
/**
 * Splits the current object into an array of RiTexts, one per word,
 * maintaining the x and y position of each. Note: in most cases the original
 * RiText should be disposed of manually to avoid a text-doubling effect
 * (via RiText.dispose(originalRiText)).
 */
public RiText[] splitWords()
{
    Object pf = font();
    List<RiText> result = new ArrayList<RiText>();
    String[] txts = text().split(SP);
    for (int i = 0; i < txts.length; i++)
    {
        if (txts[i] != null && txts[i].length() > 0)
        {
            float xPos = wordOffsetWith(pf, txts, i);
            result.add(new RiText(pApplet, txts[i], xPos, this.y));
        }
    }
    return toArray(result);
}
n = int(input())
s = input()
l = list(map(int, s))

req = n // 3  # each digit 0, 1, 2 must appear exactly n/3 times
got = [0, 0, 0]
for num in l:
    got[num] += 1

def do_work(num):
    # Scanning left to right, replace larger over-represented digits
    # with the under-represented digit `num`.
    for i in range(n):
        if got[num] < req and l[i] > num and got[l[i]] > req:
            got[l[i]] -= 1
            got[num] += 1
            l[i] = num

def do_back_work(num):
    # Scanning right to left, replace smaller over-represented digits
    # with the under-represented digit `num`.
    for i in range(n - 1, -1, -1):
        if got[num] < req and l[i] < num and got[l[i]] > req:
            got[l[i]] -= 1
            got[num] += 1
            l[i] = num

if got[0] == got[1] == got[2] == req:
    print(s)
else:
    do_work(0)
    do_work(1)
    do_back_work(2)
    do_back_work(1)
    print("".join(map(str, l)))
PD06-07 PROSPECTIVE ANALYSIS OF OBESITY AND THE EFFECT OF BARIATRIC SURGERY ON URINARY INCONTINENCE
INTRODUCTION AND OBJECTIVES: Epidemiological studies have shown that obesity is an independent risk factor for prevalent and incident incontinence. The prevalence of incontinence has been reported to be as high as 60% to 70% among severely obese women. Objective: to determine urinary incontinence (UI) in women with obesity and the effect of bariatric surgery at 3, 6 and 12 months after surgery.
METHODS: The present study is observational, analytic, prospective and longitudinal. Obese adult women who underwent bariatric surgery from May 2017 to February 2018 were included. A bivariate statistical analysis was performed using chi-square and inferential statistics.
RESULTS: 67 patients in total; 73.1% had some type of UI. Average age was 42.7 years, pre-surgical weight 108.3 kg and BMI 42.7 kg/m2 (Table 1). Stress UI was the most frequent type (55.1%). Mild incontinence was present in 65%, with 30% reporting an effect on their quality of life. At 12 months after surgery, mean weight was 78 kg, mean body mass index was 30.9 kg/m2, and the persistence of UI was 13.4%. On chi-square analysis, type and frequency of incontinence as well as quality of life showed statistically significant differences between before and after surgery (Table 2). Complete resolution was present in 81% of patients.
CONCLUSIONS: Our study confirmed that weight loss after bariatric surgery yields a high rate of UI resolution, and it should be considered as a first line of treatment. Although the main objectives of bariatric surgery are to reduce cardiovascular morbidities, recovery and/or improvement of UI must be evaluated routinely, offering another important long-term benefit.
[Tables 1 and 2: no titles available in the source.]
Source of Funding: NONE
def stepsize(name=None, templates=None):
    queryDiagnostic("Step", name, templates)
/** * @author oktfolio [email protected] * @date 2020/06/08 */ public class AuthorityGroup { private AuthorityGroupEnum group; private List<Authority> authorities; public AuthorityGroup(AuthorityGroupEnum group, List<Authority> authorities) { this.group = group; this.authorities = authorities; } public AuthorityGroupEnum getGroup() { return group; } public void setGroup(AuthorityGroupEnum group) { this.group = group; } public List<Authority> getAuthorities() { return authorities; } public void setAuthorities(List<Authority> authorities) { this.authorities = authorities; } }
/**
 * Cancels an already-generated notification.
 *
 * @param context the calling context
 * @param id the id of the notification to cancel
 */
public static void cancelNotify(Context context, int id) {
    try {
        NotificationManager nMgr = (NotificationManager) context
                .getSystemService(Context.NOTIFICATION_SERVICE);
        nMgr.cancel(id);
    } catch (Exception e) {
        LogUtils.LOGI(TAG, e.getMessage());
    }
}
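A one-line usage sketch; NotificationUtils is a hypothetical name for the class hosting this static method, and 42 is an arbitrary example notification id.

// Hypothetical host class name; the snippet does not show the real one.
// Cancels the notification previously posted with id 42.
NotificationUtils.cancelNotify(getApplicationContext(), 42);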
import { produce } from 'immer'
import { Reducer } from 'redux'
import * as actionTypes from './actionTypes'

export interface ISnapshotState {
  snapshotData: any[] // editor snapshot data
  snapshotIndex: number // snapshot index
}

export const defaultState = {
  snapshotData: [], // editor snapshot data
  snapshotIndex: -1, // snapshot index
}

const reducer: Reducer = (state = defaultState, action: any) =>
  produce(state, (draft: ISnapshotState) => {
    switch (action.type) {
      case actionTypes.SET_SNAPSHOT_DATA:
        draft.snapshotData = action.payload
        return
      case actionTypes.SET_SNAPSHOT_INDEX:
        draft.snapshotIndex = action.payload
        return
      // case actionTypes.CONTEXT_MENU_LEFT:
      //   draft.menuLeft = action.payload
      //   return
      // case actionTypes.CONTEXT_MENU_TOP:
      //   draft.menuTop = action.payload
      //   return
    }
  })

export default reducer
def process_weddings(date, weddings): for wedding_date, couples in weddings.items(): if wedding_date.month == date.month and wedding_date.day == date.day: for couple in couples: yield dict( names=' & '.join(couple), age=date.year - wedding_date.year )
/**
 * Performs those allocations that are absolutely necessary for local initialisation.
 * It initialises a table of chunk tables and the PINS model.
 */
model_t global_and_model_init ()
{
    model_t model;
    global_create (HRE_PROCS);
    model = create_pins_model ();
    global_init (model, SPEC_REL_PERF, timed_model);
    return model;
}
TV Reviews All of our TV reviews in one convenient place. It’s relatively rare for Doctor Who to comment directly on contemporary issues. The classic series was only occasionally interested in situating itself in terms of present-day controversies, and these instances are mostly confined to the Earthbound adventures of Jon Pertwee’s 3rd Doctor: “The Green Death,” for instance, has a lot to say about environmentalism, that most 1970s of hot-button topics (which isn’t to say we’ve fixed, well, anything about the environment since then, but “The Green Death” is almost dizzyingly 1970s in its approach to the topic). With stories like the present-day “Aliens Of London”/“World War Three” and the far-future “Bad Wolf,” the first season of the revived series pretty much set the template for how the show would address issues of the day: When the end of the world comes, people are just going to sit around and watch it happen on television. It’s the kind of satirical point that isn’t exactly shocking a TV writer came up with, and it had the added benefit of setting up a new round of celebrity cameos every time the Doctor watched the latest global peril begin to unfold. Russell T. Davies’ era never really developed much of a cohesive take on modern issues beyond a vague distrust of modern technology, be it the upgrades of “Rise Of The Cybermen”/“The Age Of Steel” or the killer GPS of “The Sontaran Stratagem”/“The Poison Sky.” Advertisement Under Steven Moffat’s stewardship, the show has drifted still further from any real-world parallels. Stories like “The Hungry Earth”/“Cold Blood” and “The Rebel Flesh”/“The Almost People” feel like they ought to have something to say about any number of contemporaries issues, but they end up being largely focused on the broader philosophical premises raised by the science fiction aspects of their premises. Those stories aren’t especially interested in setting up an allegory to contemporary events, but rather in asking something along the lines of, “If this preposterous thing happened, how would people actually react?” Indeed, through all 52 years of its existence, Doctor Who has been primarily interested in exploring more universal and more philosophical questions, rather than specifically engaging with our present political and social milieu. And honestly, that’s probably for the best, if the few alternately halfhearted and hamfisted efforts in that direction are any indication (with the one big, big exception of the Torchwood miniseries “Children Of Earth,” which is every bit as dark and bleak and incredible as people say it is). All of which is to say that when something like “The Zygon Invasion” comes along, it feels fundamentally different from what the show usually gets up to. Episode writer Peter Harness, making his return after penning last season’s most divisive entry in “Kill The Moon,” crafts a story with some unmistakable parallels with the current chaos in the Middle East, with a particular emphasis on ISIS. The episode doesn’t go overboard in making these connections. The Zygon insurgents aren’t allegorical to real-world terrorists, but they are—to borrow J.R.R. Tolkien’s way of thinking—applicable to such groups. The Doctor points out that it plays directly into the radicals’ hands to attack them, as that only serves to radicalize the moderates. The rebel Zygons are savvy in their use of video and internet to strike fear into UNIT. 
They take the real-world use of innocent civilians as human shields to its most terrible logical extreme by raiding their enemies’ memories to turn into the very loved ones the soldiers would be least able to kill. And, on the other side, we see the Doctor Who debut of drone warfare, though that scene primarily acts an affecting bit of foreshadowing for Hitchley’s confrontation with his “mother” rather than any particular commentary on the use of drones. Advertisement That’s probably about the extent of the parallels, though, and it would be a mistake to say “The Zygon Invasion” is making any particularly deep point about the current geopolitical situation. It doesn’t need to. Rather, these connections serve to anchor the story in something more vital than your typical alien invasion plotline, while also offering viewers an opportunity to reflect on what this story might have to say about real-world situations, should they so choose—here again Tolkien’s observation that applicability “resides in the freedom of the reader” rings true. The Zygons’ motivations here are, not coincidentally, rather more nuanced than those in their two previous appearances. In fairness, both the Tom Baker classic “Terror Of The Zygons” and the 50th anniversary extravaganza “The Day Of the Doctor” established the Zygons as more than just evil galactic conquerors: Each story established the Zygons’ planet had been destroyed, and Earth was the chosen replacement, which is just the kind of dire situation that would excuse a certain degree of ruthlessness. But the Zygon radicals here have more specific, more relatable goals, as they demand the right to live openly, to not have to deny their own identities. And, yeah, there’s a bit about global domination mixed in there, but that’s just the outgrowth of what began as justifiable grievances from the younger brood. The return of Osgood after her demise in “Death In Heaven” is handled about as well as one could reasonably hope, with the only real clunkiness coming down to the unanswered question of where precisely the surviving Osgood was when Missy and the Cybermen’s plans were kicking off. There wasn’t any indication in that previous episode that Osgood was living a life in duplicate, but then it wasn’t strictly relevant, and it’s not as though what we learn here isn’t a logical extension of the apparent bond the two Osgoods began to form in “The Day Of The Doctor.” This is probably a bit of a retcon, but it’s a minor one, as these things go, and “The Zygon Invasion” earns this de facto resurrection by having this Osgood refuse to reveal whether she began life as a human or a Zygon. I phrase it that way because it’s the only way to accept her premises while still wanting to know the answer to the essential question. Either way, her non-answer gets the Doctor once again thinking about hybrids, tying back to Davros’ rantings in “The Witch’s Familiar” and his similar musings on Ashildr at the end of “The Girl Who Died.” Osgood herself is used minimally here, serving to set up the plotline with her “sister” in the beginning and then chatting with the Doctor about his old question-mark collars. 
Her ongoing sartorial impersonation of the Doctor now officially encompasses the period of the Doctor’s lives in which he was just randomly slapping questions on all his clothing—get a gander at the 7th Doctor-approved question-mark jumper in the pre-credits sequence—which in real life happened because then-producer John Nathan-Turner insisted the Doctor ought to be mysterious and had a crushingly direct way of realizing this brief. “The Zygon Invasion” almost justifies this past silliness by having Osgood wonder what the question actually was, which both ties the collar in with an ongoing preoccupation of the Moffat era and, more importantly, totally wrongfoots the Doctor as he attempts to interrogate Osgood about her own identity. There’s likely rather more to tease out here between the Doctor and Osgood, but what we see is enough to establish that she has become rather more formidable than her initial fannish awkwardness might indicate. Her vision of maintaining the peace is rather more idealistic than the Doctor’s, who appears intent on finding every last scrap of information he can to defeat the radicals and restore the ceasefire. The two are absolutely working to the same goals, but their methods and priorities don’t necessarily align. Advertisement As is often the case with the first halves of two-parters, much of what happens tonight ranges from setup to slow burn. There’s that one big twist, of course, and we’ll get to that momentarily, but much of what we see here tonight might best be understood as characterizing the radical Zygon threat as opposed to really engaging with it. Take Kate Stewart’s trip to Truth or Consequences. Beyond proving that Doctor Who still has the money for the occasional transatlantic filming excursion, that whole sequence primarily serves to build up tension until the final reveal. There’s not necessarily anything new we actually learn about the Zygon threat—or, for that matter, Kate as a person—leading up to the reveal that Norlander was a Zygon all along and Kate’s latest apparent death. (I say “apparent,” because never count out a Lethbridge-Stewart.) I’m hesitant to call this padding, because that implies scenes like these can’t be worthwhile just because they help generate tension, but I suppose that goes back to the great challenge of reviewing only the first 45 minutes of a 90-minute story: We’re still almost entirely in the buildup phase, and we can still only guess at how next week’s payoff recasts the opening episode. Even so, “The Zygon Invasion” definitely isn’t the most energetic of setup episodes, and a lot of that has to do with the amount of time the Doctor spends on the sidelines. Peter Capaldi is tons of fun here, randomly nicknaming himself Dr. Disco and revealing that he does quite enjoy poncing about in a big plane, but he has only the briefest of interactions with the Zygons themselves. Osgood’s text message theoretically brings him into the main plot straight away, but the episode still wants to spend some time showing how humans deal with the threat the Zygons pose without the Doctor there to save them. This becomes most apparent with the Doctor’s trip to Turmezistan, where he just sort of stands around a lot, first when the drone operator finds herself unable to fire and later when Hitchley is confronted with the woman who might be but almost certainly isn’t his mother. 
Now, neither instance of the Doctor holding back is all that egregious, especially when Rebecca Front’s Colonel Walsh is there both times to block him from any interference he might care to do. “The Zygon Invasion” is methodical in how it paces the setup for next week’s story—which does rather mean we’re limited in how much we judged the effectiveness of that creative decision without first seeing next week’s “The Zygon Inversion.” Advertisement But no matter, because there are still plenty of some standout scenes here, in particular Hitchley’s standoff with his mother in front of the church. The impossibility of the scenario makes it play like something out of The Twilight Zone, although I was actually most put in mind of “The Third Expedition” from Ray Bradbury’s The Martian Chronicles. Despite the presence of Zygon duplicates, it’s the humanity of the scene that elevates it: As much as it’s obvious that Hitchley’s mother is deflecting when she refuses to answer his questions, it’s also asking far too much of the man to ask him to kill someone who looks and acts exactly like his mother because she can’t remember the name of his favorite teddy bear. The scenario has a dreamlike quality, as more and more apparent beloved hostages come out of the church door, punctuated only by Colonel Walsh’s ignored orders. The Doctor here barely registers as a presence in the scene, but then he’s beside the point. The big twist of “The Zygon Invasion,” and the one element that is likeliest to drive a good chunk of next week’s episode, is the reveal that the Clara we spend time with throughout most of this episode is, in fact, an impostor. I’ve had a chance to watch this episode a couple times now, and I can say that the twist works brilliantly either way: Perhaps I’m just naive, but I had no inkling that Clara had been replaced on first viewing, while the second time round I picked up on all the clues that really ought to have given the game away. Advertisement Admittedly, the show is playing a bit of a dirty trick on its audience, as the concealment of the twist relies in part on the fact that, well, Doctor Who has long since trained us to look past apparent inconsistencies in Clara’s character. It really ought to be obvious something is up when Clara leaves the apartment of a frightened child, fixes her hair, and nonchalantly calls back the Doctor with the flip, “Did you just call yourself Dr. Disco?” It really ought to be obvious when Clara interrupts a top-secret, highest-priority military operation to ask Jac if they can drop by her apartment and pick up some things, a request to which Jac is clearly a bit dumbfounded to hear. If this were a companion Doctor Who wrote a bit more tightly—any of the other new series companions, in other words—I suspect it would be obvious that her blase reactions indicate something is wrong. As it is, Clara’s murkier characterization, not to mention the Moffat era’s tendency to prioritize narrative coolness over character consistency, makes it easier to shrug off those bits of weirdness. I’m not sure that’s something the show really ought to be given credit for, exactly, given the twist works in large part because it takes advantage of a more systemic weakness of this current era. 
But damn if it doesn’t work, and there are other moments that would work well with any companion—does the Doctor look agog at Clara because she just admitted she memorized Trivial Pursuit cards, or because he already suspects something might be up?—so I’m not going to get too hung up on this. That’s a good way to sum up “The Zygon Invasion” as a whole, honestly: This isn’t like “Under The Lake” or “The Girl Who Died,” both of which function beautifully on their own terms, without their narrative partner. This episode is more like a typical first half of a two-parter, in that this is always—at least as of next week—going to be judged entirely in tandem with “The Zygon Inversion.” As such, the only real questions to answer are does this episode generate anticipation for next week, and does this episode position “The Zygon Inversion” to go to places and explore things it couldn’t reach if it weren’t the back half of a two-parter? I’d say yes on both counts—assuming any of our heroes are actually still alive to be there next week, that is. Stray observations: I’m not completely sure, but this may well be the first Doctor Who episode that not only passes the Bechdel test but also fails the reverse Bechdel test, as I’m not sure two male characters actually talk to each other directly at any point in this episode. (Yes, the Doctor addresses the soldiers in a general sort of way, but nobody answers him, and I suppose the Zygons are a bit tricky in terms of how we classify their gender, but for all intents and purposes, there aren’t any men talking to men here, as far as I can see.) Does this mean anything? Nah, probably not, in that the Bechdel test is better understood in terms of how it talks to broad trends in gender representation rather than any sort of judgment of a specific episode or movie. But it’s still kind of cool that this episode is, without ever making note of it, so women-centric, if only because that’s still such a rare thing on television. If Osgood does make it out of next week alive, we need to figure out her future Doctor-inspired wardrobe choices. We’ve already had the 4th Doctor’s scarf, the 7th Doctor’s jumper, the 11th Doctor’s bowtie, and the question-mark collars favored by the 4th through 7th Doctors. I’m going to go ahead and say the 3rd Doctor’s frills or the 9th Doctor’s jacket and jumper combo would be fun and a bit less obvious than a 10th Doctor-inspired look, though that would probably be the favorite if she does return once more. Also … she needs to wear the 6th Doctor’s coat, if only to see the look on the 12th Doctor’s face. And you thought the Doctor was furious when he takes on the Daleks… I almost titled this review “Doctor Who is running the Battlestar Galactica playbook,” but I changed my mind upon realizing this isn’t really that great a match for the Cylon threat (other than the similarity in name with Zygon, I suppose). The terrorism parallels and the use of duplicate infiltrators both do feel very BSG, but I’m not sure that’s quite enough, on reflection. I love the fact that this Doctor apparently spends all his downtime playing the electric guitar. Of all the convergences between Doctor and actor, this might well be my favorite. It’s a big, big credit to Peter Capaldi and Rebecca Front’s versatility as actors that I never once thought of the Doctor and Colonel Walsh’s scenes together as a reunion of The Thick Of It. And The Thick Of It is one of my favorite shows! 
On that note, let’s make this week’s random comedy clip a reminder of Malcolm Tucker and Nicola Murray in … well, certainly not happier times. Less Zygon-y, I suppose! I, uh, don’t need to tell any of you that the language isn’t family-friendly, right?
Second-Harmonic Generation As A Probe Of Thin Film And Monolayer Microstructure
In recent years second-harmonic generation (SHG) has gained popularity as a surface analytical technique. Its intrinsic surface sensitivity can make it useful for studying thin films, or even monolayers. In particular, numerous works have appeared where SHG was used to study monolayer or multilayer films of organic molecules deposited by the Langmuir-Blodgett (LB) deposition technique. Often the interest has been in deriving information about molecular hyperpolarizabilities, or the orientational properties of single molecules or parts of molecules. In this work we show that structural information is also available at the supramolecular level.
//----------------------------------------------------------------------------- // Protect the page where we put the marker if a debugger is attached. That way, you get an AV right away // when you blow the guard when running under a debugger. //----------------------------------------------------------------------------- void BaseStackMarker::ProtectMarkerPageInDebugger() { WRAPPER_CONTRACT; DEBUG_ONLY_FUNCTION; if (!g_ProtectStackPagesInDebugger) { return; } if (m_numPages < MINIMUM_PAGES_FOR_DEBUGGER_PROTECTION) { return; } DWORD flOldProtect; LOG((LF_EH, LL_INFO100000, "BSM::PMP: m_pMarker 0x%p, value 0x%p\n", m_pMarker, *m_pMarker)); #undef VirtualProtect BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_NOACCESS, &flOldProtect); _ASSERTE(fSuccess); #define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \ Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) m_fProtectedStackPage = fSuccess; }
import { ECDSAParameters, SigningAlgorithm } from "./securityParameters";

// Canonical JSON encoding: the signed bytes are independent of object
// key ordering, so any party can reproduce them for verification.
const stableStringify = require("json-stable-stringify");

interface TransactionPayload {
  sender: string;
  recipient: string;
  amount: number;
  message?: string;
  tags?: string[];
}

interface SignedTransaction extends TransactionPayload {
  signature: string;
}

// Signs a transaction payload with an ECDSA private key supplied as a
// base64-encoded JWK. Assumes the WebCrypto API is available as a global
// `crypto` object (true in browsers and in Node.js 19+).
export const signTransaction = async (
  privateKey: string,
  payload: TransactionPayload
): Promise<SignedTransaction> => {
  // Decode the base64 string back into a JWK object.
  const signingJwk = JSON.parse(
    Buffer.from(privateKey, "base64").toString("utf-8")
  );

  // Import the JWK as a non-extractable key usable only for signing.
  const signingKey = await crypto.subtle.importKey(
    "jwk",
    signingJwk,
    ECDSAParameters,
    false,
    ["sign"]
  );

  // Serialize the payload canonically and sign the resulting bytes.
  const data = Buffer.from(stableStringify(payload));
  const signatureBuffer = await crypto.subtle.sign(
    SigningAlgorithm,
    signingKey,
    data
  );
  const signature = Buffer.from(signatureBuffer).toString("base64");

  return {
    ...payload,
    signature,
  };
};
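A minimal usage sketch: `encodedKey` below is a hypothetical placeholder for a base64-encoded JWK private key produced elsewhere, not something the module above provides.

// Hypothetical usage (inside an async context):
const signed = await signTransaction(encodedKey, {
  sender: "alice",
  recipient: "bob",
  amount: 42,
  message: "lunch",
});
console.log(signed.signature); // base64-encoded ECDSA signature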
Not to be confused with Bank PIN

A Player identification number (often referred to as PID) is a unique identification number that each player possesses. It previously determined the priority between two or more players when simultaneously performing tasks throughout RuneScape. The update on 17 September 2013 removed the system, with each action now being completely randomised.[1]

Calculating

The effect of PID on players was significantly altered after an update. This article is retained to provide information on past elements of RuneScape.

A random player I.D. was assigned to a player upon logging into the game, and it changed each time they logged in. Prior to the player I.D. system, priority between players was based on the players' IP addresses.

“ When the server processes players, it goes through them each in turn. If you are first in the list, you get processed first. In early versions of Classic, you always got put at the end of the list (if I remember rightly), so if you stayed logged in you would end up at the beginning of the list eventually. Being processed first meant you could catch up with people more easily when following them, which gave an advantage particularly in the wilderness with PVP. A low PID means you were at the beginning of the list, high PIDs were at the end. This is no longer true, as you are now placed in a random place in the list and everyone else is shuffled up, so you don’t retain the same PID throughout your session. This makes things much fairer. ” — Game Engine Team

Players could approximate their player I.D. by following a player and having them run. The delay between the other player moving and the player's own character moving was believed to be caused by the player I.D. If the delay was longer than normal, the player had a higher player I.D. than the other player.

Examples of PID prioritisation

The following list contains examples of when a player I.D. became a factor in determining which player had the higher and lower priority.

Dueling - Determined which player got the first hit when both players simultaneously attacked each other at the beginning of the duel.

Setting up barricades in various activities - Determined if another player could walk through a barricade that was just set up by another player.

Closing doors - Determined if a player could walk through a doorway while another player closed it at the same time.

"Catching" in RuneScape Classic - Determined if a player could "catch" another player when PKing in the Wilderness. "Catching" is a RuneScape Classic term where, after three rounds of combat, a player runs from their opponent and the other player can "catch" them by chasing after them and attacking them again, bringing them back into combat.

Picking up items amongst other players - Affected which player got an item if two or more players picked it up at the same time, such as when competing for items like chronicle fragments, where two players could click at the same time and be within similar distance to the item, but only one player could get it.
#include <stdio.h>
#include <string.h>
#include <ctype.h>

int main()
{
    int upper = 0, lower = 0, i, m;
    char n[105];

    scanf("%s", n);
    m = strlen(n);

    /* Count uppercase and lowercase characters. */
    for (i = 0; i < m; i++) {
        if (n[i] >= 'A' && n[i] <= 'Z')
            upper++;
        else
            lower++;
    }

    /* Convert the whole word to whichever case is already in the
       majority; ties go to lowercase. */
    for (i = 0; i < m; i++)
        printf("%c", upper > lower ? toupper(n[i]) : tolower(n[i]));
    printf("\n");

    return 0;
}
import pytest from nuage_metroae_config.variable_reader import VariableReader QUERY_NESTED_RANGE_SORT_CASES = [ ({}, ["a1", "a3", "a5", "a2", "a4"]), ({"%end": 3}, ["a1", "a3", "a5"]), ({"%end": 99}, ["a1", "a3", "a5", "a2", "a4"]), ({"%end": 0}, []), ({"%end": -2}, ["a1", "a3", "a5"]), ({"%end": -5}, []), ({"%start": 0}, ["a1", "a3", "a5", "a2", "a4"]), ({"%start": 3}, ["a2", "a4"]), ({"%start": 99}, []), ({"%start": -1}, ["a4"]), ({"%start": -2}, ["a2", "a4"]), ({"%start": -6}, ["a1", "a3", "a5", "a2", "a4"]), ({"%start": 0, "%end": 5}, ["a1", "a3", "a5", "a2", "a4"]), ({"%start": 1, "%end": 3}, ["a3", "a5"]), ({"%start": 3, "%end": 99}, ["a2", "a4"]), ({"%start": 3, "%end": 4}, ["a2"]), ({"%start": 3, "%end": 3}, []), ({"%start": 4, "%end": 3}, []), ({"%start": -5, "%end": -1}, ["a1", "a3", "a5", "a2"]), ({"%start": -1, "%end": 0}, []), ({"%start": -3, "%end": -1}, ["a5", "a2"]), ({"%start": -3, "%end": -2}, ["a5"]), ({"%start": -3, "%end": 5}, ["a5", "a2", "a4"]), ({"%sort": "name"}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort": "name", "%end": 3}, ["a1", "a2", "a3"]), ({"%sort": "name", "%end": 99}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort": "name", "%end": 0}, []), ({"%sort": "name", "%end": -2}, ["a1", "a2", "a3"]), ({"%sort": "name", "%end": -5}, []), ({"%sort": "name", "%start": 0}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort": "name", "%start": 3}, ["a4", "a5"]), ({"%sort": "name", "%start": 99}, []), ({"%sort": "name", "%start": -1}, ["a5"]), ({"%sort": "name", "%start": -2}, ["a4", "a5"]), ({"%sort": "name", "%start": -6}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort": "name", "%start": 0, "%end": 5}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort": "name", "%start": 1, "%end": 3}, ["a2", "a3"]), ({"%sort": "name", "%start": 3, "%end": 99}, ["a4", "a5"]), ({"%sort": "name", "%start": 3, "%end": 4}, ["a4"]), ({"%sort": "name", "%start": 3, "%end": 3}, []), ({"%sort": "name", "%start": 4, "%end": 3}, []), ({"%sort": "name", "%start": -5, "%end": -1}, ["a1", "a2", "a3", "a4"]), ({"%sort": "name", "%start": -1, "%end": 0}, []), ({"%sort": "name", "%start": -3, "%end": -1}, ["a3", "a4"]), ({"%sort": "name", "%start": -3, "%end": -2}, ["a3"]), ({"%sort": "name", "%start": -3, "%end": 5}, ["a3", "a4", "a5"]), ({"%sort_asc": "name"}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort_asc": "name", "%end": 3}, ["a1", "a2", "a3"]), ({"%sort_asc": "name", "%end": 99}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort_asc": "name", "%end": 0}, []), ({"%sort_asc": "name", "%end": -2}, ["a1", "a2", "a3"]), ({"%sort_asc": "name", "%end": -5}, []), ({"%sort_asc": "name", "%start": 0}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort_asc": "name", "%start": 3}, ["a4", "a5"]), ({"%sort_asc": "name", "%start": 99}, []), ({"%sort_asc": "name", "%start": -1}, ["a5"]), ({"%sort_asc": "name", "%start": -2}, ["a4", "a5"]), ({"%sort_asc": "name", "%start": -6}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort_asc": "name", "%start": 0, "%end": 5}, ["a1", "a2", "a3", "a4", "a5"]), ({"%sort_asc": "name", "%start": 1, "%end": 3}, ["a2", "a3"]), ({"%sort_asc": "name", "%start": 3, "%end": 99}, ["a4", "a5"]), ({"%sort_asc": "name", "%start": 3, "%end": 4}, ["a4"]), ({"%sort_asc": "name", "%start": 3, "%end": 3}, []), ({"%sort_asc": "name", "%start": 4, "%end": 3}, []), ({"%sort_asc": "name", "%start": -5, "%end": -1}, ["a1", "a2", "a3", "a4"]), ({"%sort_asc": "name", "%start": -1, "%end": 0}, []), ({"%sort_asc": "name", "%start": -3, "%end": -1}, ["a3", "a4"]), ({"%sort_asc": "name", "%start": -3, "%end": -2}, ["a3"]), ({"%sort_asc": "name", 
"%start": -3, "%end": 5}, ["a3", "a4", "a5"]), ({"%sort_desc": "name"}, ["a5", "a4", "a3", "a2", "a1"]), ({"%sort_desc": "name", "%end": 3}, ["a5", "a4", "a3"]), ({"%sort_desc": "name", "%end": 99}, ["a5", "a4", "a3", "a2", "a1"]), ({"%sort_desc": "name", "%end": 0}, []), ({"%sort_desc": "name", "%end": -2}, ["a5", "a4", "a3"]), ({"%sort_desc": "name", "%end": -5}, []), ({"%sort_desc": "name", "%start": 0}, ["a5", "a4", "a3", "a2", "a1"]), ({"%sort_desc": "name", "%start": 3}, ["a2", "a1"]), ({"%sort_desc": "name", "%start": 99}, []), ({"%sort_desc": "name", "%start": -1}, ["a1"]), ({"%sort_desc": "name", "%start": -2}, ["a2", "a1"]), ({"%sort_desc": "name", "%start": -6}, ["a5", "a4", "a3", "a2", "a1"]), ({"%sort_desc": "name", "%start": 0, "%end": 5}, ["a5", "a4", "a3", "a2", "a1"]), ({"%sort_desc": "name", "%start": 1, "%end": 3}, ["a4", "a3"]), ({"%sort_desc": "name", "%start": 3, "%end": 99}, ["a2", "a1"]), ({"%sort_desc": "name", "%start": 3, "%end": 4}, ["a2"]), ({"%sort_desc": "name", "%start": 3, "%end": 3}, []), ({"%sort_desc": "name", "%start": 4, "%end": 3}, []), ({"%sort_desc": "name", "%start": -5, "%end": -1}, ["a5", "a4", "a3", "a2"]), ({"%sort_desc": "name", "%start": -1, "%end": 0}, []), ({"%sort_desc": "name", "%start": -3, "%end": -1}, ["a3", "a2"]), ({"%sort_desc": "name", "%start": -3, "%end": -2}, ["a3"]), ({"%sort_desc": "name", "%start": -3, "%end": 5}, ["a3", "a2", "a1"]), ] QUERY_NESTED_ATTR_FILTER_CASES = [ ({}, ["a1", "a3", "a5", "a2", "a4"]), ({"avatartype": "URL"}, ["a1", "a3", "a5"]), ({"avatartype": "URL", "%sort_desc": "dhcpleaseinterval"}, ["a5", "a3", "a1"]), ({"avatartype": "URL", "%start": 1, "%end": 2}, ["a3"]), ({"avatartype": "BASE64"}, ["a2", "a4"]), ({"avatartype": "BASE64", "name": "a2"}, ["a2"]), ({"avatartype": "URL", "name": "a2"}, []), ({"name": ["a1", "a2", "a3"]}, ["a1", "a3", "a2"]), ({"avatartype": "BASE64", "name": ["a1", "a2", "a3"]}, ["a2"]), ] class TestVariableReaderSession(object): def test_start__success(self): var_reader = VariableReader() var_reader.start_session() def test_stop__success(self): var_reader = VariableReader() var_reader.stop_session() def test_connect__not_supported(self): var_reader = VariableReader() with pytest.raises(NotImplementedError): var_reader.connect("host") class TestVariableReaderQuery(object): def test_attrs_single__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = "timestamp" mock_entry_1 = { "timestamp": 100, "cpu": "10.1" } var_reader.set_data([mock_entry_1]) result = var_reader.query(objects, attributes) assert result == [100] def test_attrs_single__unknown(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = ["timestamp", "memory"] mock_entry_1 = { "timestamp": 100, "cpu": 10.1, "memory": 500.2 } var_reader.set_data([mock_entry_1]) result = var_reader.query(objects, attributes) assert result == [{"timestamp": 100, "memory": 500.2}] def test_attrs_multiple__unknown(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = ["timestamp", "memory", "unknown"] mock_entry_1 = { "timestamp": 100, "cpu": 10.1, "memory": 500.2 } var_reader.set_data([mock_entry_1]) result = var_reader.query(objects, attributes) assert result == [{"timestamp": 100, "memory": 500.2}] def test_attrs_all__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = ["*"] mock_entry_1 = { "timestamp": 100, "cpu": 10.1, "memory": 500.2 } 
var_reader.set_data([mock_entry_1]) result = var_reader.query(objects, attributes) assert result == [mock_entry_1] def test_single__none_found(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = "timestamp" result = var_reader.query(objects, attributes) assert result == [] def test_multiple__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = "timestamp" mock_entry_1 = { "timestamp": 100, "cpu": "10.1" } mock_entry_2 = { "timestamp": 200, "cpu": "20.2" } mock_entry_3 = { "timestamp": 300, "cpu": "30.3" } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [100, 200, 300] def test_multiple_2__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = ["timestamp", "cpu"] mock_entry_1 = { "timestamp": 100, "cpu": "10.1" } mock_entry_2 = { "timestamp": 200, "cpu": "20.2" } mock_entry_3 = { "timestamp": 300, "cpu": "30.3" } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [mock_entry_1, mock_entry_2, mock_entry_3] def test_no_attr_1__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None} ] attributes = None mock_entry_1 = { "timestamp": 100, "cpu": "10.1" } mock_entry_2 = { "timestamp": 200, "cpu": "20.2" } mock_entry_3 = { "timestamp": 300, "cpu": "30.3" } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [mock_entry_1, mock_entry_2, mock_entry_3] def test_no_attr_2__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": {"%start": 1}} ] attributes = None mock_entry_1 = { "timestamp": 100, "cpu": "10.1" } mock_entry_2 = { "timestamp": 200, "cpu": "20.2" } mock_entry_3 = { "timestamp": 300, "cpu": "30.3" } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [mock_entry_2, mock_entry_3] def test_nested_object_multiple__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "disks", "filter": None}, ] attributes = "name" mock_entry_1 = { "timestamp": 100, "cpu": "10.1", "disks": [{"name": "cf1:"}, {"name": "cf2:"}] } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf3:"}, {"name": "cf4:"}] } var_reader.set_data([mock_entry_1, mock_entry_2]) result = var_reader.query(objects, attributes) assert result == ["cf1:", "cf2:", "cf3:", "cf4:"] def test_nested_object_multiple__empty_child(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "disks", "filter": None}, ] attributes = "name" mock_entry_1 = { "timestamp": 100, "cpu": "10.1", "disks": [] } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf3:"}, {"name": "cf4:"}] } var_reader.set_data([mock_entry_1, mock_entry_2]) result = var_reader.query(objects, attributes) assert result == ["cf3:", "cf4:"] def test_nested_object_multiple__no_child(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "disks", "filter": None}, ] attributes = "name" mock_entry_1 = { "timestamp": 100, "cpu": "10.1" } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf3:"}, {"name": "cf4:"}] } var_reader.set_data([mock_entry_1, mock_entry_2]) result = var_reader.query(objects, attributes) assert result == 
["cf3:", "cf4:"] def test_nested_no_attr_1__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "disks", "filter": None}, ] attributes = None mock_entry_1 = { "timestamp": 100, "cpu": "10.1", "disks": [{"name": "cf1:"}, {"name": "cf2:"}, {"name": "cf3:"}] } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf4:"}, {"name": "cf5:"}, {"name": "cf6:"}] } mock_entry_3 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf7:"}, {"name": "cf8:"}, {"name": "cf9:"}] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [ {"name": "cf1:"}, {"name": "cf2:"}, {"name": "cf3:"}, {"name": "cf4:"}, {"name": "cf5:"}, {"name": "cf6:"}, {"name": "cf7:"}, {"name": "cf8:"}, {"name": "cf9:"}, ] def test_nested_no_attr_2__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "disks", "filter": {"%start": 1}}, ] attributes = None mock_entry_1 = { "timestamp": 100, "cpu": "10.1", "disks": [{"name": "cf1:"}, {"name": "cf2:"}, {"name": "cf3:"}] } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf4:"}, {"name": "cf5:"}, {"name": "cf6:"}] } mock_entry_3 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf7:"}, {"name": "cf8:"}, {"name": "cf9:"}] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [ {"name": "cf2:"}, {"name": "cf3:"}, {"name": "cf5:"}, {"name": "cf6:"}, {"name": "cf8:"}, {"name": "cf9:"}, ] def test_nested_no_attr_3__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": {"%start": 1}}, {"name": "disks", "filter": None}, ] attributes = None mock_entry_1 = { "timestamp": 100, "cpu": "10.1", "disks": [{"name": "cf1:"}, {"name": "cf2:"}, {"name": "cf3:"}] } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf4:"}, {"name": "cf5:"}, {"name": "cf6:"}] } mock_entry_3 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf7:"}, {"name": "cf8:"}, {"name": "cf9:"}] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [ {"name": "cf4:"}, {"name": "cf5:"}, {"name": "cf6:"}, {"name": "cf7:"}, {"name": "cf8:"}, {"name": "cf9:"}, ] def test_nested_no_attr_4__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": {"%start": 1}}, {"name": "disks", "filter": {"%start": 1}}, ] attributes = None mock_entry_1 = { "timestamp": 100, "cpu": "10.1", "disks": [{"name": "cf1:"}, {"name": "cf2:"}, {"name": "cf3:"}] } mock_entry_2 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf4:"}, {"name": "cf5:"}, {"name": "cf6:"}] } mock_entry_3 = { "timestamp": 200, "cpu": "20.2", "disks": [{"name": "cf7:"}, {"name": "cf8:"}, {"name": "cf9:"}] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3]) result = var_reader.query(objects, attributes) assert result == [ {"name": "cf5:"}, {"name": "cf6:"}, {"name": "cf8:"}, {"name": "cf9:"}, ] def test_group_parent__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": {"%group": "dhcpleaseinterval"}}, {"name": "domains", "filter": None}, ] attributes = "name" mock_entry_1 = { "name": "enterprise_1", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_1", "bgpenabled": True}, {"name": "domain_2", "bgpenabled": False} ] } mock_entry_2 = { "name": "enterprise_2", "dhcpleaseinterval": 20, "domains": [] } 
mock_entry_3 = { "name": "enterprise_3", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_3", "bgpenabled": True}, {"name": "domain_4", "bgpenabled": True} ] } mock_entry_4 = { "name": "enterprise_4", "dhcpleaseinterval": 30, "domains": [ {"name": "domain_5", "bgpenabled": True}, {"name": "domain_6", "bgpenabled": True} ] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3, mock_entry_4]) results = var_reader.query(objects, attributes) assert results == [ [10, ["domain_1", "domain_2", "domain_3", "domain_4"]], [20, []], [30, ["domain_5", "domain_6"]] ] def test_group_child__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "domains", "filter": {"%group": "bgpenabled"}}, ] attributes = "name" mock_entry_1 = { "name": "enterprise_1", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_1", "bgpenabled": True}, {"name": "domain_2", "bgpenabled": False} ] } mock_entry_2 = { "name": "enterprise_2", "dhcpleaseinterval": 20, "domains": [] } mock_entry_3 = { "name": "enterprise_3", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_3", "bgpenabled": True}, {"name": "domain_4", "bgpenabled": True} ] } mock_entry_4 = { "name": "enterprise_4", "dhcpleaseinterval": 30, "domains": [ {"name": "domain_5", "bgpenabled": True}, {"name": "domain_6", "bgpenabled": True} ] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3, mock_entry_4]) results = var_reader.query(objects, attributes) assert results == [ [True, ["domain_1", "domain_3", "domain_4", "domain_5", "domain_6"]], [False, ["domain_2"]] ] def test_group_both__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": {"%group": "dhcpleaseinterval"}}, {"name": "domains", "filter": {"%group": "bgpenabled"}}, ] attributes = "name" mock_entry_1 = { "name": "enterprise_1", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_1", "bgpenabled": True}, {"name": "domain_2", "bgpenabled": False} ] } mock_entry_2 = { "name": "enterprise_2", "dhcpleaseinterval": 20, "domains": [] } mock_entry_3 = { "name": "enterprise_3", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_3", "bgpenabled": True}, {"name": "domain_4", "bgpenabled": True} ] } mock_entry_4 = { "name": "enterprise_4", "dhcpleaseinterval": 30, "domains": [ {"name": "domain_5", "bgpenabled": True}, {"name": "domain_6", "bgpenabled": True} ] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3, mock_entry_4]) results = var_reader.query(objects, attributes) assert results == [ [10, [[True, ["domain_1", "domain_3", "domain_4"]], [False, ["domain_2"]]]], [20, []], [30, [[True, ["domain_5", "domain_6"]]]] ] def test_group_both_filtered__success(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": {"%group": "dhcpleaseinterval"}}, {"name": "domains", "filter": {"%group": "bgpenabled", "bgpenabled": True}} ] attributes = "name" mock_entry_1 = { "name": "enterprise_1", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_1", "bgpenabled": True}, {"name": "domain_2", "bgpenabled": False} ] } mock_entry_2 = { "name": "enterprise_2", "dhcpleaseinterval": 20, "domains": [] } mock_entry_3 = { "name": "enterprise_3", "dhcpleaseinterval": 10, "domains": [ {"name": "domain_3", "bgpenabled": True}, {"name": "domain_4", "bgpenabled": True} ] } mock_entry_4 = { "name": "enterprise_4", "dhcpleaseinterval": 30, "domains": [ {"name": "domain_5", "bgpenabled": True}, {"name": "domain_6", "bgpenabled": True} ] } var_reader.set_data([mock_entry_1, mock_entry_2, mock_entry_3, 
mock_entry_4]) results = var_reader.query(objects, attributes) assert results == [ [10, [[True, ["domain_1", "domain_3", "domain_4"]]]], [20, [[True, []]]], [30, [[True, ["domain_5", "domain_6"]]]] ] @pytest.mark.parametrize("filter, expected", QUERY_NESTED_RANGE_SORT_CASES) def test_nested_range_sort__success(self, filter, expected): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "name", "filter": filter} ] attributes = "name" mock_object_1 = {"name": "a1"} mock_object_3 = {"name": "a3"} mock_object_5 = {"name": "a5"} mock_object_2 = {"name": "a2"} mock_object_4 = {"name": "a4"} mock_entry = [mock_object_1, mock_object_3, mock_object_5, mock_object_2, mock_object_4] var_reader.set_data([mock_entry]) results = var_reader.query(objects, attributes) assert results == expected @pytest.mark.parametrize("filter, expected", QUERY_NESTED_ATTR_FILTER_CASES) def test_nested_attr_filter__success(self, filter, expected): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "objects", "filter": filter} ] attributes = "name" mock_object_1 = {"name": "a1", "dhcpleaseinterval": 300, "avatartype": "URL"} mock_object_3 = {"name": "a3", "dhcpleaseinterval": 400, "avatartype": "URL"} mock_object_5 = {"name": "a5", "dhcpleaseinterval": 500, "avatartype": "URL"} mock_object_2 = {"name": "a2", "dhcpleaseinterval": 100, "avatartype": "BASE64"} mock_object_4 = {"name": "a4", "dhcpleaseinterval": 200, "avatartype": "BASE64"} mock_entry = {"objects": [mock_object_1, mock_object_3, mock_object_5, mock_object_2, mock_object_4]} var_reader.set_data([mock_entry]) results = var_reader.query(objects, attributes) assert results == expected def test_nested__not_found(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "foobar", "filter": None} ] attributes = "name" mock_object_1 = {"name": "a1", "dhcpleaseinterval": 300, "avatartype": "URL"} mock_entry = {"objects": [mock_object_1]} var_reader.set_data([mock_entry]) results = var_reader.query(objects, attributes) assert results == [] def test_nested__not_dict(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "not_a_dict", "filter": None} ] attributes = "name" mock_object_1 = "not_a_dict" mock_entry = {"objects": [mock_object_1]} var_reader.set_data([mock_entry]) results = var_reader.query(objects, attributes) assert results == [] def test_nested__no_attr(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "objects", "filter": None} ] attributes = "foobar" mock_object_1 = {"name": "a1", "dhcpleaseinterval": 300, "avatartype": "URL"} mock_entry = {"objects": [mock_object_1]} var_reader.set_data([mock_entry]) results = var_reader.query(objects, attributes) assert results == [] def test_nested__no_attr_multiple(self): var_reader = VariableReader() objects = [ {"name": "v", "filter": None}, {"name": "objects", "filter": None} ] attributes = ["name", "foobar"] mock_object_1 = {"name": "a1", "dhcpleaseinterval": 300, "avatartype": "URL"} mock_entry = {"objects": [mock_object_1]} var_reader.set_data([mock_entry]) results = var_reader.query(objects, attributes) assert results == [{"name": "a1"}] def test_query_attribute__success(self): var_reader = VariableReader() mock_object = {"name": "object name"} value = var_reader.query_attribute(mock_object, "name") assert value == "object name" def test_query_attribute__not_found(self): var_reader = VariableReader() mock_object = {"name": "object name"} value = 
var_reader.query_attribute(mock_object, "foobar") assert value is None def test_query_attribute__bad_type(self): var_reader = VariableReader() mock_object = ["name", "object name"] value = var_reader.query_attribute(mock_object, "name") assert value is None
“Liberty is the only thing you cannot have unless you are willing to give it to others.” — Wm. Allen White [1868-1944], 1923 Pulitzer Prize winner known as the ‘sage of Emporia’, who gained national fame in the election of 1896 with his ferocious anti-populist editorial, ‘What’s the Matter with Kansas?’

“We must keep our laws few, simple and clear.” — Gail K Lightfoot, CA LP Candidate for US Senate, 2010

The reason governments exist is that people decide to create governing bodies to protect their individual rights and keep the peace with:

A Court system that investigates crime, seeks restitution, and isolates violent individuals

A Diplomatic Service that interacts with other nation states

A Defense Department that defends the country from invasion with an Army, Navy and Air Force

The American government is a Constitutional Republic with a limited federal government, and the Constitution says the states shall provide a Republican government for their residents.

2004 Statement in Primary Voter Pamphlet

I am a Charter member of the Libertarian Party but I cut my political teeth as a Young Republican and Young American for Freedom, walking precincts for Barry Goldwater in 1964. My beliefs mirror this statement from his book, The Conscience of a Conservative: "I have no interest in streamlining government or in making it more efficient, for I mean to reduce its size. I do not undertake to promote welfare, for I propose to extend freedom. My aim is not to pass laws, but to cancel old ones that do violence to the Constitution, or that have failed in their purpose, or that impose on the people an unwarranted financial burden. I will not attempt to discover whether legislation is needed before I have first determined whether it is constitutionally permissible. And if I should later be attacked for neglecting my constituents' interests, I shall reply that I was informed their main interest is liberty and that in that cause I am doing the very best I can."

In 2001, I watched a biography of Harry Truman which inspired me to obtain a book of his quotes. Here is one expressing my belief: "In the cause of freedom, we have to battle for the rights of people with whom we do not agree; and whom, in many cases, we may not like. These people test the strength of the freedoms which protect all of us. If we do not defend their rights, we endanger our own."

2010 Candidate Statement in the Nov Voter Pamphlet:

Gail K. Lightfoot, Libertarian, 849 Mesa Dr., Arroyo Grande, CA 93420, (805) 709-1130, www.gailklightfoot.com

"Career politicians, lobbyists and the parties in power failed us. With no political/corporate ties, pledged to serve one term, I will defend our Constitution; vote to cut taxes, spending and regulations; withdraw U.S. troops from overseas; protect the 2nd Amendment; and audit the Federal Reserve."

ISSUES - Gail's views on CORE ISSUES mentioned by the media.

War on Drugs is a failed policy.

Wars in Iraq and Afghanistan are resulting in more violence.

National Security can be preserved without endangering Civil Liberties.

Right to Bear Arms is an individual right of self defense.

Right to Life is for the living. Abortion is a personal medical decision for the individuals concerned. Late term abortions occur in other states and result in the death of a viable individual. Abortions should not occur after the first 3 months of a pregnancy for good, sound, medical reasons. Physicians should be informing their patients about these reasons.

Big Government is the problem, not a solution.
Separation of Church and State means the state can't endorse religion, not that you cannot follow a religion of your choice.

Government According to Gail Lightfoot

It is the Cities and Counties that need to work together so that roads meet at their jurisdictional lines, not the state or federal governments. We, the People, as individuals, need to understand that government [since it uses force] is not a solution for every problem. Problems are best solved by individuals, either on their own or with friends, family or groups set up by and for voluntary individual action.

THE CONSTITUTION Restore it. The Constitution grants certain carefully ‘enumerated powers’ to the federal government. The purpose is to limit the federal government to just those listed powers and no others. All other non-enumerated ‘powers’ are “reserved to the states . . . or to the people.” Cut the federal government back to those ‘powers’ by returning all unconstitutional agencies and regulations back to the states. Individuals are ultimately responsible for their decisions and may suffer from poor ones. The government is not there to fix them or their mistakes. Individuals can choose to help others but should not be forced to by government.

FOREIGN POLICY Commerce, not coercion, creates fewer enemies. Notify the world that the U.S. is not their policeman. Bring our troops home. Return control of Iraq to the people of Iraq and responsibility for peace in the Middle East and everywhere else to each country's citizens. They have more to lose than we do. Individuals and/or non-governmental organizations [NGOs] can and should work to reduce conflict around the world and provide aid when and where it is needed. We and others should only go where it is safe and where safe havens for those in danger can be provided. We and others seeking to help those in need should use diplomatic channels to provide permanent relocation so lives are rebuilt, instead of waiting years hoping to return ‘home’, whether the cause is a natural disaster or some sort of civil dispute resulting in violence.

THE ECONOMY Let the private sector work its wonders. If we respect private property and reduce government control by agencies, licenses, regulations and subsidies, everyone can work towards the goals they think are most important.

TAXATION is legalized theft. Stop it. Find other funding methods [fees]. If a service cannot attract financial support, maybe we don't need it.

ENVIRONMENT AND SAFETY Reform the legal system for easier access by individuals. Set up user-friendly web sites and make them available in kiosks or at the local library so anyone can sue for damages to their person or property. If polluters had to pay, they would think more carefully about what they do. Possession or use of a substance is not a crime: ‘decriminalize drugs’ and educate to promote responsible drug use. Nuclear waste should be reused so the final byproduct is so small that storage is not a problem. Different life styles are not a crime. If force or violence is used, authorities can investigate and ask for a court hearing prior to taking any action unless a life is in immediate danger. It is up to the court to provide appropriate solutions.

HEALTH CARE Pass the “Health Freedom Protection Act” to deregulate the health care industry. Leave all medical decisions, including abortions, to individuals, families and physicians. Let individuals shop for the health insurance they need. When we individually select health insurance, it can follow us from job to job and state to state.
Insurance companies will compete for our business if we have the right to pick and choose the coverage we need. EDUCATION Allow parents to choose the schooling they wish for their children. Gradually end all government funding of education beginning with vouchers that follow the child. Recognize that children learn best when they choose to learn and learning is a lifelong process. With today's technology no one need remain uneducated even if they never leave home. Exceptions to this would come under the heading of Welfare. WELFARE Privately funded and supported agencies do a better job of helping individuals get back on their feet or have long term assistance with dignity. This is as true internationally as it is here at home [see foreign policy]. SOCIAL SECURITY Encourage personal retirement savings accounts. Merge all taxpayer funded and government agency retirement plans for everyone over 55 into one identical plan for all retirees – including Congress. Allow anyone to opt out along the way transferring what they paid, with reasonable interest added, into their personal retirement account. IMMIGRATION Set up a simple registration system to know who is here, why and for how long. Do not punish those who came here without ‘permission’ because we failed to allow for their entry. They came because we had jobs waiting for them. That is a sure sign we were not permitting enough immigration to meet our needs [not theirs]. ID cards. Passports, Visas and licenses or privately issued IDs can be tamper proof and insured as such. POLITICAL REFORM No retirement benefits for elected officials. Let them, like everyone else, earn retirement by working and paying into a fund for that purpose. With no ‘retirement benefits’, we might not need to limit terms. Term limits for all elected officials – I prefer just one office, one term. That sounds drastic but it would be very effective. The government of the people can be run by the people. We don't need ‘experts’ or career politicians. In fact, that is the last thing we need. Meanwhile, the term limits we now have are continuously being challenged. If the people really want long terms for elected officials, let them set them longer. Enact “Fully Informed Jury” laws. Let Jurors know they can ‘judge’ the law as well as the defendant. Immediately post vote counts at all polling places with next day pick up and posting at a central public location. Inform the voters which candidates are publicly posting all contributions and expenditures. The power to overturn existing problems lies in the hands of the voters. Know who you are voting for, what they believe and why they seek office. When voters begin to pay attention and turn the rascals out, change will happen. SECOND AMENDMENT Secure our right of Self Defense by keeping the Second Amendment intact and understood as a protection of the individual right of all Americans to keep and bear arms for protection. EMINENT DOMAIN Eminent Domain is for ‘public’ use of land only! GAY RIGHTS and CIVIL UNIONS Marriage is no longer a legal issue of property so it is no longer a government issue unless the individuals wish to have a legal contract. It has become a religious or Church issue. Civil unions are legal contracts which can be taken to a court. The government does not need to write our contracts for us. We can do it ourselves and/or hire competent help. Employee benefits are up to the business and its employees, not the government. 
STEM CELL RESEARCH Let private enterprise deal with Stem cell and DNA research and development. Let the discussion of morals and ethics take place in the public arena where all sides can be heard and individuals can decide what projects to support according to their own beliefs or business goals.
import sys


def print_qbin(program, file=sys.stdout, *args, **kwargs):
    """Print an assembled program as rows of 64-bit words, one byte at a
    time in binary, most-significant byte first.

    If ``program`` is callable, it is first assembled with ``assemble``,
    which is assumed to be provided by the surrounding module.
    """
    if callable(program):
        program = assemble(program, *args, **kwargs)
    code = memoryview(program).tobytes()
    # Python 2 compatibility: ``tobytes`` returns a str there, so convert
    # it to a list of ints; on Python 3 indexing bytes already yields ints.
    code = list(map(ord, code)) if isinstance(code, str) else code
    assert len(code) % 8 == 0
    for i in range(len(code) // 8):
        # Emit the 8 bytes of each word, separated by single spaces.
        for j in range(7, -1, -1):
            print("%08d" % int(bin(code[i * 8 + j])[2:]),
                  end=' ' if j != 0 else '', file=file)
        print(file=file)
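A minimal usage sketch, assuming a bytes-like, already-assembled program; the 16-byte value below is made up purely for illustration:

# Two 64-bit words of dummy "machine code"; each prints as 8
# space-separated binary bytes, most-significant byte first.
print_qbin(bytes(range(16)))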
# Minimum number of adjacent swaps needed to move the maximum element to
# the front of the line and the minimum element to the back.
n = int(input())
ar = list(map(int, input().split()))

ma = max(ar)
mi = min(ar)

v1 = 0  # index of the last occurrence of the minimum (no break)
v2 = 0  # index of the first occurrence of the maximum
for i in range(n):
    if mi == ar[i]:
        v1 = i
for i in range(n):
    if ma == ar[i]:
        v2 = i
        break

if v1 < v2:
    # The two paths cross, saving one swap: v2 + (n - 1 - v1) - 1.
    print(n - v1 + v2 - 2)
else:
    # No crossing: v2 swaps left plus (n - 1 - v1) swaps right.
    print(n - v1 - 1 + v2)
// Echo responds with the message that was sent to it func (c *Host) Echo(message *string, reply *command.EchoResponse) error { cmd := command.NewHostEcho(*message) utils.LogInfo(cmd.String()) *reply = command.EchoResponse{ ID: cmd.ID, Error: "", Message: cmd.Body.(string), } return nil }
class SourceRef: """ Reference to a segment of source. This is used to provide useful context to the original RDL source when reporting compiler messages. """ def __init__(self, start=None, end=None, filename=None, seg_map=None): #: SegmentMap object that provides character coordinate mapping table self.seg_map = seg_map #: Character position of start of selection self.start = start #: Character position of end of selection self.end = end #: Path to file from start of selection self.filename = filename #: Line number of start of selection self.start_line = None #: Column of first character in selection self.start_col = None #: Line number of end of selection self.end_line = None #: Column of last character in selection self.end_col = None #: Raw line of text that corresponds to start_line self.start_line_text = None self._coordinates_resolved = False def derive_coordinates(self): """ Depending on the compilation source, some members of the SourceRef object may be incomplete. Calling this function performs the necessary derivations to complete the object. """ if self._coordinates_resolved: # Coordinates were already resolved. Skip return if self.seg_map is not None: # Translate coordinates self.start, self.filename, include_ref = self.seg_map.derive_source_offset(self.start) self.end, end_filename, _ = self.seg_map.derive_source_offset(self.end, is_end=True) else: end_filename = self.filename line_start = 0 lineno = 1 file_pos = 0 # Skip deriving end coordinate if selection spans multiple files if self.filename != end_filename: get_end = False elif self.end is None: get_end = False else: get_end = True if (self.filename is not None) and (self.start is not None): with open(self.filename, 'r', newline='', encoding='utf_8') as fp: while True: line_text = fp.readline() file_pos += len(line_text) if line_text == "": break if (self.start_line is None) and (self.start < file_pos): self.start_line = lineno self.start_col = self.start - line_start self.start_line_text = line_text.rstrip("\n").rstrip("\r") if not get_end: break if get_end and (self.end_line is None) and (self.end < file_pos): self.end_line = lineno self.end_col = self.end - line_start break lineno += 1 line_start = file_pos # If no end coordinate was derived, just do a single char selection if not get_end: self.end_line = self.start_line self.end_col = self.start_col self.end = self.start self._coordinates_resolved = True @classmethod def from_antlr(cls, antlr_ref): from .preprocessor.preprocessor import PreprocessedInputStream # pylint: disable=import-outside-toplevel # Normalize if isinstance(antlr_ref, CommonToken): token = antlr_ref end_token = None elif isinstance(antlr_ref, TerminalNodeImpl): token = antlr_ref.symbol end_token = None elif isinstance(antlr_ref, ParserRuleContext): # antlr_ref is an entire context (token range) token = antlr_ref.start end_token = antlr_ref.stop else: print(antlr_ref) raise NotImplementedError # Get source segment map inputStream = token.getInputStream() if isinstance(inputStream, PreprocessedInputStream): seg_map = inputStream.seg_map else: seg_map = None # Extract selection coordinates start = token.start if end_token is None: end = token.stop else: end = end_token.stop # Create object src_ref = cls(start, end, seg_map=seg_map) return src_ref
Intrinsic Certified Robustness of Bagging against Data Poisoning Attacks

In a \emph{data poisoning attack}, an attacker modifies, deletes, and/or inserts some training examples to corrupt the learnt machine learning model. \emph{Bootstrap Aggregating (bagging)} is a well-known ensemble learning method, which trains multiple base models on random subsamples of a training dataset using a base learning algorithm and uses majority vote to predict labels of testing examples. We prove the intrinsic certified robustness of bagging against data poisoning attacks. Specifically, we show that bagging with an arbitrary base learning algorithm provably predicts the same label for a testing example when the number of modified, deleted, and/or inserted training examples is bounded by a threshold. Moreover, we show that our derived threshold is tight if no assumptions on the base learning algorithm are made. We empirically evaluate our method on MNIST and CIFAR10. For instance, our method can achieve a certified accuracy of $70.8\%$ on MNIST when arbitrarily modifying, deleting, and/or inserting 100 training examples.

Introduction

Machine learning models trained on user-provided data are vulnerable to data poisoning attacks, in which malicious users carefully poison (i.e., modify, delete, and/or insert) some training examples such that the learnt model is corrupted and makes predictions for testing examples as an attacker desires. In particular, the corrupted model predicts incorrect labels for a large fraction of testing examples indiscriminately (i.e., a large testing error rate) or for some attacker-chosen testing examples. Unlike adversarial examples, which carefully perturb each testing example such that a model predicts an incorrect label for the perturbed testing example, data poisoning attacks corrupt the model such that it predicts incorrect labels for many clean testing examples. Like adversarial examples, data poisoning attacks pose severe security threats to machine learning systems. To mitigate data poisoning attacks, various defenses have been proposed in the literature. Most of these defenses achieve empirical robustness against certain data poisoning attacks and are often broken by strong adaptive attacks. To end the cat-and-mouse game between attackers and defenders, certified defenses were proposed. We say a learning algorithm is certifiably robust against data poisoning attacks if it can learn a classifier that provably predicts the same label for a testing example when the number of poisoned training examples is bounded. For instance, Ma et al. showed that a classifier trained with differential privacy certifies robustness against data poisoning attacks. Rosenfeld et al. leveraged randomized smoothing, which was originally designed to certify robustness against adversarial examples, to certify robustness against a particular type of data poisoning attacks called label flipping attacks, which only flip the labels of existing training examples. This randomized smoothing based defense can also be generalized to certify robustness against data poisoning attacks that modify both features and labels of existing training examples. However, these certified defenses suffer from two major limitations. First, they are only applicable to limited scenarios, i.e., Ma et al. is limited to learning algorithms that can be differentially private, while Rosenfeld et al. is limited to data poisoning attacks that only modify existing training examples.
Second, their certified robustness guarantees are loose, meaning that a learning algorithm is certifiably more robust than their guarantees indicate. We note that Steinhardt et al. derives an approximate upper bound of the loss function for data poisoning attacks. However, their method cannot certify that the learnt model predicts the same label for a testing example. We aim to address these limitations in this work. Our approach is based on a well-known ensemble learning method called Bootstrap Aggregating (bagging). Given a training dataset, bagging first generates $N$ subsamples by sampling from the training dataset with replacement uniformly at random, where each subsample includes $k$ training examples. Then, bagging uses a base learning algorithm to train a base classifier on each subsample. Given a testing example, bagging uses each base classifier to predict its label and takes majority vote among the predicted labels as the final predicted label. We show that bagging with any base learning algorithm is certifiably robust against data poisoning attacks. Figure 1 shows a toy example to illustrate why bagging certifies robustness against data poisoning attacks. When the poisoned training examples are a minority in the training dataset, the sampled $k$ training examples do not include any poisoned training examples with a high probability. Therefore, a majority of the $N$ base classifiers in bagging, and thus bagging's predicted labels for testing examples, are not influenced by the poisoned training examples. Formally, we show that bagging predicts the same label for a testing example when the number of poisoned training examples is no larger than a threshold. We call the threshold certified poisoning size. Moreover, we show that our derived certified poisoning size is tight if no assumptions on the base learning algorithm are made. Our certified poisoning size is the optimal solution to an optimization problem and we design an efficient algorithm to solve the optimization problem. We also empirically evaluate our method on MNIST and CIFAR10. For instance, our method can achieve a certified accuracy of 70.8% on MNIST when 100 training examples are arbitrarily poisoned, where $k = 100$ and $N = 1{,}000$. Under the same setting, Ma et al. and Rosenfeld et al. achieve 0 certified accuracy. Finally, we show that training the base classifiers using transfer learning can significantly improve the certified accuracy. Our contributions are summarized as follows:

• We derive the first intrinsic certified robustness of bagging against data poisoning attacks and prove the tightness of our robustness guarantee.

• We develop an efficient algorithm to compute the certified poisoning size in practice.

• We empirically evaluate our method on MNIST and CIFAR10.

All the proofs to our theorems are shown in the Appendix.

Certified Robustness of Bagging against Data Poisoning Attacks

Assume we have a training dataset $D = \{(x_1, y_1), (x_2, y_2), \cdots, (x_n, y_n)\}$ with $n$ examples, where $x_i$ and $y_i$ are the feature vector and label of the $i$th training example, respectively. Moreover, we are given an arbitrary deterministic or randomized base learning algorithm $A$, which takes a training dataset $D$ as input and outputs a classifier $f$, i.e., $f = A(D)$. $f(x)$ is the predicted label for a testing example $x$. For convenience, we jointly represent the training and testing processes as $A(D, x)$, which is $x$'s label predicted by a classifier that is trained using algorithm $A$ and training dataset $D$.
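To make the subsampling-and-voting procedure above concrete, here is a minimal Python sketch of bagging's training and prediction steps. The `base_learner` callable and the dataset layout are illustrative assumptions, not the paper's actual implementation.

import random
from collections import Counter

def train_bagging(dataset, base_learner, N, k, seed=0):
    # Train N base classifiers, each on a subsample g(D) of k examples
    # drawn from the training dataset uniformly at random with replacement.
    rng = random.Random(seed)
    classifiers = []
    for _ in range(N):
        subsample = [rng.choice(dataset) for _ in range(k)]
        classifiers.append(base_learner(subsample))
    return classifiers

def bagging_predict(classifiers, x):
    # Majority vote: return the label with the largest empirical
    # frequency among the base classifiers' predictions.
    votes = Counter(clf(x) for clf in classifiers)
    return votes.most_common(1)[0][0]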
Data poisoning attacks: In a data poisoning attack, an attacker poisons the training dataset $D$ such that the learnt classifier makes predictions for testing examples as the attacker desires. In particular, the attacker can carefully modify, delete, and/or insert some training examples in $D$ such that $A(D, x) \neq A(D', x)$ for many testing examples $x$ or some attacker-chosen $x$, where $D'$ is the poisoned training dataset. We note that modifying a training example means modifying its feature vector and/or label. We denote by $B(D, r)$ the set of poisoned training datasets with at most $r$ poisoned training examples.

Bootstrap aggregating (Bagging): Bagging is a well-known ensemble learning method. Roughly speaking, bagging creates many subsamples of a training dataset with replacement and trains a classifier on each subsample. For a testing example, bagging uses each classifier to predict its label and takes majority vote among the predicted labels as the label of the testing example. Next, we describe a probabilistic view of bagging, which makes it possible to theoretically analyze the certified robustness of bagging against data poisoning attacks. Specifically, we denote by $g(D)$ a list of $k$ examples that are sampled from $D$ with replacement uniformly at random. Since $g(D)$ is random, the predicted label $A(g(D), x)$ is also random. We denote by $p_j = \Pr(A(g(D), x) = j)$ the label probability for label $j$, where $j \in \{1, 2, \cdots, c\}$. Bagging predicts the label with the largest label probability for $x$, i.e., $h(D, x) = \arg\max_{j \in \{1, 2, \cdots, c\}} p_j$ is the label that bagging predicts for $x$. $h(D', x)$ is the label that bagging predicts for $x$ when the training dataset is poisoned.

Certified robustness of bagging: We show the certified robustness of bagging. In particular, we show that bagging predicts the same label for a testing example when the number of poisoned training examples is no larger than some threshold (called certified poisoning size). Moreover, we show that our derived certified poisoning size is tight. Our major theoretical results are summarized in the following two theorems.

Theorem 1 (Certified Poisoning Size of Bagging). Suppose we have a training dataset $D$, a base learning algorithm $A$, and a testing example $x$. $g(D)$ is a list of $k$ training examples sampled from $D$ uniformly at random with replacement. Suppose $l, s \in \{1, 2, \cdots, c\}$, $\underline{p_l} \in [0, 1]$, and $\overline{p_s} \in [0, 1]$ satisfy the following:

$$p_l \geq \underline{p_l} \geq \overline{p_s} \geq p_s = \max_{j \neq l} p_j, \qquad (2)$$

where $l$ and $s$ are the labels with the largest and second largest probabilities under bagging, respectively. $\underline{p_l}$ is a lower bound of the largest label probability, while $\overline{p_s}$ is an upper bound of the second largest label probability. Then, bagging predicts label $l$ for $x$ when the number of poisoned training examples is bounded by $r^*$, i.e., we have:

$$h(D', x) = l, \quad \forall D' \in B(D, r^*), \qquad (3)$$

where $r^*$ is called certified poisoning size and is the solution to the following optimization problem:

$$r^* = \max_r r \quad \text{s.t.} \quad \max_{n - r \leq n' \leq n + r} L(n') < 0, \qquad (4)$$

where $n = |D|$, $n' = |D'|$, $L(n')$ is a closed-form function of $k$, $r$, $n'$, $\underline{p_l} - \delta_l$, and $\overline{p_s} + \delta_s$, with $\delta_l = \underline{p_l} - \lfloor \underline{p_l} \cdot n^k \rfloor / n^k$ and $\delta_s = \lceil \overline{p_s} \cdot n^k \rceil / n^k - \overline{p_s}$. $n + r$ and $n - r$ are respectively the maximum and minimum sizes of the poisoned training dataset when the number of poisoned training examples is $r$.

Theorem 2 (Tightness of the Certified Poisoning Size). Assume we have $\underline{p_l} + \overline{p_s} \leq 1$, $\underline{p_l} + (c - 1) \cdot \overline{p_s} \geq 1$, and $\delta_l = \delta_s = 0$. Then, for any $r > r^*$, there exist a base learning algorithm $A^*$ consistent with (2) and a poisoned training dataset $D'$ with $r$ poisoned training examples such that $\arg\max_{j \in \{1, 2, \cdots, c\}} \Pr(A^*(g(D'), x) = j) \neq l$ or there exist ties.

We have several remarks about our theorems.
Remark 1: Our Theorem 1 is applicable for any base learning algorithm $A$. In other words, bagging with any base learning algorithm is provably robust against data poisoning attacks.

Remark 2: For any lower bound $\underline{p_l}$ of the largest label probability and upper bound $\overline{p_s}$ of the second largest label probability, our Theorem 1 derives a certified poisoning size. In particular, our certified poisoning size is related to the gap between the two probability bounds. If we can estimate tighter probability bounds, then we may certify a larger poisoning size. We use the probability bounds instead of the exact label probabilities $p_l$ and $p_s$, because it is challenging to compute them exactly.

Remark 3: Theorem 2 shows that when no assumptions on the base learning algorithm are made, it is impossible to certify a poisoning size that is larger than ours.

Computing the Certified Poisoning Size

Given a learning algorithm $A$, a training dataset $D$, parameter $k$, and $e$ testing examples in $D_e$, we aim to estimate the predicted label and certified poisoning size for each testing example. Specifically, for a testing example, our certified poisoning size relies on a lower bound of the largest label probability and an upper bound of the second largest label probability. Therefore, we use a Monte-Carlo method to estimate these probability bounds with a probabilistic guarantee. Next, we describe estimating the probability bounds, solving the optimization problem in (4) using the probability bounds, and our complete certification algorithm.

Estimating probability bounds $\underline{p_l}$ and $\overline{p_s}$: One way to estimate $\underline{p_l}$ and $\overline{p_s}$ is to use an existing Monte-Carlo method from the randomized smoothing literature. In particular, $\underline{p_l}$ is estimated using the one-sided Clopper-Pearson method and $\overline{p_s}$ is estimated as $1 - \underline{p_l}$. However, such an estimated $\overline{p_s}$ may be loose. To address the challenge, we adopt the simultaneous confidence interval estimation method called SimuEM to estimate $\underline{p_l}$ and $\overline{p_s}$ simultaneously. Specifically, we first randomly sample $N$ subsamples $L_1, L_2, \cdots, L_N$ from $D$ with replacement, each of which has $k$ training examples. Then, we train a classifier $f_o$ for each subsample $L_o$ using the learning algorithm $A$, where $o = 1, 2, \cdots, N$. We can use the $N$ classifiers to estimate the predicted label $l$, $\underline{p_l}$, and $\overline{p_s}$ for $x$ with a confidence level at least $1 - \alpha$. A naive procedure is to train such $N$ classifiers for each testing example, which is very computationally expensive. To address the computational challenge, we propose to train such $N$ classifiers once for all $e$ testing examples. Our key idea is to divide the confidence level among the $e$ testing examples such that we can estimate their predicted labels and certified poisoning sizes using the same $N$ classifiers with a simultaneous confidence level at least $1 - \alpha$. Specifically, for each testing example $x_i$ in $D_e$, we count the frequency of each label predicted by the $N$ classifiers, i.e.,

$$n_j = \sum_{o=1}^{N} \mathbb{I}(f_o(x_i) = j), \quad j \in \{1, 2, \cdots, c\}, \qquad (5)$$

where $\mathbb{I}$ is the indicator function. Each $n_j$ follows a binomial distribution with parameters $N$ and $p_j$. Thus, we can adopt the Clopper-Pearson method to obtain a one-sided confidence interval for each label probability $p_j$. Then, we can leverage Bonferroni correction to obtain simultaneous confidence intervals for all label probabilities. Formally, we estimate $l$ as the label with the largest frequency $n_l$, and we have the following probability bounds:

$$\underline{p_l} = \text{Beta}\left(\frac{\alpha}{e};\ n_l,\ N - n_l + 1\right), \qquad \overline{p_j} = \text{Beta}\left(1 - \frac{\alpha}{e};\ n_j + 1,\ N - n_j\right),\ j \neq l, \qquad (6)$$
where $1 - \alpha/e$ is the confidence level and $\text{Beta}(\beta; \lambda, \theta)$ is the $\beta$th quantile of the Beta distribution with shape parameters $\lambda$ and $\theta$. One natural method to estimate $\overline{p_s}$ is $\overline{p_s} = \max_{j \neq l} \overline{p_j}$. However, this bound may be loose. For example, $\underline{p_l} + \overline{p_s}$ may be larger than 1. Therefore, we estimate $\overline{p_s}$ as $\overline{p_s} = \min(\max_{j \neq l} \overline{p_j}, 1 - \underline{p_l})$.

Computing the certified poisoning size: Given the estimated label probability bounds for a testing example, we solve the optimization problem in (4) to obtain its certified poisoning size $r^*$. We design an efficient binary search based method to solve for $r^*$. Specifically, we use binary search to find the largest $r$ such that the constraint in (4) is satisfied. We denote the left-hand side of the constraint as $\max_{n-r \leq n' \leq n+r} L(n')$. For a given $r$, a naive way to check whether the constraint $\max_{n-r \leq n' \leq n+r} L(n') < 0$ holds is to check whether $L(n') < 0$ holds for each $n'$ in the range, which could be inefficient when $r$ is large. To reduce the computation cost, we derive a closed-form expression (Eq. (7), whose derivation is shown in the Supplemental Material) for the $n'$ at which $L(n')$ reaches its maximum value for a given $r$. Therefore, for a given $r$, we only need to check whether $L(n') < 0$ holds for at most two different $n'$.

Complete certification algorithm: Algorithm 1 shows our certification process to estimate the predicted labels and certified poisoning sizes for the $e$ testing examples in $D_e$. [Algorithm 1 pseudocode omitted; given the label counts for each testing example $x_i$, $l_i$ and $s_i$ are the top two indices (ties broken uniformly at random), and the algorithm outputs the predicted label and certified poisoning size for each testing example.] The function TrainUnderSample randomly samples $N$ subsamples and trains $N$ classifiers. The function SimuEM estimates the probability bounds $\underline{p_{l_i}}$ and $\overline{p_{s_i}}$. The function BinarySearch solves the optimization problem in (4) to obtain the certified poisoning size $\hat{r}^*_i$ for testing example $x_i$. Roughly speaking, the following theorem shows that, with probability at least $1 - \alpha$, if Certify does not ABSTAIN, then it returns a valid certified poisoning size for every testing example in $D_e$. In other words, the probability that Certify returns an incorrect certified poisoning size for at least one testing example is at most $\alpha$.

Theorem 3. Algorithm Certify has the following probabilistic guarantee:

$$\Pr\left(\bigcap_{i:\ \hat{y}_i \neq \text{ABSTAIN}} \left\{h(D', x_i) = \hat{y}_i,\ \forall D' \in B(D, \hat{r}^*_i)\right\}\right) \geq 1 - \alpha.$$

Experimental Setup

Datasets and classifiers: We perform experiments on MNIST and CIFAR10. The base learning algorithm is a neural network, and we use the example neural network architectures in Keras for the two datasets. The numbers of training examples in the two datasets are 60,000 and 50,000, respectively, which are the training datasets that we aim to certify. Both datasets have 10,000 testing examples, which form the $D_e$ in our algorithm.

Evaluation metric: We use certified accuracy as our evaluation metric. In particular, for a given $r$ (i.e., number of poisoned training examples), the certified accuracy can be computed as follows:

$$CA_r = \frac{1}{e} \sum_{i=1}^{e} \mathbb{I}(\hat{y}_i = y_i) \cdot \mathbb{I}(\hat{r}^*_i \geq r),$$

where $y_i$ is the ground truth label for testing example $x_i$, and $\hat{y}_i$ and $\hat{r}^*_i$ respectively are the predicted label and certified poisoning size returned by our Certify algorithm for $x_i$. Intuitively, the certified accuracy is the fraction of testing examples whose labels are correctly predicted and whose certified poisoning sizes are no smaller than $r$. In other words, when the number of poisoned training examples is $r$, bagging's testing accuracy for $D_e$ is at least $CA_r$ with a confidence level $1 - \alpha$.

Parameter setting: Our method has three parameters, i.e., $k$, $\alpha$, and $N$. Unless otherwise mentioned, we adopt the following default settings: $k = 100$, $\alpha = 0.001$, and $N = 1{,}000$ for MNIST; and $k = 1{,}000$, $\alpha = 0.001$, and $N = 1{,}000$ for CIFAR10.
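As a rough illustration of the estimation and search steps above, here is a Python sketch. The Beta-quantile bounds follow the Clopper-Pearson construction described in the text, but the exact Bonferroni split of $\alpha$ and the closed form of $L(n')$ are not reproduced here, so `alpha_prime` and `constraint_holds` are stand-in assumptions rather than the paper's exact quantities.

from scipy.stats import beta

def estimate_bounds(counts, N, alpha_prime):
    # counts: dict mapping label -> vote count among the N classifiers.
    # Returns the top label, a lower bound on its probability, and an
    # upper bound on the runner-up probability (capped at 1 - lower
    # bound so the two bounds stay mutually consistent).
    l = max(counts, key=counts.get)
    n_l = counts[l]
    p_l_lower = beta.ppf(alpha_prime, n_l, N - n_l + 1) if n_l > 0 else 0.0
    p_s_upper = 0.0
    for j, n_j in counts.items():
        if j != l:
            p_s_upper = max(p_s_upper,
                            beta.ppf(1.0 - alpha_prime, n_j + 1, N - n_j))
    return l, p_l_lower, min(p_s_upper, 1.0 - p_l_lower)

def certified_poisoning_size(constraint_holds, r_max):
    # Binary search for the largest r satisfying the constraint in (4),
    # assuming constraint_holds(r) is monotone in r (True for small r).
    if not constraint_holds(0):
        return 0
    lo, hi = 0, r_max
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if constraint_holds(mid):
            lo = mid
        else:
            hi = mid - 1
    return lo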
In our experiments, we will study the impact of each parameter while setting the remaining parameters to their default values. Note that training the N classifiers can be easily parallelized. We performed experiments on a server with 80 CPU cores, 8 GPUs (RTX 6000), and 385 GB main memory.

Compared methods: We compare with a differential privacy based method and a randomized smoothing based method. Since these methods train N classifiers on the entire training dataset and thus do not scale, we perform comparisons on the MNIST 1/7 dataset, which consists of only the digits 1 and 7. This subset includes 13,007 training examples and 2,163 testing examples.

• Ma et al.: Ma et al. showed that a classifier trained with differential privacy achieves certified robustness against data poisoning attacks. Suppose ACC_r is the testing accuracy on D_e of a differentially private classifier that is trained using a poisoned training dataset with r poisoned training examples. Based on their Theorem 3 (i.e., treating the testing accuracy as the cost function), the expected testing accuracy E(ACC_r) is lower bounded by a certain function of E(ACC), r, and (ε, δ) (the function can be found in their Theorem 3), where E(ACC) is the expected testing accuracy of a differentially private classifier trained using the clean training dataset and (ε, δ) are the differential privacy parameters. The randomness in E(ACC_r) and E(ACC) comes from differential privacy. This lower bound is the certified accuracy that the method achieves. A lower bound of E(ACC) can be further estimated with confidence level 1 − α via training N differentially private classifiers on the entire clean training dataset. However, for simplicity, we estimate E(ACC) as the average testing accuracy of the N differentially private classifiers, which gives this method an advantage. We use DP-SGD to train differentially private classifiers. Moreover, we set ε = 0.3 and δ = 10^-5 such that this method and our method achieve comparable certified accuracies when r = 0.

• Rosenfeld et al.: Rosenfeld et al. proposed a randomized smoothing based method to certify robustness against label flipping attacks, which only flip the labels of existing training examples. This method can be generalized to certify robustness against data poisoning attacks that modify both features and labels of existing training examples, via randomly flipping both features and labels of training examples. In particular, we binarize the features to apply this method. Like our method, they also train N classifiers to estimate the certified accuracy with a confidence level 1 − α. However, unlike our method, when training a classifier, they flip each feature/label value in the training dataset with probability β and use the entire noisy training dataset. When predicting the label of a testing example, this method takes a majority vote among the N classifiers. We set β = 0.3 such that this method and our method achieve comparable certified accuracies when r = 0. We note that this method certifies the number of poisoned features/labels in the training dataset. We transform this certificate into a number of poisoned training examples as F/(d + 1), where F is the certified number of poisoned features/labels and d + 1 is the number of features/labels of a training example (d features plus one label). We have d = 784 for MNIST, so, for example, a certificate of F = 1,570 flipped features/labels corresponds to 1,570/785 = 2 poisoned training examples.

Experimental Results

Impact of k, α, and N: Figure 2 shows the impact of k, α, and N on the certified accuracy of our method.
As the results show, k controls a tradeoff between accuracy under no poisoning and robustness. Specifically, when k is larger, our method has a higher accuracy when there are no data poisoning attacks (i.e., r = 0), but the certified accuracy drops more quickly as the number of poisoned training examples increases. The reason is that a larger k makes it more likely to sample poisoned training examples when creating the subsamples in bagging. The certified accuracy increases as α or N increases. The reason is that a larger α or N produces tighter estimated probability bounds, which make the certified poisoning sizes larger. We also observe that the certified accuracy is relatively insensitive to α.

Transfer learning improves certified accuracy: Our method trains multiple classifiers, each using only k training examples, so improving the accuracy of each classifier can improve the certified accuracy. We explore using transfer learning to train more accurate classifiers. Specifically, we use the Inception-v3 classifier pretrained on ImageNet to extract features, and we leverage a public implementation to train our classifiers on CIFAR10. Figure 3(a) shows that transfer learning can significantly increase our certified accuracy, where k = 100, α = 0.001, and N = 1,000.

Comparing with Ma et al. and Rosenfeld et al.: Figure 3(b) compares our method with previous methods on the MNIST 1/7 dataset, where k = 50, α = 0.001, and N = 1,000. Our method significantly outperforms existing methods. For example, our method achieves 96.95% certified accuracy when the number of poisoned training examples is r = 50, while the certified accuracy of the two existing methods is 0 under the same setting. Figure 3(c) shows that our method is also more efficient than existing methods. The reason is that our method trains classifiers on a small number of training examples, while existing methods train classifiers on the entire training dataset. Ma et al. outperforms Rosenfeld et al. because differential privacy directly certifies robustness against modification/deletion/insertion of training examples, while randomized smoothing was designed to certify robustness against modifications of features/labels.

Related Work

One category of defenses aims to detect the poisoned training examples based on their negative impact on the error rate of the learnt model. Another category of defenses aims to design new loss functions whose optimization simultaneously detects the poisoned training examples and learns a model. For instance, Jagielski et al. proposed to jointly optimize the selection of a subset of training examples with a given size and a model that minimizes the loss function; the unselected training examples are treated as poisoned ones. Steinhardt et al. assumes that a model is trained only using examples in a feasible set and derives an approximate upper bound of the loss function for any data poisoning attack under these assumptions. However, none of these defenses can certify that the learnt model predicts the same label for a testing example under data poisoning attacks. Ma et al. shows that differentially private models certify robustness against data poisoning attacks. Rosenfeld et al. leverages randomized smoothing to certify robustness against label flipping attacks, which can be generalized to certify robustness against data poisoning attacks that modify both features and labels of existing training examples. Wang et al.
proposes to use randomized smoothing to certify robustness against backdoor attacks, which is also applicable to certifying robustness against data poisoning attacks. However, these defenses achieve loose certified robustness guarantees. Moreover, Ma et al. is only applicable to learning algorithms that can be trained with differential privacy, while Rosenfeld et al. and Wang et al. are only applicable to data poisoning attacks that modify existing training examples. Biggio et al. proposed bagging as an empirical defense against data poisoning attacks. However, they did not derive the certified robustness of bagging.

Conclusion

Data poisoning attacks pose severe security threats to machine learning systems via poisoning the training dataset. In this work, we show the intrinsic certified robustness of bagging against data poisoning attacks, i.e., bagging can transform any learning algorithm into one that is certifiably robust against data poisoning attacks. Specifically, we show that bagging predicts the same label for a testing example when the number of poisoned training examples is bounded. Moreover, we show that our derived bound is tight if no assumptions on the learning algorithm are made. We also empirically demonstrate the effectiveness of our method using MNIST and CIFAR10. Our results show that our method achieves much better certified robustness and is more efficient than existing certified defenses. Interesting future work includes: 1) generalizing our method to other types of data, e.g., graphs, and 2) improving our method by leveraging meta-learning.

A Proof of Theorem 1

We first define some notations that will be used in our proof. Given a training dataset D and its poisoned version D′, we define two random variables X = g(D) and Y = g(D′), where g(·) denotes sampling a random list of k examples from its argument with replacement uniformly at random; that is, X and Y are random lists with k examples sampled from D and D′, respectively. We denote by I = D ∩ D′ the set of overlapping training examples in the two datasets. We use Ω to denote the space of random lists g(D ∪ D′), i.e., each element in Ω is a list with k examples sampled from D ∪ D′ with replacement uniformly at random. For convenience, we define operators ⊑ and ⋢ as follows: assuming ω ∈ Ω is a list of k examples and S is a set of examples, we say ω ⊑ S if ∀w ∈ ω, w ∈ S, and we say ω ⋢ S if ∃w ∈ ω, w ∉ S. For instance, we have X ⊑ D and Y ⊑ D′. Before proving our theorem, we show a variant of the Neyman-Pearson Lemma that will be used in our proof.

Lemma 1 (Neyman-Pearson Lemma). Suppose X and Y are two random variables in the space Ω with probability distributions μ_x and μ_y, respectively. Let M : Ω → {0, 1} be a random or deterministic function. Then, we have the following: (1) if S ⊆ {ω ∈ Ω : μ_x(ω) ≥ t · μ_y(ω)} for some t > 0 and Pr(M(X) = 1) ≥ Pr(X ∈ S), then Pr(M(Y) = 1) ≥ Pr(Y ∈ S); and (2) if S ⊆ {ω ∈ Ω : μ_x(ω) ≤ t · μ_y(ω)} for some t > 0 and Pr(M(X) = 1) ≤ Pr(X ∈ S), then Pr(M(Y) = 1) ≤ Pr(Y ∈ S).

Next, we prove our Theorem 1. Our goal is to show that h(D′, x) = l, i.e., Pr(A(Y, x) = l) > max_{j≠l} Pr(A(Y, x) = j). Our key idea is to derive a lower bound of Pr(A(Y, x) = l) and an upper bound of max_{j≠l} Pr(A(Y, x) = j), where the lower bound and upper bound can be easily computed. We derive the lower bound and upper bound using the Neyman-Pearson Lemma. Then, we derive the certified poisoning size by requiring the lower bound to be larger than the upper bound. Next, we derive the lower bound, the upper bound, and the certified poisoning size.

Deriving a lower bound of Pr(A(Y, x) = l): We first define the residual δ_l = p_l − ⌊p_l · n^k⌋ / n^k, so that p_l − δ_l is an integer multiple of 1/n^k. We define a binary function M(ω) = I(A(ω, x) = l) over the space Ω, where ω ∈ Ω and I is the indicator function. Then, we have Pr(A(Y, x) = l) = Pr(M(Y) = 1).
Our idea is to construct a subspace for which we can apply the first part of Lemma 1 to derive a lower bound of Pr(M(Y) = 1). We first divide the space Ω into three subspaces as follows: E = {ω ∈ Ω : ω ⊑ I}, B = {ω ∈ Ω : ω ⊑ D and ω ⋢ I}, and C = {ω ∈ Ω : ω ⋢ D}. Since we sample the k training examples with replacement uniformly at random, we have Pr(X = ω) = 1/n^k if ω ⊑ D and Pr(X = ω) = 0 otherwise; similarly, Pr(Y = ω) = 1/n′^k if ω ⊑ D′ and Pr(Y = ω) = 0 otherwise. We denote by m the size of I, i.e., m = |I|. Then, we have Pr(X ∈ E) = (m/n)^k, because each of the k examples is sampled independently from I with probability m/n. Furthermore, since Pr(X ∈ B) + Pr(X ∈ E) = 1, we obtain Pr(X ∈ B) = 1 − (m/n)^k. Since X ⊑ D, we have Pr(X ∈ C) = 0. Similarly, we can compute the corresponding probabilities for Y in (28). We assume p_l − δ_l − (1 − (m/n)^k) ≥ 0. We can make this assumption because we only need to find a sufficient condition for h(D′, x) = l. We define B′ ⊆ E, i.e., B′ is a subset of E, such that Pr(X ∈ B′) = p_l − δ_l − (1 − (m/n)^k). (29) We can find such a subset because p_l − δ_l is an integer multiple of 1/n^k. Moreover, we define R = B ∪ B′. (30) Then, based on (2), we have Pr(M(X) = 1) = Pr(A(X, x) = l) ≥ p_l. Therefore, we have Pr(M(X) = 1) ≥ Pr(X ∈ R) = p_l − δ_l. (32) Furthermore, we have Pr(X = ω) > γ · Pr(Y = ω) if and only if ω ∈ B, and Pr(X = ω) = γ · Pr(Y = ω) if ω ∈ B′, where γ = (n′/n)^k. Therefore, based on the definition of R in (30) and the condition (32), we can apply Lemma 1 to obtain Pr(M(Y) = 1) ≥ Pr(Y ∈ R). Pr(Y ∈ R) is a lower bound of Pr(A(Y, x) = l) and can be computed as follows: Pr(Y ∈ R) = Pr(Y ∈ B) + Pr(Y ∈ B′) (35) = Pr(Y ∈ B′) (36) = Pr(X ∈ B′)/γ (37) = (p_l − δ_l − (1 − (m/n)^k)) · (n/n′)^k, where we have (36) from (35) because Pr(Y ∈ B) = 0, (37) from (36) because Pr(X = ω) = γ · Pr(Y = ω) for ω ∈ B′, and the last equation from (29).

Deriving an upper bound of max_{j≠l} Pr(A(Y, x) = j): We define the residual δ_j = ⌈p_j · n^k⌉ / n^k − p_j, so that p_j + δ_j is an integer multiple of 1/n^k. We leverage the second part of Lemma 1 to derive such an upper bound. We assume Pr(X ∈ E) ≥ p_j + δ_j, ∀j ∈ {1, 2, · · · , c} \ {l}. We can make the assumption because we derive a sufficient condition for h(D′, x) = l. For ∀j ∈ {1, 2, · · · , c} \ {l}, we define C_j ⊆ E such that Pr(X ∈ C_j) = p_j + δ_j. We can find such a C_j because p_j + δ_j is an integer multiple of 1/n^k. Moreover, we define the space Q_j = C ∪ C_j. Therefore, based on (2), we have Pr(A(X, x) = j) ≤ p_j + δ_j = Pr(X ∈ Q_j). We define a function M_j(ω) = I(A(ω, x) = j), where ω ∈ Ω. Based on Lemma 1, we have Pr(A(Y, x) = j) = Pr(M_j(Y) = 1) ≤ Pr(Y ∈ Q_j), where Pr(Y ∈ Q_j) can be computed as follows: Pr(Y ∈ Q_j) = Pr(Y ∈ C) + Pr(Y ∈ C_j) = 1 − (m/n′)^k + (p_j + δ_j) · (n/n′)^k. Therefore, we have max_{j≠l} Pr(A(Y, x) = j) ≤ 1 − (m/n′)^k + (p_s + δ_s) · (n/n′)^k, where p_s + δ_s ≥ max_{j≠l}(p_j + δ_j).

Deriving the certified poisoning size: To reach the goal Pr(A(Y, x) = l) > max_{j≠l} Pr(A(Y, x) = j), it is sufficient to have the following: (p_l − δ_l − (1 − (m/n)^k)) · (n/n′)^k > 1 − (m/n′)^k + (p_s + δ_s) · (n/n′)^k. Taking all poisoned training datasets D′ (i.e., n − r ≤ n′ ≤ n + r) into consideration, we obtain the sufficient condition that the above inequality holds for every n′ with n − r ≤ n′ ≤ n + r. (55) Note that m = max(n, n′) − r. Furthermore, when the above condition (55) is satisfied, we have p_l − δ_l − (1 − (m/n)^k) ≥ 0 and Pr(X ∈ E) = (m/n)^k ≥ p_j + δ_j, ∀j ∈ {1, 2, · · · , c} \ {l}, which are the conditions under which we can construct the spaces B′ and C_j. The certified poisoning size r* is the maximum r that satisfies the above sufficient condition. In other words, our certified poisoning size r* is the solution to the optimization problem in (4).

B Proof of Theorem 2

Our idea is to construct a learning algorithm A* such that the label l is not predicted by the bagging predictor or there exist ties. When r > r* and δ_l = δ_s = 0, there exists a poisoned training dataset D′ with a certain n′ ∈ [n − r, n + r] such that the sufficient condition fails, i.e., (p_l − (1 − (m/n)^k)) · (n/n′)^k ≤ 1 − (m/n′)^k + p_s · (n/n′)^k, (60) where m = max(n, n′) − r and γ = (n′/n)^k. We let Q_s = C ∪ C_s, where C_s satisfies the following: C_s ⊆ E, C_s ∩ B′ = ∅, and Pr(X ∈ C_s) = p_s. Note that we can construct such a C_s because p_l + p_s ≤ 1.
Then, we divide the remaining space Ω \ (R ∪ Q_s) into c − 2 subspaces Q_j such that Pr(X ∈ Q_j) ≤ p_s, where j ∈ {1, 2, · · · , c} \ {l, s}. Therefore, the learning algorithm A* is consistent with (2). Next, we show that l is not predicted by the bagging predictor or there exist ties when the training dataset is D′. In particular, we have Pr(A*(Y, x) = l) = Pr(Y ∈ R), (67) which by (68) and (69) is at most Pr(A*(Y, x) = s), where γ = (n′/n)^k and we have (69) from (68) because of (60). Therefore, label l is not predicted for x or there exist ties when the training dataset is D′.

C Proof of Theorem 3

Based on the definition of SimuEM, the probability bounds estimated for a testing example x_i hold simultaneously with probability at least 1 − α/e. Therefore, the probability that Certify returns an incorrect certified poisoning size for a testing example x_i is at most α/e. Then, we have the following: the probability that Certify returns valid certified poisoning sizes for all e testing examples for which it does not ABSTAIN is at least 1 − e · (α/e) = 1 − α. (77) We have (76) from (75) according to Boole's inequality.

D Derivation of Equation 7

L(n′) = (n′/n)^k − 2 · ((max(n, n′) − r)/n) … We aim to derive arg max_{n−r≤n′≤n+r} L(n′). When n − r ≤ n′ ≤ n, we have max(n, n′) = n, so the term (max(n, n′) − r)/n does not depend on n′. Therefore, when n − r ≤ n′ ≤ n, L(n′) increases as n′ increases. Thus, L(n′) reaches its maximum value when n ≤ n′ ≤ n + r. When n ≤ n′ ≤ n + r, we have the following:
def prevent_default(self):
    # Suppress the default handler for this event.
    self._run_default = False
Introduction

Critics of SkyTrain as a technology and rapid transit option are everywhere. Largely motivated by a fear of all megaprojects with high capital costs, SkyTrain critics are vocal, active, and will stop at nothing to act on this fear. They deny the productivity and developmental benefits that the system has given our region, and they refuse to acknowledge the potential that SkyTrain has to continue to be useful to our region if extended further. With a $3 billion capital cost, it's no surprise that numerous SkyTrain critics, fearing the investment cost, have scrambled to promote or find alternatives.

"Just another SkyTrain critic" was my first response when I first read about an "alternate proposal" for a Light Rail Transit (LRT) line on 16th over a Broadway subway, crafted by Adam Fitch (a planning technician for the Thompson Nicola Regional District) just over a year ago, when it was featured suddenly in the Vancouver Sun. But the proposal did seem to be a response to an absolutely valid series of concerns over the impact on businesses on Central and West Broadway if an at-grade LRT were to be built on Broadway itself, including the loss of parking, impacts to parallel cycling routes, and expropriation at a few properties (particularly at Broadway and Kingsway/Main) where it would indeed be necessary. I suspected that Fitch crafted the idea to counter the imminent disappearance of LRT consideration from public policy in Vancouver.

Forget about a Broadway subway, think LRT along West 16th – Vancouver Sun

…The most appropriate solution, with due consideration for costs, regional transit priorities (i.e. Surrey, etc.) and time frame (10 years from now to build the subway at a minimum) is to build a mainly street-level light rail along the CPR corridor, the Arbutus corridor, and West 16th Avenue to UBC. Compare this route with a Broadway subway on cost, construction time and capacity, and it prevails. [READ MORE]

Over the years, this idea has continued to circulate in the local discussion scene. It has been featured on a number of regional transit outlets, including: Price Tags, Stephen Rees, Rail for the Valley (obviously) and, most notably but not surprisingly given the paper's perpetual pro-LRT bias, the Georgia Straight newspaper, in a feature (reviewed by editor Stephen Hui) with an intimidating headline that immediately implies the alternative is "better". Critics of the planned Broadway SkyTrain wasted no time backing this idea, calling it the next big thing, trumpeting it as a "realistic priority" and denouncing the SkyTrain extension proposal as "another megaproject" in the comments for this article. I find it unfortunate that these people were given this opportunity to further this cause, because it honestly surprises me that the idea, despite the objections from many others aside from myself, has not already died. The fact of the matter, which some editors at the Georgia Straight (among others) haven't seen, is that the 16th Ave LRT and B-Line combo idea is a poor, discredited and badly planned idea from someone who doesn't have a clue how this city works.
A 16th Ave LRT just doesn’t work I contend that advocating for this idea is a collosal waste of time and money for three simple reasons: No benefits to Central Broadway riders Few, if any, benefits to UBC students Doubling of annual operating debt Let’s put it into context: Take a look at this stylized map showing both the routings for the planned Broadway SkyTrain extension and Adam Fitch’s LRT proposal. Notice how the planned Broadway SkyTrain services all of the busy business and activity districts on Broadway, but the LRT misses them – making the only possible benefactors the rider from either existing SkyTrain Lines, Broadway & Arbutus, and residents along the 16th Avenue corridor. What this show is that there are clearly no benefits to Central Broadway riders – which actually make up a significant majority of the current 99 B-Line’s ridership, as opposed to UBC – and the West Broadway business district is missed as well. That’s millions of dollars in economic potential that could be unlocked, but that isn’t happening under Fitch’s plan. In what would quite possibly be the least equitable planning move in Metro Vancouver transportation planning history, billions of dollars would be spent to benefit only a small portion of the tens of thousands who are actually facing the problems that riders are facing on the Broadway corridor daily. As many, the Adam’s proposal apparently assumes that the main demand is on UBC. It is worth to mention that the numbers ran by Translink suggests that the highest demand is on the central Broadway portion (Voony’s Blog) It was one of the shortfalls I mentioned in my original letter to the sun responding to the concept. The City of Vancouver’s report on Broadway rapid transit finds that the Central Broadway area generates at least as many trips as UBC, if not more. It is extremely important to bring any rapid transit to where the anchors and trip generators actually are – both at the ends and along the route itself. That’s why the Canada Line uses the Cambie Street corridor, as opposed to either Granville or Arbutus Streets – because it provides strategic connections to busy anchors like Central Broadway, City Hall, various major hospitals, Queen Elizabeth Park, and Oakridge Mall along the way. The indirectness of the proposal also has some other consequences: the proposal is 2.3km longer than any route going down Broadway, an additional distance that not only adds to the proposal’s capital costs – it brings up the travel times as well. On top of the longer line distance, while parts of the line will be capable of 80-90km/h operation like SkyTrain – supported by crossing gates and some tunnelling – there are several portions of the line that will need to be limited to 50-60km/h speed limits – further dampening the supposed speed benefit: As a result of the longer distance combined with the speed restrictions, the proposal fails to offer a significant benefit to the one possible travel crowd that could seriously benefit: the UBC traveller; which brings me to my second contention: the Fitch proposal offers few, if any, benefits to UBC students. With the longer route and deferred connections, it’s reasonable to think that many riders – especially in non-congested off-peak hours, when the 99 B-Line trip takes as little as 30-minutes end-to-end – will opt to continue using the 99 B-Line on Broadway. 
This especially applies to UBC students who live on campus, for whom the 99 is an important connection to businesses in Point Grey/Sasamat, West Broadway and Central Broadway. Fitch's proposed LRT links fewer business centres, with the first major business cluster from UBC being reached when the line hits Dunbar, a full 6km away, twice as far as Sasamat. Meaning that for needs like groceries, doctor's appointments and other outings, UBC students likely won't be utilizing the billions of dollars spent on Fitch's LRT; they'll be continuing to use the 99 B-Line.

Which brings me to the third reason why Fitch's LRT proposal is an extremely bad idea: with the required retention of the 99 B-Line, the Fitch LRT proposal doubles the annual operating debt of providing Broadway-UBC corridor transit. By not replacing the 99 B-Line, the operating cost of providing UBC-Broadway corridor transit can only increase. Versus the current ("business-as-usual") setup that includes the 99 B-Line, the planned Broadway SkyTrain is expected to save $7 million incrementally in annual operating costs (see: design guide). With estimates already pitting the operating cost of a separate LRT on Broadway itself at over $10 million annually by 2041 (in 2011 dollars), the operating debt with the Fitch LRT simply doubles over the existing setup.

The bigger picture

I could go so far as to say that the Fitch LRT proposal hurts the entire region, because it is really that short-sighted in terms of practical thinking. As mentioned, the planned driverless SkyTrain extension is expected to save $7 million in annual operating costs, savings that could be redirected to improving transit around the region. With over 54,000 new transit trips daily attracted around the region, more than double any other studied option, including any theoretical LRT on Broadway itself, the planned Broadway SkyTrain generates new fare revenue. That would also be a serious contributor to expanded transit around the region.

The elimination of the 99 B-Line also means bus services improve throughout the region, because the 99 B-Line consumes more than half of TransLink's articulated diesel bus fleet. These buses could be redirected to other busy routes in the region to address growing transit demands. With the Fitch LRT proposal, none of these improvements can materialize.

Such bus service redirection can also draw from the numerous other high-frequency bus routes that connect to UBC from different parts of the city, usually during peak hour periods. These are the:

43 Joyce Station – via 41st Ave and Oakridge
44 Downtown – via 4th Ave
84 VCC-Clark Station (ALL-DAY) – via 4th Ave, Olympic Village and Great Northern Way
480 Bridgeport Station (ALL-DAY Weekdays) – via Kerrisdale and Marpole

The planned Broadway SkyTrain is the only option that offers the significant travel time benefits (cutting travel time between Commercial-Broadway and UBC in half, to 19 minutes!) that could enable the truncation of some of these routes to save even more money annually. For example: the 44 duplicates the 84, and is likely to be eliminated in favour of the faster connections downtown riders will get by utilizing the Broadway subway with connecting routes and the Canada Line. The 480 could also be eliminated, perhaps following my suggestion.
However, with the incremental operating cost savings, bus service on even these routes could theoretically be increased without costing more than transit on the Broadway-UBC corridor does today. By denying these benefits and choosing an alternative simply because it offers the prospect of a lower initial capital cost, the region loses out on better transit both on Broadway itself and elsewhere.

Conclusion

Responding to the Straight over the mention of Vancouver Mayor Gregor Robertson being on record as supporting the planned Broadway SkyTrain extension, Fitch rather arrogantly proclaimed in the opening of the recent article on his proposal: "He's wrong on probably four or five fronts." But, as an upvoted commenter pointed out, he didn't list a single one. Which I think highlights another problem with critics who fear studied and decided megaprojects and act quickly to try to debase them: they don't do a good job at it. In this case that doesn't help them, because I think that the City of Vancouver has done an excellent job of laying out the benefits and the business case of a Broadway SkyTrain extension, probably much to the dismay of the many critics who have already quit. As for Adam Fitch's 16th Ave LRT proposal, it's evident that not only does it have no case, it really has no argument either.

*****

Author's note: Thanks for reading this far! I encourage you to subscribe to my blog by clicking the "follow" button on the left sidebar! As I previously mentioned, I will be detailing why there is really no alternative to the Broadway subway (how its business case is proven, and why any alternatives just do not work) in a follow-up article.
import re

from graphql import parse  # assumed backend: graphql-core's SDL parser

def convert_to_ast(sdl: str):
    # GQL_SCHEMA_TYPE (defined elsewhere in the module) is a regex matching the
    # schema block, which is stripped before parsing the remaining SDL.
    sdl = re.sub(GQL_SCHEMA_TYPE, "", sdl)
    return parse(sdl)
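A usage sketch of the function above; graphql-core is assumed as the parse backend, and the GQL_SCHEMA_TYPE pattern shown here is hypothetical (the real one is defined elsewhere in the module):

import re
from graphql import parse

# Hypothetical stand-in for the module's real pattern: strip the `schema { ... }` block.
GQL_SCHEMA_TYPE = re.compile(r"schema\s*\{[^}]*\}")

sdl = """
schema { query: Query }

type Query {
  hello: String
}
"""

ast = parse(re.sub(GQL_SCHEMA_TYPE, "", sdl))
print(ast.definitions[0].name.value)  # -> Query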
El Niño has always been a key player in Australia's climate. And as temperatures in the tropical Pacific Ocean show, the current El Niño event is the most significant since 1997. For many countries in the western equatorial Pacific, most notably our neighbours in Papua New Guinea, El Niño has brought significant drought. On the other side of the ocean, floods and landslides have caused disaster in Peru. Now we're also seeing the start of a global coral bleaching event — only the third that has ever been recorded.

But until recently, climate models have been far less firm in their outlooks for Australia. Simulations of rainfall for the coming months have been dry in some models, whereas others have shown average or even wetter-than-average outlooks for parts of Australia — with an inkling of above-average heat. Notwithstanding long-term drought in parts of Queensland, northern New South Wales and the southeast of Australia, the weather that we have seen since El Niño was declared earlier this year has been consistent with a moderate influence on Australia — with patchy rainfall overall, and less rain in the north of Australia and parts of the coastal south.

While that's not quite the high-impact El Niño we loathe here in Australia, it is consistent with the science. While very strong La Niña events, such as in 2010–12, almost always bring exceptional rainfall to Australia, our relationship with El Niño is a little less straightforward. Some strong El Niño events have brought only moderately dry conditions to Australia, such as we saw in 1997–98, while some weaker events have caused widespread and severe drought, such as in 2006–07.

Those in the business of managing climate risk were hoping that the models would continue to show a reasonable chance of some rain over spring. Then along came September. The climate turned dry. And hot. And the models agreed — El Niño was set to make its presence felt in Australia. With fires burning across the southeast during a record-breaking early season heatwave in the first week of October, we are keeping a very close eye on Australia's key climate drivers.

What happened in 2015?

El Niño is not the only influence on Australia's climate worth talking about. Australia's rainfall is affected by ENSO (El Niño and La Niña) in a big way — but we also know it's driven by many other influences. These include: the Indian Ocean (tropical and subtropical), the Southern Ocean, the seas directly to our north, the pulses of tropical convection that circle the equator, the position of the subtropical ridge (the big high-pressure system that sits over Australia), the position of cold fronts to our south, the soil moisture (surface and subsurface) over the continent, and climate change. Rainfall is also sensitive to random internal climate variability — the chaos that climatologists call stochastic variability, but most others would simply call chance.

That complexity is why the world's leading meteorological agencies – including the Australian Bureau of Meteorology — use "dynamical" climate and weather models. These numerical models absorb information from satellites, floating buoys and weather stations and use physics to calculate what may happen in the days, weeks and months ahead. We also use models, combined with observations, to work out what's happening right now.
While it would have been obvious to those watching that we had a significant El Niño developing – the Bureau made its declaration on May 12 – this time there have been a few other significant players in the climate mix. In fact, the impact of multiple climate drivers on Australia's rainfall has been observed across the year to date.

The ever-warmer Indian Ocean

The Indian Ocean has been getting hotter. Recent research suggests that a significant fraction of the total ocean heat gain from climate change is finding its way to the Indian Ocean basin, driven by the global ocean circulation. The oceans to the northwest of Australia, a critical region for our rainfall, have been running at hottest-on-record for the past decade, and this record warmth has been almost basin-wide during the past three years. Consistent with this trend, June, July and August 2015 were all record-warm months for the Indian Ocean, and this, in turn, is having an impact upon the climate, and the models we use to forecast it. Not only has the warm water been pushing more moisture onto the Australian continent, it has also been altering the position of the jetstream above us. Ultimately, this was affecting our local weather patterns in a way that was countering the ever-stronger El Niño event in the Pacific.

In terms of the impact of El Niño on Australia, the Indian Ocean is crucial. In the tropical Indian Ocean is the "Indian Ocean Dipole", otherwise known as the IOD. The IOD is the difference in water temperature anomalies between an area off East Africa and an area off Indonesia. Like El Niño in the Pacific, when the heat moves to one side of the ocean basin, so too does the rainfall.

(Image: Australian Bureau of Meteorology)

A "positive IOD" event is the Indian Ocean's equivalent of El Niño — with warmer waters in the west of the basin associated with increased rainfall over East Africa, and cooler waters in the east associated with drier conditions in Australia and Indonesia. When this positive IOD pattern is combined with El Niño, it strengthens the El Niño influence considerably, particularly in the southeast of Australia. In fact a number of the driest years for Australia of late (1982, 1994, 2002, 2006) have been positive IOD years combined with El Niño — in what climatologists call an "in phase" relationship.

Recent drying

Come September 2015, a positive IOD started to emerge. In the Bureau's regular ENSO Wrap-Ups and our September climate and water outlook video we noted that this was something we – and everyone managing climate risk – needed to keep an eye on. While models were uncertain how strongly this positive IOD would develop, it was clear that it had the potential to become a serious influence. Ultimately, and indeed rapidly, it strengthened, reaching values not seen since 2006.

As a result, we now have three big players in the 2015 climate: El Niño and the positive IOD — which dry out our climate — and also a record-hot subtropical Indian Ocean — which tends to drive wetter conditions. It is a battle for supremacy of our skies. And the dry conditions are currently winning out – by a fair margin.

Will the dry stay?

Typically, the IOD pattern breaks down every November as the East Asian monsoon kicks in and the easterly wind patterns die off. And this year should be no exception, just as the models are suggesting. For Australian rainfall, that would return us to a hot Indian Ocean versus hot Pacific Ocean contest as early as mid-November 2015. But will that guarantee more rain? No.
It won’t guarantee it, but it may at least up the odds of some summer/northern wet season rainfall. In terms of fires and agriculture, a dry and hot October, on top of a dry September, sets up risks that need to be managed for rest of the year. The Bureau’s Climate Outlooks and ENSO Wrap-Ups are two tools that might help manage these risks.
def mergesort(lyst):
    # Recursively split the list in half, sort each half, then merge them.
    if len(lyst) <= 1:
        return lyst
    ind = len(lyst)//2
    return merge(mergesort(lyst[:ind]), mergesort(lyst[ind:]))
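The snippet above calls a merge helper that is not shown; a minimal sketch of the standard two-pointer merge it presumably relies on:

def merge(left, right):
    # Merge two already-sorted lists into one sorted list.
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # One list is exhausted; the remainder of the other is already sorted.
    return result + left[i:] + right[j:]

# Usage: mergesort([5, 2, 9, 1]) returns [1, 2, 5, 9].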
import os
import platform

def Platform():
    # Clear the console and return the project root directory for the current OS.
    if platform.system() == 'Windows':
        os.system('cls')
        main_dir = os.path.join('C:/', 'Users', 'joshs', 'Documents', 'Masters_Project')
    if platform.system() == 'Linux' or platform.system() == 'Darwin':
        os.system('clear')
        # Absolute path assumed for the mounted drive.
        main_dir = os.path.join('/media', 'mass_storage', 'josh', 'Documents', 'Masters_Project')
    return main_dir
Media Independent Handover Network Signalling Layer Protocol (MIH NSLP) This memo defines the Media Independent Handover Network Signalling Layer Protocol (MIH NSLP) for the transport of messages from the IEEE 802.21 standard using the Next Steps in Signalling (NSIS) framework. The MIH NSLP is responsible for the transport of MIH messages to remote entities reporting on link layer information, in order to support seamless mobility in heterogeneous environments. A usage example of the MIH NSLP is also provided.
/** * Generates Objective-C header files from compilation units. * * @author Tom Ball */ public class ObjectiveCHeaderGenerator extends ObjectiveCSourceFileGenerator { /** * Generate an Objective-C header file for each type declared in a specified * compilation unit. */ public static void generate(GenerationUnit unit) { new ObjectiveCHeaderGenerator(unit).generate(); } protected ObjectiveCHeaderGenerator(GenerationUnit unit) { super(unit, false); } @Override protected String getSuffix() { return ".h"; } public void generate() { CompilationUnit unit = getUnit(); println(J2ObjC.getFileHeader(getGenerationUnit().getSourceName())); generateFileHeader(); if (unit.getPackage().getJavadoc() != null && Options.docCommentsEnabled()) { newline(); printDocComment(unit.getPackage().getJavadoc()); } for (AbstractTypeDeclaration type : unit.getTypes()) { generate(type); newline(); printf("J2OBJC_TYPE_LITERAL_HEADER(%s)\n", NameTable.getFullName(type.getTypeBinding())); } generateFileFooter(); save(getOutputPath()); } private String getSuperTypeName(TypeDeclaration node) { Type superType = node.getSuperclassType(); if (superType == null) { return "NSObject"; } return NameTable.getFullName(superType.getTypeBinding()); } @Override public void generate(TypeDeclaration node) { ITypeBinding binding = node.getTypeBinding(); String typeName = NameTable.getFullName(binding); String superName = getSuperTypeName(node); boolean isInterface = node.isInterface(); printConstantDefines(node); newline(); printDocComment(node.getJavadoc()); if (needsDeprecatedAttribute(node.getAnnotations())) { println(DEPRECATED_ATTRIBUTE); } if (isInterface) { printf("@protocol %s", typeName); } else { printf("@interface %s : %s", typeName, superName); } List<Type> interfaces = node.getSuperInterfaceTypes(); if (!interfaces.isEmpty()) { print(" < "); for (Iterator<Type> iterator = interfaces.iterator(); iterator.hasNext();) { print(NameTable.getFullName(iterator.next().getTypeBinding())); if (iterator.hasNext()) { print(", "); } } print(isInterface ? ", NSObject, JavaObject >" : " >"); } else if (isInterface) { println(" < NSObject, JavaObject >"); } if (!isInterface) { println(" {"); printInstanceVariables(node, false); println("}"); } printDeclarations(node.getBodyDeclarations()); println("\n@end"); if (isInterface) { printStaticInterface(node); } else { printStaticInitFunction(node); printFieldSetters(node, false); printFunctions(node.getBodyDeclarations()); printStaticFields(node); } printIncrementAndDecrementFunctions(binding); String pkg = binding.getPackage().getName(); if (NameTable.hasPrefix(pkg) && binding.isTopLevel()) { String unprefixedName = NameTable.camelCaseQualifiedName(binding.getQualifiedName()); if (!unprefixedName.equals(typeName)) { if (binding.isInterface()) { // Protocols can't be used in typedefs. 
printf("\n#define %s %s\n", unprefixedName, typeName); } else { printf("\ntypedef %s %s;\n", typeName, unprefixedName); } } } printExternalNativeMethodCategory(node, typeName); } private static final Set<String> NEEDS_INC_AND_DEC = ImmutableSet.of( "int", "long", "double", "float", "short", "byte", "char"); private void printIncrementAndDecrementFunctions(ITypeBinding type) { ITypeBinding primitiveType = Types.getPrimitiveType(type); if (primitiveType == null || !NEEDS_INC_AND_DEC.contains(primitiveType.getName())) { return; } String primitiveName = primitiveType.getName(); String valueMethod = primitiveName + "Value"; if (primitiveName.equals("long")) { valueMethod = "longLongValue"; } else if (primitiveName.equals("byte")) { valueMethod = "charValue"; } newline(); printf("BOXED_INC_AND_DEC(%s, %s, %s)\n", NameTable.capitalize(primitiveName), valueMethod, NameTable.getFullName(type)); } @Override protected void generate(AnnotationTypeDeclaration node) { ITypeBinding type = node.getTypeBinding(); String typeName = NameTable.getFullName(type); List<AnnotationTypeMemberDeclaration> members = Lists.newArrayList( Iterables.filter(node.getBodyDeclarations(), AnnotationTypeMemberDeclaration.class)); printConstantDefines(node); boolean isRuntime = BindingUtil.isRuntimeAnnotation(type); newline(); // Print annotation as protocol. printf("@protocol %s < JavaLangAnnotationAnnotation >\n", typeName); if (isRuntime) { printAnnotationProperties(members); } println("\n@end"); if (isRuntime || hasInitializeMethod(node)) { // Print annotation implementation interface. printf("\n@interface %s : NSObject", typeName); if (isRuntime) { printf(" < %s >", typeName); } if (isRuntime && !members.isEmpty()) { println(" {\n @private"); printAnnotationVariables(members); println("}"); printAnnotationConstructor(type); printAnnotationAccessors(members); } else { newline(); } println("\n@end"); } printStaticInitFunction(node); for (IVariableBinding field : getStaticFieldsNeedingAccessors(node)) { printStaticField(field); } } private static final Predicate<BodyDeclaration> IS_NATIVE_PRED = new Predicate<BodyDeclaration>() { @Override public boolean apply(BodyDeclaration node) { return Modifier.isNative(node.getModifiers()); } }; private void printExternalNativeMethodCategory(TypeDeclaration node, String typeName) { List<MethodDeclaration> externalMethods = Lists.newArrayList( Iterables.filter(TreeUtil.getMethodDeclarations(node), IS_NATIVE_PRED)); if (!externalMethods.isEmpty()) { printf("\n@interface %s (NativeMethods)\n", typeName); for (MethodDeclaration m : externalMethods) { print(super.methodDeclaration(m)); println(";"); } println("@end"); } } private void printStaticInterface(TypeDeclaration node) { // Print @interface for static constants, if any. if (hasInitializeMethod(node)) { ITypeBinding binding = node.getTypeBinding(); String typeName = NameTable.getFullName(binding); printf("\n@interface %s : NSObject\n", typeName); println("\n@end"); } printStaticInitFunction(node); for (IVariableBinding field : getStaticFieldsNeedingAccessors(node)) { printStaticField(field); } } @Override protected void generate(EnumDeclaration node) { printConstantDefines(node); String typeName = NameTable.getFullName(node.getTypeBinding()); List<EnumConstantDeclaration> constants = node.getEnumConstants(); // Strip enum type suffix. String bareTypeName = typeName.endsWith("Enum") ? typeName.substring(0, typeName.length() - 4) : typeName; // C doesn't allow empty enum declarations. 
// Java does, so we skip the C enum declaration and generate the type declaration.
if (!constants.isEmpty()) {
  newline();
  printf("typedef NS_ENUM(NSUInteger, %s) {\n", bareTypeName); // Print C enum typedef.
  indent();
  int ordinal = 0;
  for (EnumConstantDeclaration constant : constants) {
    printIndent();
    printf("%s_%s = %d,\n", bareTypeName, constant.getName().getIdentifier(), ordinal++);
  }
  unindent();
  print("};\n");
}
newline();
if (needsDeprecatedAttribute(node.getAnnotations())) {
  println(DEPRECATED_ATTRIBUTE);
}
// Print enum type.
printf("@interface %s : JavaLangEnum < NSCopying", typeName);
ITypeBinding enumType = node.getTypeBinding();
for (ITypeBinding intrface : enumType.getInterfaces()) {
  if (!intrface.getName().equals(("Cloneable"))) { // Cloneable handled below.
    printf(", %s", NameTable.getFullName(intrface));
  }
}
println(" > {");
printInstanceVariables(node, false);
println("}");
printDeclarations(node.getBodyDeclarations());
println("\n@end");
printStaticInitFunction(node);
printFunctions(node.getBodyDeclarations());
printf("\nFOUNDATION_EXPORT %s *%s_values_[];\n", typeName, typeName);
for (EnumConstantDeclaration constant : constants) {
  String varName = NameTable.getStaticVarName(constant.getVariableBinding());
  String valueName = constant.getName().getIdentifier();
  printf("\n#define %s_%s %s_values_[%s_%s]\n", typeName, varName, typeName, bareTypeName, valueName);
  printf("J2OBJC_ENUM_CONSTANT_GETTER(%s, %s)\n", typeName, varName);
}
printStaticFields(node);
printFieldSetters(node, false);
String pkg = enumType.getPackage().getName();
if (NameTable.hasPrefix(pkg) && enumType.isTopLevel()) {
  String unprefixedName = NameTable.camelCaseQualifiedName(enumType.getQualifiedName()) + "Enum";
  if (!unprefixedName.equals(typeName)) {
    printf("\ntypedef %s %s;\n", typeName, unprefixedName);
  }
}
}

private void printStaticInitFunction(AbstractTypeDeclaration node) {
  ITypeBinding binding = node.getTypeBinding();
  String typeName = NameTable.getFullName(binding);
  if (hasInitializeMethod(node)) {
    printf("\nFOUNDATION_EXPORT BOOL %s_initialized;\n", typeName);
    printf("J2OBJC_STATIC_INIT(%s)\n", typeName);
  } else {
    printf("\nJ2OBJC_EMPTY_STATIC_INIT(%s)\n", typeName);
  }
}

private void printStaticFields(AbstractTypeDeclaration node) {
  for (IVariableBinding var : getStaticFieldsNeedingAccessors(node)) {
    printStaticField(var);
  }
}

protected void printStaticField(IVariableBinding var) {
  String objcType = NameTable.getObjCType(var.getType());
  String typeWithSpace = objcType + (objcType.endsWith("*") ?
"" : " "); String name = NameTable.getStaticVarName(var); String className = NameTable.getFullName(var.getDeclaringClass()); boolean isFinal = Modifier.isFinal(var.getModifiers()); boolean isPrimitive = var.getType().isPrimitive(); newline(); if (BindingUtil.isPrimitiveConstant(var)) { name = var.getName(); } else { printf("FOUNDATION_EXPORT %s%s_%s;\n", typeWithSpace, className, name); } printf("J2OBJC_STATIC_FIELD_GETTER(%s, %s, %s)\n", className, name, objcType); if (!isFinal) { if (isPrimitive) { printf("J2OBJC_STATIC_FIELD_REF_GETTER(%s, %s, %s)\n", className, name, objcType); } else { printf("J2OBJC_STATIC_FIELD_SETTER(%s, %s, %s)\n", className, name, objcType); } } } @Override protected void printFunction(FunctionDeclaration function) { if (!Modifier.isPrivate(function.getModifiers())) { println("\nFOUNDATION_EXPORT " + getFunctionSignature(function) + ';'); } } @Override protected void printNativeDeclaration(NativeDeclaration declaration) { newline(); String code = declaration.getHeaderCode(); if (code != null) { print(declaration.getHeaderCode()); } } @Override protected void printNormalMethod(MethodDeclaration m) { if (!Modifier.isNative(m.getModifiers())) { printNormalMethodDeclaration(m); } } @Override protected void printConstructor(MethodDeclaration m) { newline(); printDocComment(m.getJavadoc()); println(super.constructorDeclaration(m) + ";"); } static enum SortState { BEFORE_DECLS, METHODS, AFTER_DECLS }; /** * Print method declarations with #pragma mark lines documenting their scope. * Since native declarations can be intermixed with methods and can be order- * dependent (such as #ifdefs surrounding a method), intermixed declarations * aren't sorted. It's okay if all the native declarations are before and/or * after the method list, however. */ @Override protected void printDeclarations(Iterable<BodyDeclaration> iter) { List<BodyDeclaration> allDeclarations = Lists.newArrayList(iter); List<BodyDeclaration> beforeDeclarations = Lists.newArrayList(); List<MethodDeclaration> allMethods = Lists.newArrayList(); List<BodyDeclaration> afterDeclarations = Lists.newArrayList(); SortState state = SortState.BEFORE_DECLS; for (BodyDeclaration decl : allDeclarations) { if (decl instanceof NativeDeclaration) { switch (state) { case BEFORE_DECLS: beforeDeclarations.add(decl); break; case METHODS: state = SortState.AFTER_DECLS; // fall-through case AFTER_DECLS: afterDeclarations.add(decl); } } else if (decl instanceof MethodDeclaration || decl instanceof FunctionDeclaration) { if (state == SortState.BEFORE_DECLS) { state = SortState.METHODS; } else if (state == SortState.AFTER_DECLS && !isPrivateOrSynthetic(decl.getModifiers())) { // Mixed native and method declarations, punt. 
super.printDeclarations(allDeclarations); return; } if (decl instanceof MethodDeclaration) { MethodDeclaration m = (MethodDeclaration) decl; if (shouldPrint(m)) { allMethods.add(m); } } } } super.printDeclarations(beforeDeclarations); printSortedMethods(allMethods, "Public", java.lang.reflect.Modifier.PUBLIC); printSortedMethods(allMethods, "Protected", java.lang.reflect.Modifier.PROTECTED); printSortedMethods(allMethods, "Package-Private", 0); printSortedMethods(allMethods, "Private", java.lang.reflect.Modifier.PRIVATE); super.printDeclarations(afterDeclarations); } private void printSortedMethods(List<MethodDeclaration> allMethods, String title, int modifier) { List<MethodDeclaration> methods = Lists.newArrayList(); for (MethodDeclaration m : allMethods) { int accessMask = Modifier.PUBLIC | Modifier.PROTECTED | Modifier.PRIVATE; // The following test works with package-private access, which doesn't have its own flag. if ((m.getModifiers() & accessMask) == modifier) { methods.add(m); } } if (methods.isEmpty()) { return; } printf("\n#pragma mark %s\n", title); TreeUtil.sortMethods(methods); for (MethodDeclaration m : methods) { printMethod(m); } } @Override protected void printMethod(MethodDeclaration m) { if (shouldPrint(m)) { super.printMethod(m); } } private boolean shouldPrint(MethodDeclaration m) { return !Options.hidePrivateMembers() || !isPrivateOrSynthetic(m.getModifiers()); } protected void printForwardDeclarations(Set<Import> forwardDecls) { Set<String> forwardStmts = Sets.newTreeSet(); for (Import imp : forwardDecls) { forwardStmts.add(createForwardDeclaration(imp.getTypeName(), imp.isInterface())); } if (!forwardStmts.isEmpty()) { for (String stmt : forwardStmts) { println(stmt); } newline(); } } protected void generateFileHeader() { printf("#ifndef _%s_H_\n", getGenerationUnit().getName()); printf("#define _%s_H_\n", getGenerationUnit().getName()); pushIgnoreDeprecatedDeclarationsPragma(); newline(); HeaderImportCollector collector = new HeaderImportCollector(); collector.collect(getUnit()); printForwardDeclarations(collector.getForwardDeclarations()); // Print collected includes. Set<Import> superTypes = collector.getSuperTypes(); Set<String> includeStmts = Sets.newTreeSet(); includeStmts.add("#include \"J2ObjC_header.h\""); for (Import imp : superTypes) { includeStmts.add(String.format("#include \"%s.h\"", imp.getImportFileName())); } for (String stmt : includeStmts) { println(stmt); } } protected String createForwardDeclaration(String typeName, boolean isInterface) { return String.format("@%s %s;", isInterface ? 
"protocol" : "class", typeName); } protected void generateFileFooter() { newline(); popIgnoreDeprecatedDeclarationsPragma(); printf("#endif // _%s_H_\n", getGenerationUnit().getName()); } private void printAnnotationVariables(List<AnnotationTypeMemberDeclaration> members) { indent(); for (AnnotationTypeMemberDeclaration member : members) { printIndent(); ITypeBinding type = member.getMethodBinding().getReturnType(); print(NameTable.getObjCType(type)); if (type.isPrimitive() || type.isInterface()) { print(' '); } print(NameTable.getAnnotationPropertyVariableName(member.getMethodBinding())); println(";"); } unindent(); } private void printAnnotationConstructor(ITypeBinding annotation) { newline(); print(annotationConstructorDeclaration(annotation)); println(";"); } private void printAnnotationProperties(List<AnnotationTypeMemberDeclaration> members) { if (!members.isEmpty()) { newline(); } for (AnnotationTypeMemberDeclaration member : members) { ITypeBinding type = member.getType().getTypeBinding(); print("@property (readonly) "); String typeString = NameTable.getSpecificObjCType(type); String propertyName = NameTable.getAnnotationPropertyName(member.getMethodBinding()); println(String.format("%s%s%s;", typeString, typeString.endsWith("*") ? "" : " ", propertyName)); if (needsObjcMethodFamilyNoneAttribute(propertyName)) { println(String.format("- (%s)%s OBJC_METHOD_FAMILY_NONE;", typeString, propertyName)); } } } private void printAnnotationAccessors(List<AnnotationTypeMemberDeclaration> members) { boolean printedNewline = false; for (AnnotationTypeMemberDeclaration member : members) { if (member.getDefault() != null) { if (!printedNewline) { newline(); printedNewline = true; } ITypeBinding type = member.getType().getTypeBinding(); String typeString = NameTable.getSpecificObjCType(type); String propertyName = NameTable.getAnnotationPropertyName(member.getMethodBinding()); printf("+ (%s)%sDefault;\n", typeString, propertyName); } } } private void printConstantDefines(AbstractTypeDeclaration node) { ITypeBinding type = node.getTypeBinding(); boolean needsNewline = true; for (IVariableBinding field : type.getDeclaredFields()) { if (BindingUtil.isPrimitiveConstant(field)) { if (needsNewline) { needsNewline = false; newline(); } printf("#define %s ", NameTable.getPrimitiveConstantName(field)); Object value = field.getConstantValue(); assert value != null; println(LiteralGenerator.generate(value)); } } } }
import numpy as np
import pandas as pd
from utils import *
from sklearn.feature_selection import SelectKBest, SequentialFeatureSelector
from sklearn.feature_selection import chi2, f_classif, mutual_info_classif
from sklearn.neighbors import KNeighborsClassifier

def chi_square_test(data, Y, k = 5):
    # Keep the k features with the highest chi-squared scores.
    fs = SelectKBest(chi2, k = k)
    data = fs.fit_transform(data, Y)
    return data

def information_gain(data, Y, k = 5):
    # Keep the k features with the highest mutual information with the labels.
    fs = SelectKBest(mutual_info_classif, k = k)
    data = fs.fit_transform(data, Y)
    return data

def sfs(data, Y, k = 5):
    # Minimal completion (the body was missing): greedy sequential forward
    # selection wrapping the already-imported KNN classifier.
    knn = KNeighborsClassifier(n_neighbors = 3)
    fs = SequentialFeatureSelector(knn, n_features_to_select = k)
    data = fs.fit_transform(data, Y)
    return data
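A quick usage sketch, assuming scikit-learn's bundled digits dataset (its pixel features are non-negative, which chi2 requires):

from sklearn.datasets import load_digits

X, y = load_digits(return_X_y=True)       # 1797 samples, 64 features
print(chi_square_test(X, y, k=5).shape)   # (1797, 5)
print(information_gain(X, y, k=5).shape)  # (1797, 5)
print(sfs(X, y, k=5).shape)               # (1797, 5); slower, fits a KNN per candidate feature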
The Barcelona state prosecutor said on Friday it was not opposed to substituting a fine for the 21-month prison sentence for tax fraud handed down to Lionel Messi, as long as the fine was the maximum allowed under law. The maximum fine would be €255,000, on top of a nearly €2m fine paid by the 29-year-old as part of last year's sentence. Neither is the prosecutor opposed to suspending the Barcelona player's prison sentence, and that of his father, on the proviso that they have no more brushes with the law for three years, given that both have had clean criminal records up to now.

The decision means that the 21-month prison sentence has been swapped for a fine worth less than Messi's weekly wage. Messi and his father Jorge were found guilty by a Catalan court last July on three counts of tax fraud between 2007 and 2009, to the tune of €4.1m on image rights. The Argentine was never expected to serve time in jail; under the Spanish system, prison terms of under two years can be served on probation. The judge in charge of the case will now make a decision bearing in mind the prosecutor's recommendations. Judges usually follow the state prosecutor's recommendations in Spain.

Reuters
// Check that we compute the "datapresent" string correctly for the given // |available_types|. TEST(FormStructureTest, CheckDataPresence) { FormData form; form.method = ASCIIToUTF16("post"); FormField field; field.form_control_type = ASCIIToUTF16("text"); field.label = ASCIIToUTF16("First Name"); field.name = ASCIIToUTF16("first"); form.fields.push_back(field); field.label = ASCIIToUTF16("Last Name"); field.name = ASCIIToUTF16("last"); form.fields.push_back(field); field.label = ASCIIToUTF16("Email"); field.name = ASCIIToUTF16("email"); form.fields.push_back(field); FormStructure form_structure(form); FieldTypeSet unknown_type; unknown_type.insert(UNKNOWN_TYPE); for (size_t i = 0; i < form_structure.field_count(); ++i) form_structure.field(i)->set_possible_types(unknown_type); FieldTypeSet available_field_types; std::string encoded_xml; EXPECT_TRUE(form_structure.EncodeUploadRequest(available_field_types, false, &encoded_xml)); EXPECT_EQ("<\?xml version=\"1.0\" encoding=\"UTF-8\"\?>" "<autofillupload clientversion=\"6.1.1715.1442/en (GGLL)\"" " formsignature=\"6402244543831589061\" autofillused=\"false\"" " datapresent=\"\">" "<field signature=\"1089846351\" autofilltype=\"1\"/>" "<field signature=\"2404144663\" autofilltype=\"1\"/>" "<field signature=\"420638584\" autofilltype=\"1\"/>" "</autofillupload>", encoded_xml); available_field_types.clear(); available_field_types.insert(NAME_FIRST); available_field_types.insert(NAME_LAST); available_field_types.insert(NAME_FULL); available_field_types.insert(EMAIL_ADDRESS); available_field_types.insert(ADDRESS_HOME_LINE1); available_field_types.insert(ADDRESS_HOME_CITY); EXPECT_TRUE(form_structure.EncodeUploadRequest(available_field_types, false, &encoded_xml)); EXPECT_EQ("<\?xml version=\"1.0\" encoding=\"UTF-8\"\?>" "<autofillupload clientversion=\"6.1.1715.1442/en (GGLL)\"" " formsignature=\"6402244543831589061\" autofillused=\"false\"" " datapresent=\"1540000240\">" "<field signature=\"1089846351\" autofilltype=\"1\"/>" "<field signature=\"2404144663\" autofilltype=\"1\"/>" "<field signature=\"420638584\" autofilltype=\"1\"/>" "</autofillupload>", encoded_xml); available_field_types.clear(); available_field_types.insert(NAME_FIRST); available_field_types.insert(NAME_MIDDLE); available_field_types.insert(NAME_LAST); available_field_types.insert(NAME_MIDDLE_INITIAL); available_field_types.insert(NAME_FULL); available_field_types.insert(EMAIL_ADDRESS); available_field_types.insert(PHONE_HOME_NUMBER); available_field_types.insert(PHONE_HOME_CITY_CODE); available_field_types.insert(PHONE_HOME_COUNTRY_CODE); available_field_types.insert(PHONE_HOME_CITY_AND_NUMBER); available_field_types.insert(PHONE_HOME_WHOLE_NUMBER); available_field_types.insert(ADDRESS_HOME_LINE1); available_field_types.insert(ADDRESS_HOME_LINE2); available_field_types.insert(ADDRESS_HOME_CITY); available_field_types.insert(ADDRESS_HOME_STATE); available_field_types.insert(ADDRESS_HOME_ZIP); available_field_types.insert(ADDRESS_HOME_COUNTRY); available_field_types.insert(COMPANY_NAME); EXPECT_TRUE(form_structure.EncodeUploadRequest(available_field_types, false, &encoded_xml)); EXPECT_EQ("<\?xml version=\"1.0\" encoding=\"UTF-8\"\?>" "<autofillupload clientversion=\"6.1.1715.1442/en (GGLL)\"" " formsignature=\"6402244543831589061\" autofillused=\"false\"" " datapresent=\"1f7e000378000008\">" "<field signature=\"1089846351\" autofilltype=\"1\"/>" "<field signature=\"2404144663\" autofilltype=\"1\"/>" "<field signature=\"420638584\" autofilltype=\"1\"/>" "</autofillupload>", 
encoded_xml); available_field_types.clear(); available_field_types.insert(CREDIT_CARD_NAME); available_field_types.insert(CREDIT_CARD_NUMBER); available_field_types.insert(CREDIT_CARD_EXP_MONTH); available_field_types.insert(CREDIT_CARD_EXP_2_DIGIT_YEAR); available_field_types.insert(CREDIT_CARD_EXP_4_DIGIT_YEAR); available_field_types.insert(CREDIT_CARD_EXP_DATE_2_DIGIT_YEAR); available_field_types.insert(CREDIT_CARD_EXP_DATE_4_DIGIT_YEAR); EXPECT_TRUE(form_structure.EncodeUploadRequest(available_field_types, false, &encoded_xml)); EXPECT_EQ("<\?xml version=\"1.0\" encoding=\"UTF-8\"\?>" "<autofillupload clientversion=\"6.1.1715.1442/en (GGLL)\"" " formsignature=\"6402244543831589061\" autofillused=\"false\"" " datapresent=\"0000000000001fc0\">" "<field signature=\"1089846351\" autofilltype=\"1\"/>" "<field signature=\"2404144663\" autofilltype=\"1\"/>" "<field signature=\"420638584\" autofilltype=\"1\"/>" "</autofillupload>", encoded_xml); available_field_types.clear(); available_field_types.insert(NAME_FIRST); available_field_types.insert(NAME_MIDDLE); available_field_types.insert(NAME_LAST); available_field_types.insert(NAME_MIDDLE_INITIAL); available_field_types.insert(NAME_FULL); available_field_types.insert(EMAIL_ADDRESS); available_field_types.insert(PHONE_HOME_NUMBER); available_field_types.insert(PHONE_HOME_CITY_CODE); available_field_types.insert(PHONE_HOME_COUNTRY_CODE); available_field_types.insert(PHONE_HOME_CITY_AND_NUMBER); available_field_types.insert(PHONE_HOME_WHOLE_NUMBER); available_field_types.insert(ADDRESS_HOME_LINE1); available_field_types.insert(ADDRESS_HOME_LINE2); available_field_types.insert(ADDRESS_HOME_CITY); available_field_types.insert(ADDRESS_HOME_STATE); available_field_types.insert(ADDRESS_HOME_ZIP); available_field_types.insert(ADDRESS_HOME_COUNTRY); available_field_types.insert(CREDIT_CARD_NAME); available_field_types.insert(CREDIT_CARD_NUMBER); available_field_types.insert(CREDIT_CARD_EXP_MONTH); available_field_types.insert(CREDIT_CARD_EXP_2_DIGIT_YEAR); available_field_types.insert(CREDIT_CARD_EXP_4_DIGIT_YEAR); available_field_types.insert(CREDIT_CARD_EXP_DATE_2_DIGIT_YEAR); available_field_types.insert(CREDIT_CARD_EXP_DATE_4_DIGIT_YEAR); available_field_types.insert(COMPANY_NAME); EXPECT_TRUE(form_structure.EncodeUploadRequest(available_field_types, false, &encoded_xml)); EXPECT_EQ("<\?xml version=\"1.0\" encoding=\"UTF-8\"\?>" "<autofillupload clientversion=\"6.1.1715.1442/en (GGLL)\"" " formsignature=\"6402244543831589061\" autofillused=\"false\"" " datapresent=\"1f7e000378001fc8\">" "<field signature=\"1089846351\" autofilltype=\"1\"/>" "<field signature=\"2404144663\" autofilltype=\"1\"/>" "<field signature=\"420638584\" autofilltype=\"1\"/>" "</autofillupload>", encoded_xml); }
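The "datapresent" attribute exercised above is a hex-encoded bit vector: each autofill field type owns one bit, placed most-significant-bit-first within each byte, and the vector is sized up to the byte holding the highest set bit. A minimal standalone sketch of that encoding follows; the class and method names (DataPresent, encode) are mine, and the numeric type ids are assumptions inferred from the expected strings in the test (e.g. NAME_FIRST = 3, NAME_LAST = 5, NAME_FULL = 7, EMAIL_ADDRESS = 9, ADDRESS_HOME_LINE1 = 30, ADDRESS_HOME_CITY = 33).

import java.util.Set;

public class DataPresent {
    // Bit for type id t lives in byte t/8, at mask (0x80 >> (t % 8)),
    // i.e. most-significant bit first within each byte.
    static String encode(Set<Integer> typeIds) {
        int maxId = typeIds.stream().max(Integer::compare).orElse(-1);
        if (maxId < 0) {
            return "";  // no available types -> datapresent=""
        }
        byte[] bits = new byte[maxId / 8 + 1];
        for (int id : typeIds) {
            bits[id / 8] |= (byte) (0x80 >>> (id % 8));
        }
        StringBuilder out = new StringBuilder();
        for (byte b : bits) {
            out.append(String.format("%02x", b & 0xff));  // mask to avoid sign extension
        }
        return out.toString();
    }

    public static void main(String[] args) {
        // NAME_FIRST, NAME_LAST, NAME_FULL, EMAIL_ADDRESS,
        // ADDRESS_HOME_LINE1, ADDRESS_HOME_CITY (ids assumed, see above)
        System.out.println(encode(Set.of(3, 5, 7, 9, 30, 33))); // prints 1540000240
    }
}

Run against the other expected strings in the test ("1f7e000378000008", "0000000000001fc0"), the same scheme reproduces them, which is what pins down the bit ordering.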
#include "bitcalc.h"

// Convert a bit count to the number of bytes needed to hold it, rounding up:
// any remainder after dividing by 8 costs one extra byte. (The original used
// bits % 2, which under-allocated for counts like 10 bits.)
size_t bitcalc::bits_to_bytes(size_t const bits) {
    return (bits >> 3) + (((bits % 8) > 0) ? 1 : 0);
}
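A branch-free equivalent is `return (bits + 7) >> 3;`, which rounds up by adding seven before the divide; either form gives, for example, 2 bytes for 9 bits and 1 byte for 8 bits.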
#include "libultra_internal.h" extern u64 osClockRate; extern u8 D_80365D20; extern u8 _osCont_numControllers; extern OSTimer D_80365D28; extern OSMesgQueue _osContMesgQueue; extern OSMesg _osContMesgBuff[4]; // exactly the same as osEepromLongRead except for osEepromWrite call s32 osEepromLongWrite(OSMesgQueue *mq, u8 address, u8 *buffer, int nbytes) { s32 result = 0; if (address > 0x40) { return -1; } while (nbytes > 0) { result = osEepromWrite(mq, address, buffer); if (result != 0) { return result; } nbytes -= 8; address += 1; buffer += 8; osSetTimer(&D_80365D28, 12000 * osClockRate / 1000000, 0, &_osContMesgQueue, _osContMesgBuff); osRecvMesg(&_osContMesgQueue, NULL, OS_MESG_BLOCK); } return result; }
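The loop above advances the EEPROM block address by one while advancing the RAM pointer by eight because the device is addressed in 8-byte blocks, and each write is followed by a roughly 12 ms timer wait (12000 µs converted to CPU ticks) before the next block is sent. A hypothetical Java sketch of the same paging pattern, with illustrative names throughout and a plain sleep standing in for the osSetTimer/osRecvMesg pair:

import java.util.Arrays;

public class EepromPager {
    static final int BLOCK_SIZE = 8;

    // Stand-in for osEepromWrite: writes one 8-byte block, returns 0 on success.
    interface BlockWriter {
        int write(int blockAddress, byte[] block);
    }

    static int longWrite(BlockWriter dev, int address, byte[] data) throws InterruptedException {
        int result = 0;
        for (int off = 0; off < data.length; off += BLOCK_SIZE, address++) {
            // copyOfRange zero-pads a short final block.
            byte[] block = Arrays.copyOfRange(data, off, off + BLOCK_SIZE);
            result = dev.write(address, block);
            if (result != 0) {
                return result;
            }
            Thread.sleep(12); // ~12 ms settle delay, mirroring the 12000 us timer above
        }
        return result;
    }

    public static void main(String[] args) throws InterruptedException {
        BlockWriter printer = (addr, block) -> {
            System.out.println(addr + ": " + Arrays.toString(block));
            return 0;
        };
        longWrite(printer, 0x10, new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    }
}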
/**
 * Do any of our (top level) children have the given recordId?
 *
 * @param recordId the recordId of the child
 *
 * @return true, if any child has the given recordId
 */
public boolean hasChildOfType(short recordId) {
    for (EscherRecord r : this) {
        if (r.getRecordId() == recordId) {
            return true;
        }
    }
    return false;
}
package main // Runtime: 32 ms, faster than 96.77% of Go online submissions for Capacity To Ship Packages Within D Days. // Memory Usage: 6.2 MB, less than 95.16% of Go online submissions for Capacity To Ship Packages Within D Days. // https://leetcode.com/submissions/detail/449378694/ func shipWithinDays(weights []int, D int) int { max, sum := 0, 0 for _, weight := range weights { sum += weight if weight > max { max = weight } } left, right := max, sum+1 for left < right { mid := left + (right-left)/2 days, stack := 0, 0 for _, weight := range weights { stack += weight if stack > mid { days++ stack = weight } } if stack > 0 { days++ } if days <= D { right = mid } else { left = mid + 1 } } return left }
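The solution binary-searches on the answer rather than over the array: any capacity below the heaviest package is infeasible, the total weight is always feasible, and feasibility (the greedy day count staying within D) is monotone in the capacity, so the smallest feasible capacity can be found by bisection. A slightly restructured Java version of the same idea, as a worked example:

public class ShipWithinDays {
    static int shipWithinDays(int[] weights, int days) {
        int max = 0, sum = 0;
        for (int w : weights) {
            sum += w;
            max = Math.max(max, w);
        }
        int left = max, right = sum; // the answer always lies in [max, sum]
        while (left < right) {
            int mid = left + (right - left) / 2;
            if (daysNeeded(weights, mid) <= days) {
                right = mid;      // feasible: try a smaller capacity
            } else {
                left = mid + 1;   // infeasible: capacity must grow
            }
        }
        return left;
    }

    // Greedily pack packages in order, opening a new day when the load would overflow.
    static int daysNeeded(int[] weights, int capacity) {
        int days = 1, load = 0;
        for (int w : weights) {
            if (load + w > capacity) {
                days++;
                load = w;
            } else {
                load += w;
            }
        }
        return days;
    }

    public static void main(String[] args) {
        // Classic example: weights 1..10 over 5 days needs capacity 15.
        System.out.println(shipWithinDays(new int[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 5));
    }
}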
// Fill out your copyright notice in the Description page of Project Settings.

#include "TestingGuyCCharacter.h"
#include <fstream>

// Sets default values
ATestingGuyCCharacter::ATestingGuyCCharacter()
{
	// Set this character to call Tick() every frame. You can turn this off to improve performance if you don't need it.
	PrimaryActorTick.bCanEverTick = true;
}

// Called when the game starts or when spawned
void ATestingGuyCCharacter::BeginPlay()
{
	Super::BeginPlay();
}

// Called every frame
void ATestingGuyCCharacter::Tick(float DeltaTime)
{
	Super::Tick(DeltaTime);
	CalcMotionCap();
}

// Called to bind functionality to input
void ATestingGuyCCharacter::SetupPlayerInputComponent(UInputComponent* PlayerInputComponent)
{
	Super::SetupPlayerInputComponent(PlayerInputComponent);
}

// Cache the current world transforms of the tracked components (scene root,
// hip tracker, head camera and both motion controllers), then dump them to
// a text file for inspection.
void ATestingGuyCCharacter::CalcMotionCap()
{
	for (UActorComponent* const component : GetComponents())
	{
		if (component->GetName().Equals("Scene"))
		{
			USceneComponent* temp = (USceneComponent*)component;
			scenePos = temp->GetComponentTransform();
		}
		else if (component->GetName().Equals("HipTracker"))
		{
			USceneComponent* temp = (USceneComponent*)component;
			Hip = temp->GetComponentTransform();
		}
		else if (component->GetName().Equals("Camera"))
		{
			USceneComponent* temp = (USceneComponent*)component;
			Head = temp->GetComponentTransform();
		}
		else if (component->GetName().Equals("MotionController_l"))
		{
			USceneComponent* temp = (USceneComponent*)component;
			LHandTrack = temp->GetComponentTransform();
		}
		else if (component->GetName().Equals("MotionController_r"))
		{
			USceneComponent* temp = (USceneComponent*)component;
			RHandTrack = temp->GetComponentTransform();
		}
	}

	// Note: synchronous file I/O on the game thread every Tick is costly;
	// acceptable for debugging, but a buffered or asynchronous writer is preferable.
	std::ofstream outFile;
	outFile.open("../../../../../../Users/adamr/Desktop/test.txt");
	outFile << "Hello World!!!" << std::endl
	        << "Hip: " << TCHAR_TO_UTF8(*Hip.ToString()) << std::endl
	        << "Head: " << TCHAR_TO_UTF8(*Head.ToString()) << std::endl
	        << "LeftHand: " << TCHAR_TO_UTF8(*LHandTrack.ToString()) << std::endl
	        << "RightHand: " << TCHAR_TO_UTF8(*RHandTrack.ToString()) << std::endl
	        << "Location of Scene: " << TCHAR_TO_UTF8(*scenePos.ToString()) << std::endl;
	outFile.close();

	// Bone transforms not yet assigned here; to be set via
	// SetComponents(FQuat Rotation, FVector Translation, FVector Scale):
	// Spine, Spine1, Spine2, Neck, LShoulder, LArm, LForeArm,
	// RShoulder, RArm, RForeArm, LUpLeg, LLeg, LFoot, RUpLeg, RLeg, RFoot.
}
<reponame>ma2bd/fastpay // Copyright (c) Facebook Inc. // SPDX-License-Identifier: Apache-2.0 use super::*; #[test] fn test_handle_transfer_order_bad_signature() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(5)); let (_unknown_address, unknown_key) = get_key_pair(); let mut bad_signature_transfer_order = transfer_order.clone(); bad_signature_transfer_order.signature = Signature::new(&transfer_order.transfer, &unknown_key); assert!(authority_state .handle_transfer_order(bad_signature_transfer_order) .is_err()); assert!(authority_state .accounts .get(&sender) .unwrap() .pending_confirmation .is_none()); } #[test] fn test_handle_transfer_order_zero_amount() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(5)); // test transfer non-positive amount let mut zero_amount_transfer = transfer_order.transfer; zero_amount_transfer.amount = Amount::zero(); let zero_amount_transfer_order = TransferOrder::new(zero_amount_transfer, &sender_key); assert!(authority_state .handle_transfer_order(zero_amount_transfer_order) .is_err()); assert!(authority_state .accounts .get(&sender) .unwrap() .pending_confirmation .is_none()); } #[test] fn test_handle_transfer_order_unknown_sender() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(5)); let (unknown_address, unknown_key) = get_key_pair(); let mut unknown_sender_transfer = transfer_order.transfer; unknown_sender_transfer.sender = unknown_address; let unknown_sender_transfer_order = TransferOrder::new(unknown_sender_transfer, &unknown_key); assert!(authority_state .handle_transfer_order(unknown_sender_transfer_order) .is_err()); assert!(authority_state .accounts .get(&sender) .unwrap() .pending_confirmation .is_none()); } #[test] fn test_handle_transfer_order_bad_sequence_number() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(5)); let mut sequence_number_state = authority_state; let sequence_number_state_sender_account = sequence_number_state.accounts.get_mut(&sender).unwrap(); sequence_number_state_sender_account.next_sequence_number = sequence_number_state_sender_account .next_sequence_number .increment() .unwrap(); assert!(sequence_number_state .handle_transfer_order(transfer_order) .is_err()); assert!(sequence_number_state .accounts .get(&sender) .unwrap() .pending_confirmation .is_none()); } #[test] fn test_handle_transfer_order_exceed_balance() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(1000)); assert!(authority_state .handle_transfer_order(transfer_order) .is_err()); assert!(authority_state .accounts .get(&sender) .unwrap() 
.pending_confirmation .is_none()); } #[test] fn test_handle_transfer_order_ok() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(5)); let account_info = authority_state .handle_transfer_order(transfer_order) .unwrap(); let pending_confirmation = authority_state .accounts .get(&sender) .unwrap() .pending_confirmation .clone() .unwrap(); assert_eq!( account_info.pending_confirmation.unwrap(), pending_confirmation ); } #[test] fn test_handle_transfer_order_double_spend() { let (sender, sender_key) = get_key_pair(); let recipient = Address::FastPay(dbg_addr(2)); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let transfer_order = init_transfer_order(sender, &sender_key, recipient, Amount::from(5)); let signed_order = authority_state .handle_transfer_order(transfer_order.clone()) .unwrap(); let double_spend_signed_order = authority_state .handle_transfer_order(transfer_order) .unwrap(); assert_eq!(signed_order, double_spend_signed_order); } #[test] fn test_handle_confirmation_order_unknown_sender() { let recipient = dbg_addr(2); let (sender, sender_key) = get_key_pair(); let mut authority_state = init_state(); let certified_transfer_order = init_certified_transfer_order( sender, &sender_key, Address::FastPay(recipient), Amount::from(5), &authority_state, ); assert!(authority_state .handle_confirmation_order(ConfirmationOrder::new(certified_transfer_order)) .is_ok()); assert!(authority_state.accounts.get(&recipient).is_some()); } #[test] fn test_handle_confirmation_order_bad_sequence_number() { let (sender, sender_key) = get_key_pair(); let recipient = dbg_addr(2); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let sender_account = authority_state.accounts.get_mut(&sender).unwrap(); sender_account.next_sequence_number = sender_account.next_sequence_number.increment().unwrap(); // let old_account = sender_account; let old_balance; let old_seq_num; { let old_account = authority_state.accounts.get_mut(&sender).unwrap(); old_balance = old_account.balance; old_seq_num = old_account.next_sequence_number; } let certified_transfer_order = init_certified_transfer_order( sender, &sender_key, Address::FastPay(recipient), Amount::from(5), &authority_state, ); // Replays are ignored. 
assert!(authority_state .handle_confirmation_order(ConfirmationOrder::new(certified_transfer_order)) .is_ok()); let new_account = authority_state.accounts.get_mut(&sender).unwrap(); assert_eq!(old_balance, new_account.balance); assert_eq!(old_seq_num, new_account.next_sequence_number); assert_eq!(new_account.confirmed_log, Vec::new()); assert!(authority_state.accounts.get(&recipient).is_none()); } #[test] fn test_handle_confirmation_order_exceed_balance() { let (sender, sender_key) = get_key_pair(); let recipient = dbg_addr(2); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let certified_transfer_order = init_certified_transfer_order( sender, &sender_key, Address::FastPay(recipient), Amount::from(1000), &authority_state, ); assert!(authority_state .handle_confirmation_order(ConfirmationOrder::new(certified_transfer_order)) .is_ok()); let new_account = authority_state.accounts.get(&sender).unwrap(); assert_eq!(Balance::from(-995), new_account.balance); assert_eq!(SequenceNumber::from(1), new_account.next_sequence_number); assert_eq!(new_account.confirmed_log.len(), 1); assert!(authority_state.accounts.get(&recipient).is_some()); } #[test] fn test_handle_confirmation_order_receiver_balance_overflow() { let (sender, sender_key) = get_key_pair(); let (recipient, _) = get_key_pair(); let mut authority_state = init_state_with_accounts(vec![ (sender, Balance::from(1)), (recipient, Balance::max()), ]); let certified_transfer_order = init_certified_transfer_order( sender, &sender_key, Address::FastPay(recipient), Amount::from(1), &authority_state, ); assert!(authority_state .handle_confirmation_order(ConfirmationOrder::new(certified_transfer_order)) .is_ok()); let new_sender_account = authority_state.accounts.get(&sender).unwrap(); assert_eq!(Balance::from(0), new_sender_account.balance); assert_eq!( SequenceNumber::from(1), new_sender_account.next_sequence_number ); assert_eq!(new_sender_account.confirmed_log.len(), 1); let new_recipient_account = authority_state.accounts.get(&recipient).unwrap(); assert_eq!(Balance::max(), new_recipient_account.balance); } #[test] fn test_handle_confirmation_order_receiver_equal_sender() { let (address, key) = get_key_pair(); let mut authority_state = init_state_with_account(address, Balance::from(1)); let certified_transfer_order = init_certified_transfer_order( address, &key, Address::FastPay(address), Amount::from(10), &authority_state, ); assert!(authority_state .handle_confirmation_order(ConfirmationOrder::new(certified_transfer_order)) .is_ok()); let account = authority_state.accounts.get(&address).unwrap(); assert_eq!(Balance::from(1), account.balance); assert_eq!(SequenceNumber::from(1), account.next_sequence_number); assert_eq!(account.confirmed_log.len(), 1); } #[test] fn test_handle_cross_shard_recipient_commit() { let (sender, sender_key) = get_key_pair(); let (recipient, _) = get_key_pair(); // Sender has no account on this shard. 
let mut authority_state = init_state_with_account(recipient, Balance::from(1)); let certified_transfer_order = init_certified_transfer_order( sender, &sender_key, Address::FastPay(recipient), Amount::from(10), &authority_state, ); assert!(authority_state .handle_cross_shard_recipient_commit(certified_transfer_order) .is_ok()); let account = authority_state.accounts.get(&recipient).unwrap(); assert_eq!(Balance::from(11), account.balance); assert_eq!(SequenceNumber::from(0), account.next_sequence_number); assert_eq!(account.confirmed_log.len(), 0); } #[test] fn test_handle_confirmation_order_ok() { let (sender, sender_key) = get_key_pair(); let recipient = dbg_addr(2); let mut authority_state = init_state_with_account(sender, Balance::from(5)); let certified_transfer_order = init_certified_transfer_order( sender, &sender_key, Address::FastPay(recipient), Amount::from(5), &authority_state, ); let old_account = authority_state.accounts.get_mut(&sender).unwrap(); let mut next_sequence_number = old_account.next_sequence_number; next_sequence_number = next_sequence_number.increment().unwrap(); let mut remaining_balance = old_account.balance; remaining_balance = remaining_balance .try_sub(certified_transfer_order.value.transfer.amount.into()) .unwrap(); let (info, _) = authority_state .handle_confirmation_order(ConfirmationOrder::new(certified_transfer_order.clone())) .unwrap(); assert_eq!(sender, info.sender); assert_eq!(remaining_balance, info.balance); assert_eq!(next_sequence_number, info.next_sequence_number); assert_eq!(None, info.pending_confirmation); assert_eq!( authority_state.accounts.get(&sender).unwrap().confirmed_log, vec![certified_transfer_order.clone()] ); let recipient_account = authority_state.accounts.get(&recipient).unwrap(); assert_eq!( recipient_account.balance, certified_transfer_order.value.transfer.amount.into() ); let info_request = AccountInfoRequest { sender: recipient, request_sequence_number: None, request_received_transfers_excluding_first_nth: Some(0), }; let response = authority_state .handle_account_info_request(info_request) .unwrap(); assert_eq!(response.requested_received_transfers.len(), 1); assert_eq!( response.requested_received_transfers[0] .value .transfer .amount, Amount::from(5) ); } #[test] fn test_handle_primary_synchronization_order_update() { let mut state = init_state(); let mut updated_transaction_index = state.last_transaction_index; let address = dbg_addr(1); let order = init_primary_synchronization_order(address); assert!(state .handle_primary_synchronization_order(order.clone()) .is_ok()); updated_transaction_index = updated_transaction_index.increment().unwrap(); assert_eq!(state.last_transaction_index, updated_transaction_index); let account = state.accounts.get(&address).unwrap(); assert_eq!(account.balance, order.amount.into()); assert_eq!(state.accounts.len(), 1); } #[test] fn test_handle_primary_synchronization_order_double_spend() { let mut state = init_state(); let mut updated_transaction_index = state.last_transaction_index; let address = dbg_addr(1); let order = init_primary_synchronization_order(address); assert!(state .handle_primary_synchronization_order(order.clone()) .is_ok()); updated_transaction_index = updated_transaction_index.increment().unwrap(); // Replays are ignored. 
assert!(state .handle_primary_synchronization_order(order.clone()) .is_ok()); assert_eq!(state.last_transaction_index, updated_transaction_index); let account = state.accounts.get(&address).unwrap(); assert_eq!(account.balance, order.amount.into()); assert_eq!(state.accounts.len(), 1); } #[test] fn test_account_state_ok() { let sender = dbg_addr(1); let authority_state = init_state_with_account(sender, Balance::from(5)); assert_eq!( authority_state.accounts.get(&sender).unwrap(), authority_state.account_state(&sender).unwrap() ); } #[test] fn test_account_state_unknown_account() { let sender = dbg_addr(1); let unknown_address = dbg_addr(99); let authority_state = init_state_with_account(sender, Balance::from(5)); assert!(authority_state.account_state(&unknown_address).is_err()); } #[test] fn test_get_shards() { let num_shards = 16u32; let mut found = vec![false; num_shards as usize]; let mut left = num_shards; loop { let (address, _) = get_key_pair(); let shard = AuthorityState::get_shard(num_shards, &address) as usize; println!("found {}", shard); if !found[shard] { found[shard] = true; left -= 1; if left == 0 { break; } } } } // helpers #[cfg(test)] fn init_state() -> AuthorityState { let (authority_address, authority_key) = get_key_pair(); let mut authorities = BTreeMap::new(); authorities.insert( /* address */ authority_address, /* voting right */ 1, ); let committee = Committee::new(authorities); AuthorityState::new(committee, authority_address, authority_key) } #[cfg(test)] fn init_state_with_accounts<I: IntoIterator<Item = (FastPayAddress, Balance)>>( balances: I, ) -> AuthorityState { let mut state = init_state(); for (address, balance) in balances { let account = state .accounts .entry(address) .or_insert_with(AccountOffchainState::new); account.balance = balance; } state } #[cfg(test)] fn init_state_with_account(address: FastPayAddress, balance: Balance) -> AuthorityState { init_state_with_accounts(std::iter::once((address, balance))) } #[cfg(test)] fn init_transfer_order( sender: FastPayAddress, secret: &KeyPair, recipient: Address, amount: Amount, ) -> TransferOrder { let transfer = Transfer { sender, recipient, amount, sequence_number: SequenceNumber::new(), user_data: UserData::default(), }; TransferOrder::new(transfer, secret) } #[cfg(test)] fn init_certified_transfer_order( sender: FastPayAddress, secret: &KeyPair, recipient: Address, amount: Amount, authority_state: &AuthorityState, ) -> CertifiedTransferOrder { let transfer_order = init_transfer_order(sender, secret, recipient, amount); let vote = SignedTransferOrder::new( transfer_order.clone(), authority_state.name, &authority_state.secret, ); let mut builder = SignatureAggregator::try_new(transfer_order, &authority_state.committee).unwrap(); builder .append(vote.authority, vote.signature) .unwrap() .unwrap() } #[cfg(test)] fn init_primary_synchronization_order(recipient: FastPayAddress) -> PrimarySynchronizationOrder { let mut transaction_index = VersionNumber::new(); transaction_index = transaction_index.increment().unwrap(); PrimarySynchronizationOrder { recipient, amount: Amount::from(5), transaction_index, } }
/** * OAuthHttpServletRequest is a custom implementation of the {@link HttpServletRequest} that implements the * bare minimum methods to handle OAuth 2.0 requests. * <p/> * The methods supported are delegated to the wrapped {@link HttpRequest} instance. * * @since 1.0.RC */ public class OAuthHttpServletRequest implements HttpServletRequest { private final HttpRequest httpRequest; public OAuthHttpServletRequest(HttpRequest httpRequest) { this.httpRequest = httpRequest; } @Override public String getAuthType() { throw new UnsupportedOperationException("getAuthType() method hasn't been implemented."); } @Override public Cookie[] getCookies() { throw new UnsupportedOperationException("getCookies() method hasn't been implemented."); } @Override public long getDateHeader(String name) { throw new UnsupportedOperationException("getDateHeader() method hasn't been implemented."); } @Override public String getHeader(String name) { return httpRequest.getHeader(name); } @Override public Enumeration<String> getHeaders(String name) { for (Map.Entry<String, String[]> entry : httpRequest.getHeaders().entrySet()) { if (entry.getKey().equalsIgnoreCase(name)) { return Collections.enumeration(Arrays.asList(entry.getValue())); } } return EmptyEnumeration.getInstance(); } @Override public Enumeration<String> getHeaderNames() { return Collections.enumeration(httpRequest.getHeaders().keySet()); } @Override public int getIntHeader(String name) { throw new UnsupportedOperationException("getIntHeader() method hasn't been implemented."); } @Override public String getMethod() { return httpRequest.getMethod().name(); } @Override public String getPathInfo() { throw new UnsupportedOperationException("getPathInfo() method hasn't been implemented."); } @Override public String getPathTranslated() { throw new UnsupportedOperationException("getPathTranslated() method hasn't been implemented."); } @Override public String getContextPath() { throw new UnsupportedOperationException("getContextPath() method hasn't been implemented."); } @Override public String getQueryString() { return httpRequest.getQueryParameters(); } @Override public String getRemoteUser() { throw new UnsupportedOperationException("getRemoteUser() method hasn't been implemented."); } @Override public boolean isUserInRole(String role) { throw new UnsupportedOperationException("isUserInRole() method hasn't been implemented."); } @Override public Principal getUserPrincipal() { throw new UnsupportedOperationException("getUserPrincipal() method hasn't been implemented."); } @Override public String getRequestedSessionId() { throw new UnsupportedOperationException("getRequestedSessionId() method hasn't been implemented."); } @Override public String getRequestURI() { throw new UnsupportedOperationException("getRequestURI() method hasn't been implemented."); } @Override public StringBuffer getRequestURL() { throw new UnsupportedOperationException("getRequestURL() method hasn't been implemented."); } @Override public String getServletPath() { throw new UnsupportedOperationException("getServletPath() method hasn't been implemented."); } @Override public HttpSession getSession(boolean create) { throw new UnsupportedOperationException("getSession() method hasn't been implemented."); } @Override public HttpSession getSession() { throw new UnsupportedOperationException("getSession() method hasn't been implemented."); } @Override public String changeSessionId() { throw new UnsupportedOperationException("changeSessionId() method hasn't been implemented."); } @Override public 
boolean isRequestedSessionIdValid() { throw new UnsupportedOperationException("isRequestedSessionIdValid() method hasn't been implemented."); } @Override public boolean isRequestedSessionIdFromCookie() { throw new UnsupportedOperationException("isRequestedSessionIdFromCookie() method hasn't been implemented."); } @Override public boolean isRequestedSessionIdFromURL() { throw new UnsupportedOperationException("isRequestedSessionIdFromURL() method hasn't been implemented."); } @Override public boolean isRequestedSessionIdFromUrl() { throw new UnsupportedOperationException("isRequestedSessionIdFromUrl() method hasn't been implemented."); } @Override public boolean authenticate(HttpServletResponse response) throws IOException, ServletException { throw new UnsupportedOperationException("authenticate() method hasn't been implemented."); } @Override public void login(String username, String password) throws ServletException { throw new UnsupportedOperationException("login() method hasn't been implemented."); } @Override public void logout() throws ServletException { throw new UnsupportedOperationException("logout() method hasn't been implemented."); } @Override public Collection<Part> getParts() throws IOException, ServletException { throw new UnsupportedOperationException("getParts() method hasn't been implemented."); } @Override public Part getPart(String name) throws IOException, ServletException { throw new UnsupportedOperationException("getPart() method hasn't been implemented."); } @Override public <T extends HttpUpgradeHandler> T upgrade(Class<T> aClass) throws IOException, ServletException { throw new UnsupportedOperationException("upgrade() method hasn't been implemented."); } @Override public Object getAttribute(String name) { throw new UnsupportedOperationException("getAttribute() method hasn't been implemented."); } @Override public Enumeration<String> getAttributeNames() { throw new UnsupportedOperationException("getAttributeNames() method hasn't been implemented."); } @Override public String getCharacterEncoding() { throw new UnsupportedOperationException("getCharacterEncoding() method hasn't been implemented."); } @Override public void setCharacterEncoding(String env) throws UnsupportedEncodingException { throw new UnsupportedOperationException("setCharacterEncoding() method hasn't been implemented."); } @Override public int getContentLength() { throw new UnsupportedOperationException("getContentLength() method hasn't been implemented."); } @Override public long getContentLengthLong() { throw new UnsupportedOperationException("getContentLengthLong() method hasn't been implemented."); } @Override public String getContentType() { return getHeader("Content-Type"); } @Override public ServletInputStream getInputStream() throws IOException { throw new UnsupportedOperationException("getInputStream() method hasn't been implemented."); } @Override public String getParameter(String name) { Assert.hasText(name); String[] parameterValue = httpRequest.getParameters().get(name); if (parameterValue == null || parameterValue.length == 0) { return null; } return parameterValue[0]; } @Override public Enumeration<String> getParameterNames() { return Collections.enumeration(httpRequest.getParameters().keySet()); } @Override public String[] getParameterValues(String name) { Assert.hasText(name); if (!httpRequest.getParameters().containsKey(name)) { return null; } return httpRequest.getParameters().get(name); } @Override public Map<String, String[]> getParameterMap() { return 
httpRequest.getParameters(); } @Override public String getProtocol() { throw new UnsupportedOperationException("getProtocol() method hasn't been implemented."); } @Override public String getScheme() { throw new UnsupportedOperationException("getScheme() method hasn't been implemented."); } @Override public String getServerName() { throw new UnsupportedOperationException("getServerName() method hasn't been implemented."); } @Override public int getServerPort() { throw new UnsupportedOperationException("getServerPort() method hasn't been implemented."); } @Override public BufferedReader getReader() throws IOException { throw new UnsupportedOperationException("getReader() method hasn't been implemented."); } @Override public String getRemoteAddr() { throw new UnsupportedOperationException("getRemoteAddr() method hasn't been implemented."); } @Override public String getRemoteHost() { throw new UnsupportedOperationException("getRemoteHost() method hasn't been implemented."); } @Override public void setAttribute(String name, Object o) { throw new UnsupportedOperationException("setAttribute() method hasn't been implemented."); } @Override public void removeAttribute(String name) { throw new UnsupportedOperationException("removeAttribute() method hasn't been implemented."); } @Override public Locale getLocale() { throw new UnsupportedOperationException("getLocale() method hasn't been implemented."); } @Override public Enumeration<Locale> getLocales() { throw new UnsupportedOperationException("getLocales() method hasn't been implemented."); } @Override public boolean isSecure() { throw new UnsupportedOperationException("isSecure() method hasn't been implemented."); } @Override public RequestDispatcher getRequestDispatcher(String path) { throw new UnsupportedOperationException("getRequestDispatcher() method hasn't been implemented."); } @Override public String getRealPath(String path) { throw new UnsupportedOperationException("getRealPath() method hasn't been implemented."); } @Override public int getRemotePort() { throw new UnsupportedOperationException("getRemotePort() method hasn't been implemented."); } @Override public String getLocalName() { throw new UnsupportedOperationException("getLocalName() method hasn't been implemented."); } @Override public String getLocalAddr() { throw new UnsupportedOperationException("getLocalAddr() method hasn't been implemented."); } @Override public int getLocalPort() { throw new UnsupportedOperationException("getLocalPort() method hasn't been implemented."); } @Override public ServletContext getServletContext() { throw new UnsupportedOperationException("getServletContext() method hasn't been implemented."); } @Override public AsyncContext startAsync() throws IllegalStateException { throw new UnsupportedOperationException("startAsync() method hasn't been implemented."); } @Override public AsyncContext startAsync(ServletRequest servletRequest, ServletResponse servletResponse) throws IllegalStateException { throw new UnsupportedOperationException("startAsync() method hasn't been implemented."); } @Override public boolean isAsyncStarted() { throw new UnsupportedOperationException("isAsyncStarted() method hasn't been implemented."); } @Override public boolean isAsyncSupported() { throw new UnsupportedOperationException("isAsyncStarted() method hasn't been implemented."); } @Override public AsyncContext getAsyncContext() { throw new UnsupportedOperationException("getAsyncContext() method hasn't been implemented."); } @Override public DispatcherType 
getDispatcherType() { throw new UnsupportedOperationException("getDispatcherType() method hasn't been implemented."); } private static class EmptyEnumeration implements Enumeration<String> { private static EmptyEnumeration instance = new EmptyEnumeration(); private EmptyEnumeration() { } public static EmptyEnumeration getInstance() { return instance; } @Override public boolean hasMoreElements() { return false; } @Override public String nextElement() { throw new NoSuchElementException("Empty enumeration. hasMoreElements() must be called first"); } } }
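The pattern here is a deliberately one-way adapter: only the accessors an OAuth 2.0 token request actually needs (method, headers, query string, parameters, content type) delegate to the wrapped HttpRequest, and every other HttpServletRequest method fails fast with UnsupportedOperationException. That way, any assumption the OAuth layer makes beyond that minimal surface shows up immediately as a descriptive exception rather than as silently wrong data.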
/**
  Locates and extracts Cloud Hypervisor SMBIOS data

  @return Address of extracted Cloud Hypervisor SMBIOS data
**/
UINT8 *
GetCloudHvSmbiosTables (
  VOID
  )
{
  SMBIOS_TABLE_3_0_ENTRY_POINT  *CloudHvTables = (VOID *)CLOUDHV_SMBIOS_ADDRESS;

  //
  // Only trust the table address if the entry point at the fixed address
  // carries the SMBIOS 3.0 anchor string "_SM3_".
  //
  if ((CloudHvTables->AnchorString[0] == '_') &&
      (CloudHvTables->AnchorString[1] == 'S') &&
      (CloudHvTables->AnchorString[2] == 'M') &&
      (CloudHvTables->AnchorString[3] == '3') &&
      (CloudHvTables->AnchorString[4] == '_'))
  {
    return (UINT8 *)(UINTN)CloudHvTables->TableAddress;
  }

  return NULL;
}
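The guard boils down to a five-byte prefix match. The same check in isolation, as a small standalone sketch (Java here purely for illustration; the class and method names are mine):

public class SmbiosAnchor {
    // An SMBIOS 3.0 entry point begins with the five-byte anchor "_SM3_";
    // only if it matches should the table address that follows be trusted.
    static boolean hasSm3Anchor(byte[] entryPoint) {
        byte[] anchor = { '_', 'S', 'M', '3', '_' };
        if (entryPoint.length < anchor.length) {
            return false;
        }
        for (int i = 0; i < anchor.length; i++) {
            if (entryPoint[i] != anchor[i]) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(hasSm3Anchor("_SM3_rest".getBytes())); // true
        System.out.println(hasSm3Anchor("_SM_rest".getBytes()));  // false
    }
}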
/**
 * @author <NAME>
 * Reference: https://www.interviewbit.com/problems/kth-row-of-pascals-triangle/
 */
import java.util.*;

public class Solution {
    public ArrayList<Integer> getRow(int A) {
        ArrayList<Integer> list = new ArrayList<>();
        int C = 1; // C(A, 0)
        for (int i = 1; i <= A + 1; i++) {
            list.add(C);
            // Multiplicative recurrence: C(A, i) = C(A, i - 1) * (A - i + 1) / i
            C = C * (A - i + 1) / i;
        }
        return list;
    }
}
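The integer division in the recurrence is exact at every step, because C(A, i-1) * (A - i + 1) equals C(A, i) * i and is therefore always divisible by i. For A = 4 the loop yields 1, 4, 6, 4, 1. One caveat: the intermediate product C * (A - i + 1) exceeds Integer.MAX_VALUE around row 30 (C(30, 14) * 16 is already past 2^31 - 1), so a long accumulator would be safer for larger rows.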
import * as admin from 'firebase-admin';

export function firebaseApp(
  authMock: jest.Mock,
  firestoreMock: jest.Mock
): admin.app.App {
  return {
    name: '',
    options: {},
    firestore: firestoreMock,
    securityRules: authMock,
    storage: authMock,
    delete: authMock,
    remoteConfig: authMock,
    projectManagement: authMock,
    messaging: authMock,
    machineLearning: authMock,
    instanceId: authMock,
    database: authMock,
    auth: authMock,
  };
}
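Routing every remaining service accessor through the same jest.Mock keeps the object literal assignable to admin.app.App while a given test only asserts against the one or two services it cares about; firestore gets its own mock so Firestore-specific expectations can be checked independently of everything else.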
Anilah – Shamanic Healing Music

Anilah sounds to me like a mix of Wardruna, Dead Can Dance, progressive rock and shamanic chanting. Anilah is the musical project of vocalist and composer Dréa Drury, a musician who hails from the Selkirk Mountains of Western Canada. Her music is influenced by traditional shamanic sound practices, sacred chant, dark tribal and Indian Classical music. She has studied the art of using the voice as a healing modality with sound healers and shamans from across North America. Even listening to her music on YouTube makes me feel more relaxed, so it must be working. Her unique sound includes elements of progressive rock and dark ambient chanting.

In the SolPurpose review of her album "Warrior", Dréa states the following:

"The reason I use ritual and ceremony in a creative context is to help dissolve perceptual boundaries, and to enhance my ability to hear in a different way. Usually this involves actively creating a trance state through pranayama, kriyas, or mantra."

She also says the following about nature:

"When I am walking through the forest and allow myself to open to the larger conversation that is happening, I feel myself filling up with the sheer grace of being, and literally have no other option but to express my gratitude – and this happens in the form of a song or melody. So to rephrase: I breathe nature in, I breathe melody out. My creativity takes this form."

Recently I had a chance to talk with this medicine woman of sound on Facebook. Here are her responses to my questions:

MG: How long have you been playing music? When and how did you pick it up?

DD: I've been playing music ever since I could sit at a piano – started studying with my grandmother at 5, then moved on to classical training at 13, did my degree in music at college and then did private apprenticeships with my vocal teachers Ali Akbar Khan and Silvia Nakkach afterward. So yea… have been at it for a long time. Always was a musical creature, couldn't help it.

MG: I'm curious, who are your musical influences?

DD: I have many, but notable ones are: Wardruna, Tool (all of Maynard James Keenan's projects), Dead Can Dance – Lisa Gerrard, Bjork, NIN, Chelsea Wolfe. I also listen to tons of avant-garde choral/classical music, from composers like Hildegard von Bingen, the Bulgarian Women's Choir, and countless artists in the Indian Classical raga genre.

If you would like to hear more of Anilah, check out the videos and links below:

ANILAH VIDEOS

CALLING THE OTHERS

ROLLING THUNDER [WARRIOR]

RELEVANT LINKS

ANILAH OFFICIAL SITE

ANILAH FACEBOOK

REVIEW ON SOLPURPOSE
def evaluate(self, nodes, relation, constants):
    # Entry point: delegate to the recursive helper, starting from an
    # initially empty mapping.
    return self.evaluate_aux({}, nodes, relation, constants)
//MOVE CONSTRUCTORS

/**
 * @brief Construct from the name and a moved string
 */
InvokeParameter(const std::string &name, std::string &&str) {
    this->name = name;
    this->type = "string";
    // std::-qualified for clarity; the unqualified call also resolved to
    // std::move via argument-dependent lookup.
    this->str = std::move(str);
}
package com.wow.wowmeet.utils.googleapi; import android.content.Context; import android.os.Bundle; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.util.Log; import com.google.android.gms.common.ConnectionResult; import com.google.android.gms.common.api.Api; import com.google.android.gms.common.api.GoogleApiClient; /** * Created by mahmutkaraca on 3/25/17. */ public class GoogleApiProvider { private Context context; private GoogleApiClient googleApiClient; private boolean connected = false; private GoogleApiClient.ConnectionCallbacks callbacks = new GoogleApiClient.ConnectionCallbacks() { @Override public void onConnected(@Nullable Bundle bundle) { connected = true; onProviderConnectedListener.onConnected(); } @Override public void onConnectionSuspended(int i) { connected = false; } }; private GoogleApiClient.OnConnectionFailedListener onConnectionFailedListener =new GoogleApiClient.OnConnectionFailedListener() { @Override public void onConnectionFailed(@NonNull ConnectionResult connectionResult) { connected = false; Log.e("GoogleApiProvider", connectionResult.getErrorMessage()); onProviderConnectionFailedListener.onConnectionFailed(); } }; private GoogleLocationAPIWrapper.WrapperLocationListener wrapperLocationListener; private OnProviderConnectedListener onProviderConnectedListener; private OnProviderConnectionFailedListener onProviderConnectionFailedListener; public GoogleApiProvider(@NonNull Context context, @NonNull OnProviderConnectedListener onProviderConnectedListener, @NonNull OnProviderConnectionFailedListener onProviderConnectionFailedListener, Api... apis) { this.context = context; this.onProviderConnectedListener = onProviderConnectedListener; this.onProviderConnectionFailedListener = onProviderConnectionFailedListener; GoogleApiClient.Builder builder = new GoogleApiClient.Builder(context) .addConnectionCallbacks(callbacks) .addOnConnectionFailedListener(onConnectionFailedListener); for (Api api : apis) { builder.addApi(api); } this.googleApiClient = builder.build(); } public void onStart() { googleApiClient.connect(); } public void onStop() { googleApiClient.disconnect(); } public boolean isConnected() { return connected; } public GoogleApiClient getGoogleApiClient() { return googleApiClient; } public interface OnProviderConnectedListener { void onConnected(); } public interface OnProviderConnectionFailedListener { void onConnectionFailed(); } }
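The onStart/onStop pair mirrors the usual GoogleApiClient lifecycle recommendation: the hosting Activity or Fragment forwards its own onStart and onStop so the client is only connected while the UI is visible, and isConnected() lets callers guard API calls made in between, since the connection callbacks arrive asynchronously.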
#include <bits/stdc++.h>
using namespace std;

int n, m;    // grid dimensions
int xc, yc;  // current position
using ll = long long;

// Is (x, y) inside the n x m grid?
bool checkin(ll x, ll y) {
    return (1 <= x && x <= n && y <= m && y >= 1);
}

// Binary-search the largest number of steps we can take from (xc, yc)
// in direction (dx, dy) while staying inside the grid, then move there.
int go(int dx, int dy, int &xc, int &yc) {
    int st = 0, en = 1e9 + 100;
    while (en - st > 1) {
        int mid = (en + st) >> 1;
        ll x = xc + 1ll * mid * dx;
        ll y = yc + 1ll * mid * dy;
        if (checkin(x, y)) st = mid;
        else en = mid;
    }
    xc = xc + 1ll * dx * st;
    yc = yc + 1ll * dy * st;
    return st;
}

int main() {
    scanf("%d %d", &n, &m);
    scanf("%d %d", &xc, &yc);
    int dx, dy, k;
    scanf("%d", &k);
    ll ans = 0;
    for (int i = 1; i <= k; i++) {
        scanf("%d %d", &dx, &dy);
        ans += go(dx, dy, xc, yc);
    }
    // %I64d: MSVC-style long long format, as used on old Codeforces judges.
    printf("%I64d\n", ans);
    return 0;
}
<reponame>AiLuoLiuCenYu/hello-word import { Button, Checkbox, Form, Icon, Input, message } from 'antd'; import * as React from 'react'; import rootStore from '../../rootStore'; import { storage } from '../../utils/helper'; import logo from './logo.svg'; import styles from './style.module.less'; const FormItem = Form.Item; interface IProps { history: { [key: string]: string | any }; } interface IStates { username: string; password: string; remember: boolean; } // @observer class LoginComponent extends React.Component<IProps, IStates> { // field is not touched at the beginning usernameFieldTouched = false; passwordFieldTouched = false; state = { password: '', remember: true, username: '' }; handleInputChange = (id: 'username' | 'password' | 'remember') => (e: any) => { if (id === 'remember') { this.setState({ remember: e.target.checked }); return; } if (!this[`${id}FieldTouched`]) { this[`${id}FieldTouched`] = true; } const updateObj = {}; updateObj[id] = e.target.value this.setState(updateObj); } handleSubmit = (e: any) => { e.preventDefault(); const { username, password, remember } = this.state; if (username.trim() === 'admin' && password === '<PASSWORD>') { const credentials = JSON.stringify({ access_token: '<PASSWORD>', user: { id: 1, username: username.trim() } }); storage.setItem(remember ? 'local' : 'session', 'credentials', credentials); rootStore.setAuthed(true); this.props.history.push('/'); return; } message.error('用户名或密码错误'); } render() { const { usernameFieldTouched, passwordFieldTouched, state } = this; const { username, password, remember } = state; const usernameError = !username.trim() && usernameFieldTouched; const passwordError = !password && passwordFieldTouched; return ( <div className={styles.container}> <div className={styles.top}> <img src={logo} className={styles.logo} alt="logo" /> <span className={styles.title}>react sail</span> </div> <div className={styles.login}> <Form onSubmit={this.handleSubmit} className={styles.form}> <FormItem validateStatus={usernameError ? 'error' : 'success'} help={usernameError ? 'Please input your username!' : ''} > <Input id="username" value={username} onChange={this.handleInputChange('username')} prefix={<Icon type="user" style={{ color: 'rgba(0,0,0,.25)' }} />} placeholder="admin" /> </FormItem> <FormItem validateStatus={passwordError ? 'error' : 'success'} help={passwordError ? 'Please input your password!' : ''} > <Input id="password" value={password} onChange={this.handleInputChange('password')} prefix={<Icon type="lock" style={{ color: 'rgba(0,0,0,.25)' }} />} type="password" placeholder="admin" /> </FormItem> <FormItem> <Checkbox checked={remember} onChange={this.handleInputChange('remember')} > Remember me </Checkbox> <a className={styles.forgot}>Forgot password</a> </FormItem> <FormItem> <Button disabled={!username.trim() || !password} type="primary" size="large" htmlType="submit" className={styles.btn} > Log in </Button> </FormItem> </Form> </div> </div> ); } } export default LoginComponent;
/** A Modulation which implements a classic ADSR envelope with an additional Delay at the front. DADSR is common in many synths, such as the Oberheim Matrix series. You get to specify the delay level and time (this is a time-based envelope, not a rate-based envelope), the attack level and time, the decay time and level (which is also the sustain level), and finally the release time and level. There are four curves offered linear (x) through x^8, none of which is exponential :-) but several are reasonable approximations. The envelope can be ONE SHOT, which basically means that the sustain stage is ignored -- once we reach the sustain level we immediately begin the release stage. We can also specify whether the envelope should reset on gate, or continue regardless of a gate event. */ public class DADSR extends Modulation implements ModSource { private static final long serialVersionUID = 1; // // DELAY ATTK DCY SUS REL DONE // +---+-----a---+-------+-+------+ // | | /|\ | | | | // | | / | \ | | r------r ... // | | / | \| |/| | // | | / | d-------d | | // | |/ | | | | | // l---l | | | | | // | | | | | | | // +---+-----+---+-------+-+------+ // | L A D |R // | | // | | // GATE/RESET EVENT RELEASE EVENT // // l, a, d, r: target levels for initial, delay, attack, decay [sustain], release. // L, A, D, R: rates for delay, attack, decay, release. // // On RELEASE, the modulation level starts at the CURRENT value. // On GATE, the modulation level starts at the CURRENT value. // On RESET, this current value is reset to the RELEASE LEVEL. // // Modulation values for time map to actual time as follows. // // 0.0 = 0.000 seconds // 0.1 = 0.011 seconds // 0.2 = 0.025 seconds // 0.3 = 0.042 seconds // 0.4 = 0.066 seconds // 0.5 = 0.098 seconds // 0.6 = 0.146 seconds // 0.7 = 0.226 seconds // 0.8 = 0.381 seconds // 0.9 = 0.817 seconds // 0.92 = 1.021 seconds // 0.94 = 1.341 seconds // 0.96 = 1.916 seconds // 0.98 = 3.256 seconds // 1.00 = 9.900 seconds // // We also provide three more time curves: x^2, x^4, and x^8 to a // states static final int DELAY = 0; static final int ATTACK = 1; static final int DECAY = 2; static final int SUSTAIN = 3; static final int RELEASE = 4; static final int DONE = 5; public static final int CURVE_LINEAR = 0; public static final int CURVE_X_2 = 1; public static final int CURVE_X_4 = 2; public static final int CURVE_X_8 = 3; public static final int CURVE_X_16 = 4; public static final int CURVE_X_32 = 5; public static final int CURVE_STEP = 6; public static final int CURVE_X_2_X_8 = 7; public static final int CURVE_X_4_X_16 = 8; public static final int CURVE_X_8_X_32 = 9; public static final int CURVE_1_MINUS_X_2 = 10; public static final int CURVE_1_MINUS_X_4 = 11; public static final int CURVE_1_MINUS_X_8 = 12; public static final int MOD_DELAY_TIME = 0; public static final int MOD_DELAY_LEVEL = 1; public static final int MOD_ATTACK_TIME = 2; public static final int MOD_ATTACK_LEVEL = 3; public static final int MOD_DECAY_TIME = 4; public static final int MOD_DECAY_LEVEL = 5; public static final int MOD_RELEASE_TIME = 6; public static final int MOD_RELEASE_LEVEL = 7; public static final int MOD_GATE_TR = 8; public static final int MOD_REL_TR = 9; public static final int OUT_MOD = 0; public static final int OUT_DELAY = 1; public static final int OUT_ATTACK = 2; public static final int OUT_DECAY = 3; public static final int OUT_SUSTAIN = 4; public static final int OUT_RELEASE = 5; public static final int OUT_DONE = 6; double[] level = new double[6]; // not all of these slots 
will be used double[] time = new double[6]; // not all of these slots will be used double start; double interval; int state = DONE; boolean released; int curve; boolean oneshot = false; boolean sync; boolean gateReset = false; boolean quickRelease = false; public boolean getSync() { return sync; } public void setSync(boolean val) { sync = val; } public void setOneShot(boolean val) { oneshot = val; } public boolean getOneShot() { return oneshot; } public int getCurve() { return curve; } public void setCurve(int val) { curve = val; } public boolean getGateReset() { return gateReset; } public void setGateReset(boolean val) { gateReset = val; } public void setQuickRelease(boolean val) { quickRelease = val; } public boolean getQuickRelease() { return quickRelease; } public static final int OPTION_CURVE = 0; public static final int OPTION_ONE_SHOT = 1; public static final int OPTION_GATE_RESET = 2; public static final int OPTION_SYNC = 3; public static final int OPTION_QUICK_RELEASE = 4; public Object clone() { DADSR obj = (DADSR)(super.clone()); obj.level = (double[])(obj.level.clone()); obj.time = (double[])(obj.time.clone()); return obj; } public int getOptionValue(int option) { switch(option) { case OPTION_CURVE: return getCurve(); case OPTION_ONE_SHOT: return getOneShot() ? 1 : 0; case OPTION_GATE_RESET: return getGateReset() ? 1 : 0; case OPTION_SYNC: return getSync() ? 1 : 0; case OPTION_QUICK_RELEASE: return getQuickRelease() ? 1 : 0; default: throw new RuntimeException("No such option " + option); } } public void setOptionValue(int option, int value) { switch(option) { case OPTION_CURVE: setCurve(value); return; case OPTION_ONE_SHOT: setOneShot(value != 0); return; case OPTION_GATE_RESET: setGateReset(value != 0); return; case OPTION_SYNC: setSync(value != 0); return; case OPTION_QUICK_RELEASE: setQuickRelease(value != 0); return; default: throw new RuntimeException("No such option " + option); } } public DADSR(Sound sound) { super(sound); defineModulations(new Constant[] { Constant.ZERO, Constant.ZERO, Constant.HALF, Constant.ONE, Constant.ZERO, Constant.ONE, Constant.HALF, Constant.ZERO, Constant.ZERO, Constant.ZERO }, new String[] { "Delay Time", "Delay Level", "Attack Time", "Attack Level", "Decay Time", "Sustain Level", "Release Time", "Release Level", "On Tr", "Off Tr" }); defineOptions(new String[] { "Curve", "One Shot", "Gate Reset", "MIDI Sync", "Fast Release" }, new String[][] { { "Linear", "x^2", "x^4", "x^8", "x^16", "x^32", "Step", "x^2, 8", "x^4, 16", "x^8, 32", "Inv x^2", "Inv x^4", "Inv x^8" }, { "One Shot" }, { "Gate Reset" }, { "MIDI Sync" }, { "No Release" } } ); defineModulationOutputs(new String[] { "Mod", "G", "A", "D", "S", "R", "E" }); setModulationOutput(0, 0); } public void gate() { super.gate(); if (isModulationConstant(MOD_GATE_TR)) doGate(); } void doGate() { time[DELAY] = (modulate(MOD_DELAY_TIME)); level[DELAY] = modulate(MOD_DELAY_LEVEL); time[ATTACK] = (modulate(MOD_ATTACK_TIME)); level[ATTACK] = modulate(MOD_ATTACK_LEVEL); time[DECAY] = (modulate(MOD_DECAY_TIME)); level[DECAY] = modulate(MOD_DECAY_LEVEL); time[SUSTAIN] = (0); // doesn't matter level[SUSTAIN] = level[DECAY]; // really doesn't matter actually, release modifies it time[RELEASE] = (modulate(MOD_RELEASE_TIME)); level[RELEASE] = modulate(MOD_RELEASE_LEVEL); time[DONE] = (0); // doesn't matter if (gateReset) level[DONE] = level[RELEASE]; // so we reset to the canonocal default else level[DONE] = getModulationOutput(0); // so we change from there when we starting state = DELAY; start = 
getSyncTick(sync); interval = toTicks(time[DELAY]); released = false; scheduleTrigger(T_DELAY); } public void release() { super.release(); if (isModulationConstant(MOD_REL_TR)) doRelease(); } void doRelease() { if (oneshot) return; if (state != RELEASE && state != DONE) { scheduleTrigger(T_RELEASE); } if (state == DECAY && !quickRelease) { released = true; return; } state = RELEASE; start = getSyncTick(sync); interval = toTicks(time[RELEASE]); level[SUSTAIN] = getModulationOutput(0); // so we decrease from there during release } public static final int T_DELAY = 0; public static final int T_ATTACK = 1; public static final int T_DECAY = 2; public static final int T_SUSTAIN = 3; public static final int T_RELEASE = 4; public static final int T_DONE = 5; int scheduledTriggers = 0; void scheduleTrigger(int val) { if (val == T_DELAY) { scheduledTriggers |= 1; } if (val == T_ATTACK) { scheduledTriggers |= 2; } else if (val == T_DECAY) { scheduledTriggers |= 4; } else if (val == T_SUSTAIN) { scheduledTriggers |= 8; } else if (val == T_RELEASE) { scheduledTriggers |= 16; } else if (val == T_DONE) { scheduledTriggers |= 32; } } public double toTicks(double mod) { return modToLongRate(mod) * Output.SAMPLING_RATE; } public void go() { super.go(); if (isTriggered(MOD_GATE_TR)) { doGate(); } else if (isTriggered(MOD_REL_TR)) { doRelease(); } if (scheduledTriggers != 0) { if ((scheduledTriggers & 1) == 1) { updateTrigger(OUT_DELAY); } if ((scheduledTriggers & 2) == 2) { updateTrigger(OUT_ATTACK); updateTrigger(OUT_MOD); } if ((scheduledTriggers & 4) == 4) { updateTrigger(OUT_DECAY); updateTrigger(OUT_MOD); } if ((scheduledTriggers & 8) == 8) { updateTrigger(OUT_SUSTAIN); updateTrigger(OUT_MOD); } if ((scheduledTriggers & 16) == 16) { updateTrigger(OUT_RELEASE); updateTrigger(OUT_MOD); } if ((scheduledTriggers & 32) == 32) { updateTrigger(OUT_DONE); updateTrigger(OUT_MOD); } scheduledTriggers = 0; } long tick = getSyncTick(sync); if (tick < start) // uh oh, probably switched to MIDI Sync { start = tick; } // need to reset level[DONE] if (state > DELAY) { level[DONE] = level[RELEASE]; } // What state are we in? // if we're in a sticky state, just return the level if (state == DONE) { setModulationOutput(0, level[DONE]); return; } if (!oneshot && !released && state == SUSTAIN) { setModulationOutput(0, level[SUSTAIN]); return; } // Do we need to transition to a new state? while (tick >= start + interval) { state++; if (state == DELAY) // this can't happen { updateTrigger(OUT_DELAY); } else if (state == ATTACK) { updateTrigger(OUT_MOD); updateTrigger(OUT_ATTACK); } else if (state == DECAY) { updateTrigger(OUT_MOD); updateTrigger(OUT_DECAY); } else if (state == SUSTAIN) { updateTrigger(OUT_MOD); updateTrigger(OUT_SUSTAIN); } else if (state == RELEASE) { updateTrigger(OUT_MOD); updateTrigger(OUT_RELEASE); } else if (state == DONE) { updateTrigger(OUT_MOD); updateTrigger(OUT_DONE); } // try sticky again if (state == DONE) { setModulationOutput(0, level[DONE]); return; } if (!oneshot && !released && state == SUSTAIN) { setModulationOutput(0, level[SUSTAIN]); return; } // update the state start = start + interval; interval = toTicks(time[state]); } // Where are we in the state? 
We compute only for delay, attack, decay, and release double firstLevel = level[DONE]; // initially, for state = delay if (state > DELAY) firstLevel = level[state - 1]; double alpha = (tick - start) / interval; switch(curve) { case CURVE_LINEAR: { // do nothing } break; case CURVE_X_2: { alpha = (1-alpha) * (1-alpha); alpha = 1 - alpha; } break; case CURVE_X_4: { alpha = (1-alpha) * (1-alpha); alpha = alpha * alpha; alpha = 1 - alpha; } break; case CURVE_X_8: { alpha = (1-alpha) * (1-alpha); alpha = alpha * alpha; alpha = alpha * alpha; alpha = 1 - alpha; } break; case CURVE_X_16: { alpha = (1-alpha) * (1-alpha); alpha = alpha * alpha; alpha = alpha * alpha; alpha = alpha * alpha; alpha = 1 - alpha; } break; case CURVE_X_32: { alpha = (1-alpha) * (1-alpha); alpha = alpha * alpha; alpha = alpha * alpha; alpha = alpha * alpha; alpha = alpha * alpha; alpha = 1 - alpha; } break; case CURVE_STEP: { alpha = 1.0; } break; case CURVE_X_2_X_8: { alpha = (1-alpha) * (1-alpha); double beta = alpha; // x^2 alpha = alpha * alpha; alpha = alpha * alpha; // x^8 alpha = 1 - (alpha + beta) * 0.5; } break; case CURVE_X_4_X_16: { alpha = (1-alpha) * (1-alpha); alpha = alpha * alpha; double beta = alpha; // x^4 alpha = alpha * alpha; alpha = alpha * alpha; // x^16 alpha = 1 - (alpha + beta) * 0.5; } break; case CURVE_X_8_X_32: { alpha = (1-alpha) * (1-alpha); alpha = alpha * alpha; alpha = alpha * alpha; double beta = alpha; // x^8 alpha = alpha * alpha; alpha = alpha * alpha; // x^32 alpha = 1 - (alpha + beta) * 0.5; } break; case CURVE_1_MINUS_X_2: { alpha = alpha * alpha; } break; case CURVE_1_MINUS_X_4: { alpha = alpha * alpha; alpha = alpha * alpha; } break; case CURVE_1_MINUS_X_8: { alpha = alpha * alpha; alpha = alpha * alpha; alpha = alpha * alpha; } break; default: { // shouldn't happen } } double levels = (1 - alpha) * firstLevel + alpha * level[state]; setModulationOutput(0, levels); } public String getModulationValueDescription(int modulation, double value, boolean isConstant) { if (isConstant) { if (modulation % 2 == 0) // it's a time { return String.format("%.4f" , modToLongRate(value)) + " Sec"; } else return super.getModulationValueDescription(modulation, value, isConstant); } else return ""; } public ModulePanel getPanel() { return new ModulePanel(DADSR.this) { public JComponent buildPanel() { Modulation mod = getModulation(); Box box = new Box(BoxLayout.Y_AXIS); box.add(new ModulationOutput(mod, 0, this)); for(int i = 0; i < mod.getNumModulations(); i++) { ModulationInput t; if (i == MOD_DELAY_TIME || i == MOD_ATTACK_TIME || i == MOD_DECAY_TIME || i == MOD_RELEASE_TIME) { t = new ModulationInput(mod, i, this) { public String[] getOptions() { return MidiClock.CLOCK_NAMES; } public double convert(int elt) { return MIDI_CLOCK_LONG_MOD_RATES[elt]; } }; } else { t = new ModulationInput(mod, i, this); } box.add(t); } for(int i = 0; i < mod.getNumOptions(); i++) { box.add(new OptionsChooser(mod, i)); } box.add(Strut.makeVerticalStrut(5)); Box box2 = new Box(BoxLayout.X_AXIS); box2.add(Box.createGlue()); Box box3 = new Box(BoxLayout.Y_AXIS); box3.add(Box.createGlue()); ModulationOutput mo = new ModulationOutput(mod, OUT_DELAY, this); mo.setTitleText(" D", false); box3.add(mo); box3.add(Strut.makeVerticalStrut(5)); mo = new ModulationOutput(mod, OUT_SUSTAIN, this); mo.setTitleText(" S", false); box3.add(mo); box2.add(box3); box3 = new Box(BoxLayout.Y_AXIS); mo = new ModulationOutput(mod, OUT_ATTACK, this); mo.setTitleText(" A", false); box3.add(mo); box3.add(Strut.makeVerticalStrut(5)); mo = new 
ModulationOutput(mod, OUT_RELEASE, this); mo.setTitleText(" R", false); box3.add(mo); box2.add(box3); box3 = new Box(BoxLayout.Y_AXIS); mo = new ModulationOutput(mod, OUT_DECAY, this); mo.setTitleText(" D", false); box3.add(mo); box3.add(Strut.makeVerticalStrut(5)); mo = new ModulationOutput(mod, OUT_DONE, this); mo.setTitleText(" E", false); box3.add(mo); box2.add(box3); box.add(box2); return box; } }; } }
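A note on the rising curve shapes above (CURVE_X_2 through CURVE_X_32): the switch statement computes the (1-x)^2, (1-x)^4, ... family by repeated squaring of (1 - alpha) rather than calling Math.pow, then flips the result with a final "1 -" so the curve rises. The following standalone sketch shows that idiom in isolation; the class and method names are illustrative only and are not part of the module above.

// Minimal standalone sketch (not part of the DADSR module) of the
// repeated-squaring curve-shaping idiom used in the switch statement.
public final class CurveSketch {
    /** Maps linear progress alpha in [0,1] to 1 - (1-alpha)^(2^k). */
    static double rising(double alpha, int k) {
        double a = (1 - alpha) * (1 - alpha);      // (1-alpha)^2
        for (int i = 1; i < k; i++) {
            a = a * a;                             // square k-1 more times
        }
        return 1 - a;                              // invert so the curve rises
    }

    public static void main(String[] args) {
        for (double alpha = 0; alpha <= 1.0; alpha += 0.25) {
            System.out.printf("alpha=%.2f  x^2=%.4f  x^8=%.4f%n",
                alpha, rising(alpha, 1), rising(alpha, 3));
        }
    }
}

For k = 1 this reproduces CURVE_X_2, and each extra squaring doubles the exponent, matching CURVE_X_4 through CURVE_X_32; the blended curves (e.g., CURVE_X_2_X_8) simply average two such exponents before inverting.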
package com.summithillsoftware.ultimate.model; import static com.summithillsoftware.ultimate.model.Action.Callahan; import static com.summithillsoftware.ultimate.model.Action.Catch; import static com.summithillsoftware.ultimate.model.Action.Drop; import static com.summithillsoftware.ultimate.model.Action.Goal; import static com.summithillsoftware.ultimate.model.Action.MiscPenalty; import static com.summithillsoftware.ultimate.model.Action.Stall; import static com.summithillsoftware.ultimate.model.Action.Throwaway; import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; import org.json.JSONException; import org.json.JSONObject; import com.summithillsoftware.ultimate.R; import com.summithillsoftware.ultimate.util.StringUtil; public class OffenseEvent extends Event { private static final long serialVersionUID = 1l; private static final String JSON_PASSER = "passer"; private static final String JSON_RECEIVER = "receiver"; public static final EnumSet<Action> OFFENSE_ACTIONS = EnumSet.of( Catch, Drop, Goal, Throwaway, Callahan, Stall, MiscPenalty); private Player passer; private Player receiver; public OffenseEvent() { } public OffenseEvent(Action action, Player passer) { this(action, passer, null); } public OffenseEvent(Action action, Player passer, Player receiver) { super(action); this.passer = passer; this.receiver = receiver; } public OffenseEvent(OffenseEvent event) { super(event); passer = event.passer; receiver = event.receiver; } public boolean isOurGoal() { return getAction() == Goal; } public boolean isTheirGoal() { return getAction() == Callahan; } public boolean isGoal() { return isOurGoal() || isTheirGoal(); } public boolean isCallahan() { return getAction() == Callahan; } public boolean isFinalEventOfPoint() { return isGoal(); } public boolean isOffense() { return true; } public boolean isPlayEvent() { return true; } @Override public boolean isCatch() { return getAction() == Catch; } @Override public boolean isDrop() { return getAction() == Drop; } @Override public boolean isTurnover() { return getAction() == Drop || getAction() == Throwaway || getAction() == Stall || getAction() == MiscPenalty || getAction() == Callahan; } @Override public boolean isNextEventOffense() { return getAction() == Catch || getAction() == Callahan; } @Override public List<Player> getPlayers() { List<Player> players = new ArrayList<Player>(); if (passer != null) { players.add(passer); } if (receiver != null) { players.add(receiver); } return players; } public boolean isAnonymous() { return isPasserAnonymous() && isReceiverAnonymous(); } public boolean isPasserAnonymous() { return (passer == null || passer.isAnonymous()); } public boolean isReceiverAnonymous() { return (receiver == null || receiver.isAnonymous()); } @Override protected String getDescriptionForTeamAndOpponent(String teamName,String opponentName) { String ourTeam = StringUtil.getString(R.string.event_description_our_team); switch(getAction()) { case Catch: { if (isAnonymous()) { // {team} pass return getString(R.string.event_description_pass,(teamName == null ? 
ourTeam : teamName)); } else if (isReceiverAnonymous()) { // {passer} pass return getString(R.string.event_description_pass,getPasser().getName()); } else if (isPasserAnonymous()) { // Pass to {passer} return getString(R.string.event_description_pass_to, getReceiver().getName()); } else { // {passer} to {receiver} return getString(R.string.event_description_pass_from_to, getPasser().getName(), getReceiver().getName()); } } case Drop: { if (isAnonymous()) { // {{team} drop return getString(R.string.event_description_drop,(teamName == null ? ourTeam : teamName)); } else if (isReceiverAnonymous()) { // {{passer} pass dropped return getString(R.string.event_description_drop_from,getPasser().getName()); } else if (isPasserAnonymous()) { // {{receiver} dropped pass return getString(R.string.event_description_drop_to, getReceiver().getName()); } else { // {{receiver} dropped from {passer} return getString(R.string.event_description_drop_from_to, getReceiver().getName(), getPasser().getName()); } } case Throwaway:{ return isAnonymous() ? getString(R.string.event_description_throwaway,(teamName == null ? ourTeam : teamName)) : getString(R.string.event_description_throwaway,getPasser().getName()); } case Stall:{ return isAnonymous() ? getString(R.string.event_description_stalled,(teamName == null ? ourTeam : teamName)) : getString(R.string.event_description_stalled,getPasser().getName()); } case MiscPenalty:{ return isAnonymous() ? getString(R.string.event_description_penalized,(teamName == null ? ourTeam : teamName)) : getString(R.string.event_description_penalized,getPasser().getName()); } case Callahan:{ return isAnonymous() ? getString(R.string.event_description_callahaned,(teamName == null ? ourTeam : teamName)) : getString(R.string.event_description_callahaned,getPasser().getName()); } case Goal: { if (isAnonymous()) { // {team} goal return getString(R.string.event_description_o_goal,(teamName == null ? ourTeam : teamName)); } else if (isReceiverAnonymous()) { // {passer} pass for goal return getString(R.string.event_description_o_goal_from,getPasser().getName()); } else if (isPasserAnonymous()) { // {receiver} goal return getString(R.string.event_description_o_goal, getReceiver().getName()); } else { // {team} goal ({getPasser()} to {receiver}) return getString(R.string.event_description_o_goal_from_to, (teamName == null ? ourTeam : teamName), getPasser().getName(), getReceiver().getName()); } } default: return ""; } } public Player getPasser() { if (passer == null) { passer = Player.anonymous(); } return passer; } public void setPasser(Player aPasser) { this.passer = aPasser == null ? Player.anonymous() : aPasser; } public Player getReceiver() { if (receiver == null) { receiver = Player.anonymous(); } return receiver; } public void setReceiver(Player aReceiver) { this.receiver = aReceiver == null ? 
Player.anonymous() : aReceiver; } protected void ensureValid() { if (passer == null) { passer = Player.anonymous(); } if (receiver == null || getAction() == Throwaway || getAction() == Stall || getAction() == MiscPenalty || getAction() == Callahan) { receiver = Player.anonymous(); } if (!OFFENSE_ACTIONS.contains(getAction())) { throw new InvalidEventException("Invalid action for offense event " + getAction()); } } public Player getPlayerOne() { return getPasser(); } public Player getPlayerTwo() { return getReceiver(); } public int image() { switch (getAction()) { case Goal: return R.drawable.goal_green; case Callahan: return R.drawable.callahan_red; case Catch: return R.drawable.pass; case Throwaway: return R.drawable.throwaway; case Drop: return R.drawable.drop; case Stall: return R.drawable.stall; case MiscPenalty: return R.drawable.penalty; default: return super.image(); } } public int imageMonochrome() { switch (getAction()) { case Goal: return R.drawable.goal; case Callahan: return R.drawable.callahan; default: return image(); } } public void useSharedPlayers() { passer = Player.replaceWithSharedPlayer(passer); receiver = Player.replaceWithSharedPlayer(receiver); } @Override public void readExternal(ObjectInput input) throws IOException, ClassNotFoundException { super.readExternal(input); passer = (Player)input.readObject(); receiver = (Player)input.readObject(); } @Override public void writeExternal(ObjectOutput output) throws IOException { super.writeExternal(output); output.writeObject(passer); output.writeObject(receiver); } public static OffenseEvent eventfromJsonObject(JSONObject jsonObject) throws JSONException { String actionAsString = jsonObject.getString(JSON_ACTION); Action action = Catch; if (actionAsString.equals("Catch")) { action = Catch; } else if (actionAsString.equals("Drop")) { action = Drop; } else if (actionAsString.equals("Goal")) { action = Goal; } else if (actionAsString.equals("Throwaway")) { action = Throwaway; } else if (actionAsString.equals("Stall")) { action = Stall; } else if (actionAsString.equals("MiscPenalty")) { action = MiscPenalty; } else if (actionAsString.equals("Callahan")) { action = Callahan; } Player passer = null; if (jsonObject.has(JSON_PASSER)) { passer = Team.getPlayerNamed(jsonObject.getString(JSON_PASSER)); } Player receiver = null; if (jsonObject.has(JSON_RECEIVER)) { receiver = Team.getPlayerNamed(jsonObject.getString(JSON_RECEIVER)); } OffenseEvent event = new OffenseEvent(action, passer, receiver); populateGeneralPropertiesFromJsonObject(event, jsonObject); return event; } public JSONObject toJsonObject() throws JSONException { ensureValid(); JSONObject jsonObject = super.toJsonObject(); String actionAsString = null; switch (getAction()) { case Catch: actionAsString = "Catch"; break; case Drop: actionAsString = "Drop"; break; case Goal: actionAsString = "Goal"; break; case Throwaway: actionAsString = "Throwaway"; break; case Stall: actionAsString = "Stall"; break; case MiscPenalty: actionAsString = "MiscPenalty"; break; case Callahan: actionAsString = "Callahan"; break; default: actionAsString = "Catch"; break; } jsonObject.put(JSON_ACTION, actionAsString); jsonObject.put(JSON_PASSER, passer.getName()); jsonObject.put(JSON_RECEIVER, receiver.getName()); return jsonObject; } }
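One observation on eventfromJsonObject and toJsonObject above: the JSON action strings match the Action enum constant names exactly, so the hand-written if/else chains could in principle be collapsed to Enum.valueOf with a guarded fallback. The sketch below is standalone and illustrative only (it declares its own minimal Action enum rather than using the app's), under that naming assumption.

// Standalone sketch: string-to-enum mapping with the same Catch default
// that the existing hand-written chain falls back to. Illustrative names.
public class ActionMappingSketch {
    enum Action { Catch, Drop, Goal, Throwaway, Stall, MiscPenalty, Callahan }

    /** Parses an action name, falling back to Catch on unknown input. */
    static Action parseAction(String name) {
        try {
            return Action.valueOf(name);
        } catch (IllegalArgumentException e) {
            return Action.Catch;   // same default as the if/else chain
        }
    }

    public static void main(String[] args) {
        System.out.println(parseAction("Goal"));      // Goal
        System.out.println(parseAction("unknown"));   // Catch
    }
}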
Bioprinting of Organ-on-Chip Systems: A Literature Review from a Manufacturing Perspective

This review discusses the reported studies investigating the use of bioprinting to develop functional organ-on-chip systems from a manufacturing perspective. These organ-on-chip systems model the liver, kidney, heart, lung, gut, bone, vessel, and tumors to demonstrate the viability of bioprinted organ-on-chip systems for disease modeling and drug screening. In addition, the paper highlights the challenges involved in using bioprinting techniques for organ-on-chip system fabrication and suggests future research directions. Based on the reviewed studies, it is concluded that bioprinting can be applied for the automated and assembly-free fabrication of organ-on-chip systems. These bioprinted organ-on-chip systems can help in the modeling of several different diseases and can thereby expedite drug discovery by providing an efficient platform for drug screening in the preclinical phase of drug development processes.

Introduction

The development of new drugs usually takes place in phases over an extended period of time and is an expensive process. A typical drug development process involves four phases before FDA review and approval. In the first phase, drugs are tested in vitro, i.e., the testing occurs using cells or biological materials outside of a living animal. If initial testing is successful, the drug is then investigated in vivo, i.e., using living animal models. However, approximately half of the drugs that pass the first phase fail in later phases. One cause for this high failure rate is that in vitro models are not always able to accurately represent interactions between the drug and the biological environment. Another cause is that standard in vivo animal models often misrepresent human physiology.

An organ-on-chip system is a microfabricated multichannel 3D microfluidic structure that emulates specific functions of human organs. Increased specificity of organ-on-chip systems is accomplished by using dynamic fluid flow to provide nutrition and oxygenation with tissue-specific environmental cues and molecular gradients. It is foreseen that the use of such sophisticated organ-on-chip systems for modeling the activities, mechanics, and physiological responses of human tissues and organs will allow for inexpensive and faster testing of new therapeutic drugs compared with the use of traditional in vitro and in vivo animal models.

Micro-fabrication methods, including soft-lithography and photolithography, are traditionally used to fabricate organ-on-chip systems. In soft lithography, an elastomeric stamp with patterned relief structures on its surface is used to generate patterns and structures (also known as a mold) with feature sizes ranging from 30 nm to 100 µm. Polydimethylsiloxane (PDMS) is then poured into the mold to create a closed-circuit channel sealed with a glass slide. In photolithography, a silicon wafer is covered by photoresist material, and ultraviolet (UV) light removes the photoresist material from some portions of the wafer surface. Then, some silicon is etched away from the portions of the wafer not covered with the photoresist material, to create a mold. PDMS is then poured into this mold, to create a closed-circuit channel sealed with a glass slide. However, these methods are expensive and time-consuming, and they suffer from limited availability of compatible biological materials.
Bioprinting involves the spatial patterning of living cells and other biologics by stacking them using a computer-aided layer-by-layer deposition approach to fabricate living tissue-like constructs. It has the ability to create channels that have features with complex design and is a one-step fabrication process. In addition, it has the potential to be fully automated, maintain accuracy, and be replicated with relative ease. In recent years, bioprinting has been used to produce organ-on-chip systems. Figure 1 shows the trend in the number of organ-on-chip publications involving bioprinting over the past five years. Several review papers have been published on the state of the art of applying bioprinting to fabricate organ-on-chip systems; however, these reviews discuss the studies primarily from physiological and biological perspectives.

This paper is the first review paper to provide a review on bioprinting used to fabricate organ-on-chip systems from the perspective of manufacturing. Section 2 introduces the bioprinting techniques and their working principles, as well as advantages and limitations for fabricating organ-on-chip systems. Section 3 discusses recent advances in bioprinted organ-on-chip systems, categorized on the basis of tissue type, e.g., liver, kidney, and heart. Section 4 briefly presents key requirements of bioinks, the material that is printed to fabricate constructs, and the use of hydrogels as bioinks. Section 5 highlights the current challenges in using bioprinting to fabricate organ-on-chip systems and presents some directions for future research. In the final section, key insights are summarized.
Figure 2 shows the number of reported studies utilizing different bioprinting techniques for fabricating organ-on-chip systems. Out of twenty-two reviewed studies, sixteen used extrusion-based bioprinting, four used inkjet bioprinting, and only one used stereolithography bioprinting. There were no reported studies using laser-based bioprinting. Understanding the working principles of these techniques, along with their advantages and limitations in fabricating organ-on-chip systems, can provide guidance when researchers select bioprinting techniques for organ-on-chip systems. Figure 3 shows schematic illustrations of the three bioprinting techniques that have been used in organ-on-chip fabrication.

Extrusion-Based Bioprinting

The dispensing head can move along the X and Y axes in the horizontal plane, and up and down along the Z axis, as directed by the CAD (computer-aided design) model. Instructions from the CAD model are input (as a G-code file) to the robotic system, which consists of the bioprinter and the computer hardware and software. In some printers, the printing platform moves up and down along the Z axis while the head can only move in the x-y plane. The fluid-dispensing system can be driven by pressure generated from a pneumatic-, mechanical- (piston- or screw-driven), or solenoid-based system. In certain printers, the extrusion temperature is controlled by heating or cooling the thermal jacket that holds the syringe. Major controllable printing parameters include extrusion pressure (the pressure at which bioink is extruded), extrusion temperature (the temperature at which bioink is extruded), and printing speed (the speed at which the dispensing head moves in the x-y plane).

The extrusion-based process is capable of printing a wide array of biomaterials, including composite bioinks that are comparable to natural tissue, and therefore is suitable for fabricating organ-on-chip systems that involve several different types of tissues and extracellular matrix components. In addition, the extrusion-based process usually has a higher printing speed than other bioprinting processes. Furthermore, the relative simplicity of the technology enables ease of use for researchers across disciplines. However, the extrusion-based process has drawbacks such as limited resolution, nozzle clogging, and lower cell viability. Extrusion-based bioprinting process parameters need to be optimized to balance shape fidelity and cell viability. Smaller needle diameters increase the shape fidelity of printed constructs but are harmful to cells due to increased shear stress. Increased extrusion pressure is required to print bioinks of high viscosity, but this also harms cells through increased shear stress. Different cell types have different sensitivities to shear stress, so the optimization of process parameters depends on the biomaterials used during printing.
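The CAD-to-machine step described above is worth making concrete. Below is a minimal, illustrative Java sketch that emits RepRap-style G-code for a single extruded segment; the command set (G1 with X/Y/Z/E/F words) follows common desktop-printer conventions, and all coordinates, feed rates, and extrusion values are made-up examples rather than the output of any real slicer or bioprinter.

// Illustrative only: emits RepRap-style G-code for one straight extruded
// segment. Real bioprinter dialects and slicer output differ; the feed rate
// (F, mm/min) and extrusion proxy (E per mm) below are assumed values.
public class GcodeSketch {
    public static void main(String[] args) {
        double x0 = 10.0, y0 = 10.0, x1 = 30.0, y1 = 10.0, z = 0.2;
        double feed = 300.0;     // travel/print speed in mm/min (assumed)
        double ePerMm = 0.05;    // extruded amount per mm of travel (assumed)
        double length = Math.hypot(x1 - x0, y1 - y0);
        System.out.printf("G1 Z%.2f F%.0f%n", z, feed);            // move to layer height
        System.out.printf("G1 X%.2f Y%.2f F%.0f%n", x0, y0, feed); // travel to start point
        System.out.printf("G1 X%.2f Y%.2f E%.3f F%.0f%n",          // extrude the segment
                x1, y1, length * ePerMm, feed);
    }
}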
Inkjet Bioprinting

In inkjet bioprinting, droplets of bioink are formed and dispensed (through an extruder controlled by an actuator) onto a platform in a layer-by-layer fashion to fabricate a 3D construct. The actuator can have a thermal (vaporization-based) or piezoelectric modality for actuation. The extruder can move in the x-y-z directions for fabrication as per the CAD design. However, in certain printers, the extruder is fixed while the printing platform moves in the x-y-z directions. In inkjet printing, the main printing parameter that can be controlled is droplet size, which is governed by the actuator's modality.

Inkjet bioprinting offers a high resolution (~30 µm), which makes it suitable for fabricating organ-on-chip systems whose features are hundreds of microns in size or less. In addition, constructs printed with inkjet bioprinting offer high cell viability. However, inkjet bioprinting is only suitable for bioinks with low viscosity (~0.1 Pa·s), and the shape fidelity of vertical constructs is poorer with inkjet printing than with other bioprinting techniques.
Because of these factors, the application of inkjet printing is limited in fabricating organ-on-chip systems that contain tissue types and biomaterials of higher viscosity.

Stereolithography

In stereolithography, a UV laser selectively polymerizes photosensitive resin containing cells to form a solid layer, after which the build platform shifts down in the z direction by one layer, and the process is repeated until the construct is complete. The setup contains a projector array and a digital micro-mirror device, which can both be moved in the x-y plane. The photosensitive resin is composed of biocompatible, polymerizable oligomers and a biocompatible photoinitiator. The position and the intensity of the UV laser focus are the parameters that can be controlled in stereolithography. By controlling the position of the laser focus, polymerization of the resin can be precisely controlled for selective polymerization. This achieves a high resolution, which is required in the fabrication of organ-on-chip systems. Additionally, vertical constructs printed by stereolithography have good quality. However, stereolithography is time-intensive relative to other techniques. Another limitation is the need for intense UV radiation, which can have a negative effect on cell viability.

Bioink Used in Bioprinting of Organ-on-Chip Systems

In the bioprinting process, from an organ-on-chip fabrication perspective, the bioink formulation needs to satisfy certain physical and biological requirements. The primary biological requirement is that the bioink needs to be biocompatible, which means that it should not be toxic to the cells and should not alter the functionality or physiology of the cells. The second biological requirement is biomimicry, i.e., the bioink should mimic the extracellular matrix so that cells can proliferate. Physical properties that the bioink should possess include shear-thinning behavior, i.e., a non-linear decrease in viscosity as shear stress is applied, and structural fidelity. Shear-thinning behavior is necessary for the bioink to be printable. Bioink also needs to have suitable mechanical properties so that the printed construct is stable.

Unique properties of hydrogels make them ideal candidates as bioink constituents. Hydrogels are three-dimensional networks composed of hydrophilic polymer chains which are formed by cross-linking in an aqueous medium through various mechanisms such as physical crosslinking, chemical crosslinking, and photo crosslinking. Crosslinking mechanisms have distinct advantages and disadvantages. Photo-crosslinked constructs have high shape fidelity, but the UV light used during photo crosslinking can create free radicals which are harmful to cells. Ionic crosslinking, a common form of physical crosslinking, forms mechanically weaker constructs than chemical crosslinking but promotes higher cell viability. Hydrogels have the ability to absorb water up to a thousand times their dry weight, which makes them suitable materials to act as an extracellular matrix and support cell proliferation. The reviewed studies have used natural hydrogels like alginate, gelatin, cellulose, fibrin, and collagen, and synthetic hydrogels such as poly(ethylene glycol) (PEG), poly(ε-caprolactone) (PCL), Pluronic, and gelatin methacryloyl (GelMA), as shown in Table 1.
Organ-on-Chip Systems Fabricated Using Bioprinting

This section outlines specific examples, as shown in Table 2, of the research efforts for modeling the liver, kidneys, heart, lungs, gut, bone, vessel, and tumors on microfluidic chips, and describes the fabrication processes and applications of these organ-on-chip systems. Table 3 summarizes the major results that were found for each category of organ-on-chip system, based on the collected literature.

Liver-on-Chip

The liver plays a critical role in drug metabolism and detoxification of blood. Drug-induced hepatotoxicity is one of the main reasons for drug withdrawal in later phases of clinical trials, thus highlighting the need for liver-on-chip models that can be used to evaluate hepatotoxicity during drug screening. Bhise et al. developed a liver-on-chip platform by bioprinting hepatocyte spheroid-laden hydrogel constructs directly within the culture chamber of a bioreactor. By using the developed liver-on-chip platform as a model to predict acetaminophen (a drug) toxicity, with results comparable to those obtained from in vivo models, they demonstrated that this bioprinted liver-on-chip system has applications for drug toxicity analysis. Snyder et al. printed epithelial-laden and hepatocyte-laden Matrigel on a microfluidic chip to study radiation shielding of liver cells by the prodrug amifostine. This study demonstrated the application of a bioprinted liver-on-chip system to obtain an understanding of multi-cellular biological systems.

Current in vitro models typically lose their drug metabolism functions rapidly; liver-on-chip systems capable of maintaining drug metabolism functions for a long period of time are required. Grix et al. performed a proof-of-concept study in which a perfusable liver organoid was bioprinted using the stereolithography technique. By verifying the stable protein expression of the liver organoid using immunohistology and qPCR (quantitative polymerase chain reaction), they demonstrated that a bioprinted liver organoid can be cultivated on a chip to produce a liver-on-chip system, and that such a model can maintain metabolic function for a long time. To demonstrate that rapid liver-on-chip modeling is possible, Lee et al. used a novel bioprinting method involving a multi-head tissue/organ building system to fabricate a liver-on-chip system in a single step. The bioprinted liver-on-chip system showed significantly enhanced liver function, indicating that the developed bioprinting method can be applied to fabricate organ-on-chip systems for mechanistic therapeutic studies and drug screening.

Kidney-on-Chip

Drug-induced kidney toxicity is responsible for nearly one-fifth of drug failures in Phase III clinical trials, even when the drug passes preclinical testing. Kidney-on-chip systems can act as in vitro kidney models that accurately predict the human drug response in preclinical testing. Homan et al. bioprinted convoluted renal proximal tubules on a microfluidic chip to develop a kidney-on-chip system. In this model, the tubule structure was created by extrusion-based bioprinting using Pluronic ink. A silicon gasket was first printed onto a glass slide, which was then filled via extrusion printing with gelatin-fibrinogen bioink (mimicking the kidney extracellular matrix). The Pluronic was then liquefied by cooling and flushed out to create an open tubule for epithelial cell seeding.
The printed kidney-on-chip system exhibited a nephrotoxic response to cyclosporine A (an immunosuppressive drug).

Heart-on-Chip

Although current in vitro models of the heart are suitable for short-term modeling of human cardiac conditions and small-scale drug screening, they are not well suited for higher-throughput drug studies and longer-term studies. Heart-on-chip systems can provide a stable platform for long-term drug screening studies. Zhang et al. created a heart-on-chip model by integrating bioprinting and microfluidics technology. Extrusion-based bioprinting was used to print bioink containing endothelial cells to form a microfibrous scaffold. After the endothelial cells migrated to the outside of the fibers, the scaffold was seeded with cardiomyocytes. The resultant semi-self-assembled endothelialized myocardium scaffold accurately mimicked the in vivo vascularized structure of the myocardium and was integrated with a microfluidic chip to be used as a platform for cardiovascular drug screening. The research team observed dose-dependent responses of cardiomyocytes and endothelial cells to an anti-cancer drug, demonstrating that such a heart-on-chip model can be successfully used for drug screening.

Lind et al. used multimaterial extrusion-based bioprinting to fabricate a heart-on-chip system in a single step. Dextran ink was used to print a film on a glass slide substrate, on which thermoplastic polyurethane (TPU) ink was used to print a cantilever base. A carbon black:thermoplastic polyurethane (CB:TPU) ink strain gauge was printed on top and covered by a TPU ink wire cover. PDMS ink filaments were printed on the top part of this cantilever, followed by the addition of a polyamide ink filled with high-conductivity silver particles, which was insulated with PDMS ink. The printed microfluidic device had eight wells which acted as cell incubators and were seeded with cardiomyocytes. This heart-on-chip system was employed to study the effect of drugs (verapamil and isoproterenol) on the beating of cardiac microtissues. The beating frequency and strength were observed and recorded directly with this heart-on-chip system, demonstrating the application of this novel heart-on-chip device for toxicology and drug-screening research.

Lung-on-Chip

Human lungs are exposed to environmental agents which can lead to respiratory diseases. They are sites of disorders such as asthma and chronic obstructive lung diseases. Lung-on-chip systems can be used as a platform for the development of drugs for the treatment of these diseases and disorders. Park et al. bioprinted a vascularized lung-on-chip system in a single step. PCL bioink was printed to create a frame of channels and chambers on a printed PDMS substrate, which was populated with endothelial cell bioink and lung fibroblast cell bioink via bioprinting, followed by printing PDMS to fix the frame. The lung-on-chip model was successfully used to recapitulate an inflammatory response.

Gut-on-Chip

The gut is a vital organ with a complex architecture involving multiple diverse cell and tissue types. Gut-on-chip systems can help in learning about different gastric and intestinal cells and can contain gut microbiota to mimic a dynamic gut environment. In order to better mimic one aspect of the complex architecture of the gut, Kim et al. used bioprinting to fabricate a 3D intestinal villi model with capillaries.
In this study, bioink containing epithelial cells was printed as a layer of the core region, and bioink containing adenocarcinoma cells was printed as a layer of the shell region, to form a mesh structure on which the villus structure was printed vertically. The successful cellular proliferation of both types of cells signified the establishment of cell-cell interactions.

Bone-on-Chip

Osteoporosis is a disease caused by impaired bone turnover, which increases the risk of bone fracture in men and women over the age of 50. The drugs currently used to treat osteoporosis pose problems like elevated risk of cancer, stroke, and blood clots with long-term use. Due to the prevalence of osteoporosis, a drug development platform that enables the analysis of the long-term physiological response to drugs is needed. Emerging bone-on-chip systems can help in expediting the process of drug development. Lee et al. used inkjet bioprinting to print micropatterns containing the antibiotic rifampicin and biphasic calcium phosphate nanoparticles dispersed in a poly(lactic-co-glycolic acid) (PLGA) matrix on a glass slide, which was integrated with a microfluidic chip. The microfluidic chip was seeded with osteoblast (bone) cells. The study concluded that the inkjet-printed micropatterns promoted osteogenic development by osteoblasts and prevented bacterial infection.

Vessel-on-Chip

Proper nutrient and oxygen supply is crucial for ensuring long-term cell viability in complex multi-tissue models, which makes developing vascular networks crucial in organ-on-chip systems. Lee et al. used inkjet bioprinting to create a vascular channel on a flow chamber. In order to fabricate vasculature, gelatin bioink containing endothelial cells was printed on layers of collagen matrix in a straight channel form, and collagen was printed over the gelatin pattern. The endothelial cells attached to the inner surface of the channel during incubation, and subsequently the gelatin in the chamber was liquefied and flushed out to create a channel. It was noted that cells proliferated, and successful gene expression analysis revealed that this method can be used to create vessels in organ-on-chip systems.

In a similar study, Abudupataer et al. used extrusion-based bioprinting to fabricate a vessel-on-chip system. Two layers of bioink containing endothelial cells and muscle cells were printed on a chip (fabricated with PDMS), and after the cells proliferated, a continuous flow of growth medium was perfused in the channel of the chip to mimic blood flow in the vessel. Such a vessel-on-chip model can be used to study the pathogenesis of disease and for drug screening. In another study, Kolesky et al. used extrusion-based bioprinting to print a vascular system using a sacrificial bioink composed of Pluronic and thrombin. The sacrificial bioink was printed in a crosshatched pattern with a thickness of 1 cm. Bioink containing endothelial cells, muscle cells, and dermal fibroblast cells was printed around the sacrificial bioink. Removal of the fugitive ink resulted in the formation of a connected network of vessels that supported endothelialization and retained cell viability up to 95% post printing, demonstrating that such a vascular system can potentially be used to construct vessels in organ-on-chip models. In an analogous study, Schöneberg et al. used inkjet bioprinting to print a multi-layer vasculature imitating in vivo blood vessels.
On a custom-made bioreactor, a layer of fibrin bioink with muscle cells and fibrinogen-collagen with the crosslinker thrombin was printed on a sacrificial gelatin core containing epithelial cells, which was flushed out to create open channels. High cell viability after printing was noted along with protein expression, indicating that the system has the necessary biological functionality to potentially be used in vessel-on-chip systems as a platform for pre-screening of drugs. In yet another study, Gao et al. used extrusion-based bioprinting to print a vascular structure with multilevel fluidic channels (macro-channel and micro-channel) which can potentially be integrated into organ-on-chip systems to better simulate the micro-environment of blood vessels.

Zhang et al. used extrusion-based bioprinting to fabricate a thrombosis-on-chip system. Thrombosis is the formation of a clot in a blood vessel. In this study, a scaffold was printed with Pluronic, the dehydrated scaffold was placed on a PDMS mold filled with GelMA, and the scaffold was crosslinked, followed by dissolution of the sacrificial channels, to produce a construct with hollow channels. After the incubation of seeded endothelial cells in the hollow channels, a solution of blood with added calcium chloride (to induce clotting) was injected to form a clot. The clot maturation and the subsequent dissolution of the clot upon treatment with tPA (an anti-clotting drug) demonstrated that such a thrombosis-on-chip system can be used for thrombosis drug screening.

Tumor-on-Chip

Cancer is an umbrella term for a variety of diseases having the same underlying cause of unregulated division of cells, which can be a cause of morbidity and mortality. Cancer treatment is especially challenging due to different tumor characteristics (a tumor being a mass of tissue caused by unregulated division of cells) in different patients. Tumor-on-chip systems tackle this heterogeneity by enabling the development of patient-specific anti-cancer drugs targeted to treat the patient-specific tumor. Yi et al. used extrusion-based bioprinting to fabricate a glioblastoma-on-chip system. Glioblastoma (GBM) is an aggressive type of cancer that occurs in the brain or spinal cord. This tumor-on-chip system was fabricated by printing its chamber structures using silicon ink on a glass substrate; inside the chamber, bdECM (brain decellularized ECM) bioink containing epithelial cells was printed to construct a GBM-mimetic ring structure, the ring was filled by printing bdECM bioink containing glioblastoma cells, and the chamber was covered with a glass slip. The developed glioblastoma-on-chip system resisted concurrent treatment of chemoradiation and temozolomide (an anti-cancer drug) in the same way as observed in cancer patients, demonstrating that such a tumor-on-chip system can be used to determine drug combinations for cancer treatment.

Hamid et al. used extrusion-based bioprinting in combination with photolithography to fabricate a breast tumor-on-chip model. Photolithography was used to fabricate the base of the chip using PDMS. SU-8 (an epoxy-based photoresist material) was used to create the internal micro-architecture of channels on the chip, followed by printing of human breast adenocarcinoma cells in the micro-channels. Three chips with 300, 500, and 700 µm channel pore sizes were printed for the purpose of comparative evaluation. Successful cell proliferation and drug metabolization by the cells demonstrated that such a breast tumor-on-chip can be used for investigating the efficacy of drugs.
In another study, the same research team used extrusion-based bioprinting along with maskless lithography, a type of photolithography that does not use a static mask, to fabricate a co-culture tumors-on-chip system. Maskless lithography was used to fabricate the base of the chip using PDMS, on which SU-8 was printed and channels were patterned using an ultraviolet light-emitting head. The micro-channels were populated with liver cancer and breast cancer cells. Successful cell proliferation and cell integration in co-culture showed that such a co-culture tumors-on-chip system is viable for biological characterization.

Mi et al. used inkjet bioprinting to fabricate a breast tumor-on-chip system. Breast cancer cells and endothelial cells were printed on a PDMS chip (fabricated with soft-lithography). Post printing, the cells showed good cell viability and cell quality. A significant inhibition of tumor cell migration ability was observed when treated with paclitaxel (an anti-cancer drug), demonstrating the effectiveness of such a tumor-on-chip system in aiding cancer research and anti-cancer drug screening.

Cheng et al. used extrusion-based bioprinting to print paper-based cancer tissue models which have the potential for application in the cost-effective fabrication of organ-on-chip models. In this study, a sacrificial petroleum jelly-liquid paraffin ink was used to print on bacterial cellulose hydrogel, the entire matrix was air-dried to form a paper-like membrane, and perfusable microchannels were obtained by removing the sacrificial ink using heat. Endothelial cells were seeded into the microchannels and cancer cells were seeded onto the surface of the paper-based device. It was observed that the endothelial cells and tumor cells spread and proliferated, and cytotoxicity of cancer cells was observed on treatment with tamoxifen (an anti-cancer drug), demonstrating that such paper-based models can possibly be used for producing tumor-on-chip systems for drug screening.

The available in vitro anti-tumor drug screening strategies, including 2D cell models, are not able to mimic biological systems sufficiently since they lack true perfusion and draining microcirculation systems. Although current organ-on-chip systems are able to integrate perfusable blood vessels, only a few in vitro models are able to re-establish both a blood and lymphatic vessel pair. Cao et al. fabricated a tumor-on-chip model by bioprinting a hollow blood vessel and lymphatic vessel pair which reproduced the microcirculation featuring both delivery and drainage routes to better mimic the transport kinetics of biomolecules and drugs. A custom-made coaxial nozzle containing three injection channels was connected to an extrusion-based bioprinter and used to print the vessels. The bioink consisted of alginate, GelMA, photoinitiator, and PEGDA. PEGOA was extruded through the middle layer of the coaxial nozzle, while the CaCl2 solution was ejected through both the inner and outer layers to immediately crosslink the alginate and obtain tubular structures. Then the GelMA, PEGDA, and PEGOA components were photo crosslinked by UV light, followed by immersion in EDTA (ethylenediaminetetraacetic acid) solution to remove the sacrificial alginate.
The bioprinted vessel pair was embedded in a GelMA matrix inoculated with MCF-7 breast cancer cells to examine the drug transport rate, by studying the diffusion of FITC (a fluorescent staining agent for microscopy) through the system, and to investigate the effect of doxorubicin (a chemotherapy anti-cancer drug) perfusion on the MCF-7 cancer cells. It was observed that the permeability parameters of the bioprinted blood and lymphatic vessels in such a bioprinted tumor-on-chip system could be controlled by precisely tuning the composition of the bioink. The tumor-on-chip could meet different biological needs for delivery and drainage channels under various scenarios and offer a convenient method for in vitro drug screening.

Challenges and Future Directions

There are several challenges associated with the use of bioprinting technology for organ-on-chip system fabrication. The resolution that can be achieved with available 3D bioprinters is not as good as that achievable by traditional organ-on-chip fabrication techniques like soft-lithography. Although there are a few bioprinting systems that can achieve micron-level resolution, their high cost makes them unattractive options compared to traditional soft-lithography and photolithography systems for achieving the same level of resolution. Bioprinting systems also have a relatively low throughput for manufacturing organ-on-chip systems in large-scale production settings. There are certain stereolithography printing systems that can achieve reasonable throughput suitable for low- and medium-volume production, but they cannot compete with other techniques like injection molding when it comes to large-scale manufacturing of organ-on-chip systems.

The properties of bioink materials for fabricating organ-on-chip systems still need improvement. Soft-lithography has been used widely for fabricating organ-on-chip systems due to the optimal properties of the PDMS material, including transparency, biocompatibility, flexibility, gas-permeability, relatively low cost, and high shape fidelity. It is challenging to find a biomaterial with all these properties. The vasculature fabricated using bioprinting also has a limited resolution: with soft-lithography, the patterning of micro-channels with a resolution of 5 microns is standard and sub-micron vasculature fabrication is also possible, whereas the highest resolution of a microchannel printed with stereolithography is 100 microns.

Organ-on-chip systems fabricated with bioprinting have potential for commercial applications if sophisticated bioprinting techniques are developed which are capable of bioprinting organ-on-chip systems in a scalable, accurate, and high-throughput manner. The combination of bioprinting techniques can enable the creation of a bioprinting process that overcomes the limitations of the individual techniques; this viable strategy needs further systematic investigation. Another area that needs further investigation is the integration of biosensors into organ-on-chip systems fabricated with bioprinting. This integration can enhance the functionality of organ-on-chip systems by allowing the monitoring of cell behavior and the cellular microenvironment in dynamic and controlled conditions. Drug screening can potentially become much more accurate if the complex organ systems of the human body, containing multiple organs, can be accurately reflected in organ-on-chip systems.
There is therefore a need to investigate multimaterial bioprinting from the perspective of printing multiple tissues in the same organ-on-chip system, which would enable the printing of such multi-organ systems. Currently, most of the reported studies either focus on demonstrating novel bioprinting strategies for fabricating functional organ-on-chip systems or provide proof-of-concept applications of using organ-on-chip systems fabricated with bioprinting for drug screening. There are very few studies that have investigated optimization strategies for bioprinting process parameters in a systematic way. For the successful application of bioprinting in fabricating organ-on-chip systems, standardization and optimization of the printing process are necessary. Therefore, there is a need to investigate the relationship between the different process parameters of the bioprinting process and the functions of printed organ-on-chip systems.

Conclusions

The use of bioprinting for fabricating organ-on-chip systems has the potential to expedite the drug screening process while minimizing the cost investment in drug development. While research regarding the use of bioprinting techniques to fabricate organ-on-chip systems is still in the early stages, the reviewed studies demonstrate that bioprinted organ-on-chip systems are able to functionally mimic in vivo environments and provide drug responses comparable to those obtained from animal and in vitro models. In addition, the reviewed studies show that using bioprinting for fabricating organ-on-chip systems possesses advantages over traditional techniques of fabricating organ-on-chip systems, including soft-lithography and photolithography, in terms of automation, cost, time consumption, and design modifications. However, from the manufacturing perspective, for bioprinting to become the norm in fabricating organ-on-chip systems for the purpose of drug screening, investigations need to be conducted regarding the improvement of resolution, vasculature fabrication, process optimization, and process standardization. Nevertheless, with further advancements in bioprinting processes, it is expected that bioprinted organ-on-chip systems will be used broadly in disease modeling and drug development.
use super::{get_lit_str, get_meta_items, parse_lit_into_path, parse_lit_str}; use proc_macro2::TokenStream; use serde_derive_internals::Ctxt; use syn::{Expr, ExprLit, ExprPath, Lit, Meta, MetaNameValue, NestedMeta, Path}; pub(crate) static VALIDATION_KEYWORDS: &[&str] = &[ "range", "regex", "contains", "email", "phone", "url", "length", "required", ]; #[derive(Debug, Clone, Copy, PartialEq)] enum Format { Email, Uri, Phone, } impl Format { fn attr_str(self) -> &'static str { match self { Format::Email => "email", Format::Uri => "url", Format::Phone => "phone", } } fn schema_str(self) -> &'static str { match self { Format::Email => "email", Format::Uri => "uri", Format::Phone => "phone", } } } #[derive(Debug, Default)] pub struct ValidationAttrs { length_min: Option<Expr>, length_max: Option<Expr>, length_equal: Option<Expr>, range_min: Option<Expr>, range_max: Option<Expr>, regex: Option<Expr>, contains: Option<String>, required: bool, format: Option<Format>, } impl ValidationAttrs { pub fn new(attrs: &[syn::Attribute], errors: &Ctxt) -> Self { ValidationAttrs::default() .populate(attrs, "schemars", false, errors) .populate(attrs, "validate", true, errors) } pub fn required(&self) -> bool { self.required } fn populate( mut self, attrs: &[syn::Attribute], attr_type: &'static str, ignore_errors: bool, errors: &Ctxt, ) -> Self { let duplicate_error = |path: &Path| { if !ignore_errors { let msg = format!( "duplicate schemars attribute `{}`", path.get_ident().unwrap() ); errors.error_spanned_by(path, msg) } }; let mutual_exclusive_error = |path: &Path, other: &str| { if !ignore_errors { let msg = format!( "schemars attribute cannot contain both `{}` and `{}`", path.get_ident().unwrap(), other, ); errors.error_spanned_by(path, msg) } }; let duplicate_format_error = |existing: Format, new: Format, path: &syn::Path| { if !ignore_errors { let msg = if existing == new { format!("duplicate schemars attribute `{}`", existing.attr_str()) } else { format!( "schemars attribute cannot contain both `{}` and `{}`", existing.attr_str(), new.attr_str(), ) }; errors.error_spanned_by(path, msg) } }; for meta_item in attrs .iter() .flat_map(|attr| get_meta_items(attr, attr_type, errors, ignore_errors)) .flatten() { match &meta_item { NestedMeta::Meta(Meta::List(meta_list)) if meta_list.path.is_ident("length") => { for nested in meta_list.nested.iter() { match nested { NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("min") => { if self.length_min.is_some() { duplicate_error(&nv.path) } else if self.length_equal.is_some() { mutual_exclusive_error(&nv.path, "equal") } else { self.length_min = str_or_num_to_expr(&errors, "min", &nv.lit); } } NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("max") => { if self.length_max.is_some() { duplicate_error(&nv.path) } else if self.length_equal.is_some() { mutual_exclusive_error(&nv.path, "equal") } else { self.length_max = str_or_num_to_expr(&errors, "max", &nv.lit); } } NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("equal") => { if self.length_equal.is_some() { duplicate_error(&nv.path) } else if self.length_min.is_some() { mutual_exclusive_error(&nv.path, "min") } else if self.length_max.is_some() { mutual_exclusive_error(&nv.path, "max") } else { self.length_equal = str_or_num_to_expr(&errors, "equal", &nv.lit); } } meta => { if !ignore_errors { errors.error_spanned_by( meta, format!("unknown item in schemars length attribute"), ); } } } } } NestedMeta::Meta(Meta::List(meta_list)) if meta_list.path.is_ident("range") => {
for nested in meta_list.nested.iter() { match nested { NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("min") => { if self.range_min.is_some() { duplicate_error(&nv.path) } else { self.range_min = str_or_num_to_expr(&errors, "min", &nv.lit); } } NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("max") => { if self.range_max.is_some() { duplicate_error(&nv.path) } else { self.range_max = str_or_num_to_expr(&errors, "max", &nv.lit); } } meta => { if !ignore_errors { errors.error_spanned_by( meta, format!("unknown item in schemars range attribute"), ); } } } } } NestedMeta::Meta(Meta::Path(m)) if m.is_ident("required") || m.is_ident("required_nested") => { self.required = true; } NestedMeta::Meta(Meta::Path(p)) if p.is_ident(Format::Email.attr_str()) => { match self.format { Some(f) => duplicate_format_error(f, Format::Email, p), None => self.format = Some(Format::Email), } } NestedMeta::Meta(Meta::Path(p)) if p.is_ident(Format::Uri.attr_str()) => { match self.format { Some(f) => duplicate_format_error(f, Format::Uri, p), None => self.format = Some(Format::Uri), } } NestedMeta::Meta(Meta::Path(p)) if p.is_ident(Format::Phone.attr_str()) => { match self.format { Some(f) => duplicate_format_error(f, Format::Phone, p), None => self.format = Some(Format::Phone), } } NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("regex") => { match (&self.regex, &self.contains) { (Some(_), _) => duplicate_error(&nv.path), (None, Some(_)) => mutual_exclusive_error(&nv.path, "contains"), (None, None) => { self.regex = parse_lit_into_expr_path(errors, attr_type, "regex", &nv.lit).ok() } } } NestedMeta::Meta(Meta::List(meta_list)) if meta_list.path.is_ident("regex") => { match (&self.regex, &self.contains) { (Some(_), _) => duplicate_error(&meta_list.path), (None, Some(_)) => mutual_exclusive_error(&meta_list.path, "contains"), (None, None) => { for x in meta_list.nested.iter() { match x { NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) if path.is_ident("path") => { self.regex = parse_lit_into_expr_path(errors, attr_type, "path", lit) .ok() } NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) if path.is_ident("pattern") => { self.regex = get_lit_str(errors, attr_type, "pattern", lit) .ok() .map(|litstr| { Expr::Lit(syn::ExprLit { attrs: Vec::new(), lit: Lit::Str(litstr.clone()), }) }) } meta => { if !ignore_errors { errors.error_spanned_by( meta, format!("unknown item in schemars regex attribute"), ); } } } } } } } NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. })) if path.is_ident("contains") => { match (&self.contains, &self.regex) { (Some(_), _) => duplicate_error(&path), (None, Some(_)) => mutual_exclusive_error(&path, "regex"), (None, None) => { self.contains = get_lit_str(errors, attr_type, "contains", lit) .map(|litstr| litstr.value()) .ok() } } } NestedMeta::Meta(Meta::List(meta_list)) if meta_list.path.is_ident("contains") => { match (&self.contains, &self.regex) { (Some(_), _) => duplicate_error(&meta_list.path), (None, Some(_)) => mutual_exclusive_error(&meta_list.path, "regex"), (None, None) => { for x in meta_list.nested.iter() { match x { NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit, .. 
})) if path.is_ident("pattern") => { self.contains = get_lit_str(errors, attr_type, "contains", lit) .ok() .map(|litstr| litstr.value()) } meta => { if !ignore_errors { errors.error_spanned_by( meta, format!( "unknown item in schemars contains attribute" ), ); } } } } } } } _ => {} } } self } pub fn apply_to_schema(&self, schema_expr: &mut TokenStream) { let mut array_validation = Vec::new(); let mut number_validation = Vec::new(); let mut object_validation = Vec::new(); let mut string_validation = Vec::new(); if let Some(length_min) = self .length_min .as_ref() .or_else(|| self.length_equal.as_ref()) { string_validation.push(quote! { validation.min_length = Some(#length_min as u32); }); array_validation.push(quote! { validation.min_items = Some(#length_min as u32); }); } if let Some(length_max) = self .length_max .as_ref() .or_else(|| self.length_equal.as_ref()) { string_validation.push(quote! { validation.max_length = Some(#length_max as u32); }); array_validation.push(quote! { validation.max_items = Some(#length_max as u32); }); } if let Some(range_min) = &self.range_min { number_validation.push(quote! { validation.minimum = Some(#range_min as f64); }); } if let Some(range_max) = &self.range_max { number_validation.push(quote! { validation.maximum = Some(#range_max as f64); }); } if let Some(regex) = &self.regex { string_validation.push(quote! { validation.pattern = Some(#regex.to_string()); }); } if let Some(contains) = &self.contains { object_validation.push(quote! { validation.required.insert(#contains.to_string()); }); if self.regex.is_none() { let pattern = crate::regex_syntax::escape(contains); string_validation.push(quote! { validation.pattern = Some(#pattern.to_string()); }); } } let format = self.format.as_ref().map(|f| { let f = f.schema_str(); quote! { schema_object.format = Some(#f.to_string()); } }); let array_validation = wrap_array_validation(array_validation); let number_validation = wrap_number_validation(number_validation); let object_validation = wrap_object_validation(object_validation); let string_validation = wrap_string_validation(string_validation); if array_validation.is_some() || number_validation.is_some() || object_validation.is_some() || string_validation.is_some() || format.is_some() { *schema_expr = quote! { { let mut schema = #schema_expr; if let schemars::schema::Schema::Object(schema_object) = &mut schema { #array_validation #number_validation #object_validation #string_validation #format } schema } } } } } fn parse_lit_into_expr_path( cx: &Ctxt, attr_type: &'static str, meta_item_name: &'static str, lit: &syn::Lit, ) -> Result<Expr, ()> { parse_lit_into_path(cx, attr_type, meta_item_name, lit).map(|path| { Expr::Path(ExprPath { attrs: Vec::new(), qself: None, path, }) }) } fn wrap_array_validation(v: Vec<TokenStream>) -> Option<TokenStream> { if v.is_empty() { None } else { Some(quote! { if schema_object.has_type(schemars::schema::InstanceType::Array) { let validation = schema_object.array(); #(#v)* } }) } } fn wrap_number_validation(v: Vec<TokenStream>) -> Option<TokenStream> { if v.is_empty() { None } else { Some(quote! { if schema_object.has_type(schemars::schema::InstanceType::Integer) || schema_object.has_type(schemars::schema::InstanceType::Number) { let validation = schema_object.number(); #(#v)* } }) } } fn wrap_object_validation(v: Vec<TokenStream>) -> Option<TokenStream> { if v.is_empty() { None } else { Some(quote! 
        {
            if schema_object.has_type(schemars::schema::InstanceType::Object) {
                let validation = schema_object.object();
                #(#v)*
            }
        })
    }
}

fn wrap_string_validation(v: Vec<TokenStream>) -> Option<TokenStream> {
    if v.is_empty() {
        None
    } else {
        Some(quote! {
            if schema_object.has_type(schemars::schema::InstanceType::String) {
                let validation = schema_object.string();
                #(#v)*
            }
        })
    }
}

fn str_or_num_to_expr(cx: &Ctxt, meta_item_name: &str, lit: &Lit) -> Option<Expr> {
    match lit {
        Lit::Str(s) => parse_lit_str::<ExprPath>(s).ok().map(Expr::Path),
        Lit::Int(_) | Lit::Float(_) => Some(Expr::Lit(ExprLit {
            attrs: Vec::new(),
            lit: lit.clone(),
        })),
        _ => {
            cx.error_spanned_by(
                lit,
                format!(
                    "expected `{}` to be a string or number literal",
                    meta_item_name
                ),
            );
            None
        }
    }
}
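Editorial aside, not part of the crate: the fields `apply_to_schema` fills in (min_length, max_length, minimum, maximum, pattern, required, format) map onto the standard JSON Schema validation keywords. A minimal Python sketch, using the third-party jsonschema package, shows the kind of schema fragment the generated code ends up producing; the concrete values are assumptions for illustration only.

# Illustrative JSON Schema fragment mirroring the keywords the Rust code
# above emits; the concrete values are assumptions, not output of the crate.
from jsonschema import Draft7Validator

schema = {
    "type": "string",
    "minLength": 1,         # from length(min = 1)
    "maxLength": 10,        # from length(max = 10)
    "pattern": "^[a-z]+$",  # from regex(pattern = "^[a-z]+$")
    "format": "email",      # from the email attribute (advisory by default)
}
Draft7Validator.check_schema(schema)  # the fragment is itself a valid schema
errors = list(Draft7Validator(schema).iter_errors("abc"))
print(errors)  # [] -- "abc" satisfies length and pattern; format isn't enforced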
package migration

import (
	"github.com/jinzhu/gorm"
	"github.com/sirupsen/logrus"
	"github.com/syukri21/Paperid-Golang-Testcase/src/database/entity"
)

// AutoMigration auto-migrates the database schema and wires up foreign keys.
func AutoMigration(conn *gorm.DB) {
	conn.AutoMigrate(entity.User{})
	if conn.HasTable(&entity.User{}) {
		conn.AutoMigrate(entity.Profile{})
		conn.Model(&entity.Profile{}).AddForeignKey("user_id", "users(id)", "CASCADE", "CASCADE")

		conn.AutoMigrate(entity.FinanceAccount{})
		conn.Model(&entity.FinanceAccount{}).AddForeignKey("user_id", "users(id)", "CASCADE", "CASCADE")

		if conn.HasTable(&entity.FinanceAccount{}) {
			conn.AutoMigrate(entity.FinanceTransaction{})
			conn.Model(&entity.FinanceTransaction{}).AddForeignKey("user_id", "users(id)", "CASCADE", "CASCADE")
			conn.Model(&entity.FinanceTransaction{}).AddForeignKey("finance_account_id", "finance_accounts(id)", "CASCADE", "CASCADE")
		}
	}

	conn.AutoMigrate(entity.FinanceAccountType{})
	if conn.HasTable(&entity.FinanceAccount{}) {
		conn.Model(&entity.FinanceAccount{}).AddForeignKey("type_id", "finance_account_types(id)", "CASCADE", "CASCADE")
	}

	logrus.Info("Success running migration")
}
/**     VBSearched - check to see if virtual base has already been searched
 *
 *      flag = VBSearched (type)
 *
 *      Entry   type = type index of virtual base
 *
 *      Exit    none
 *
 *      Returns TRUE if virtual base has already been searched
 *              FALSE if virtual base has not been searched
 */

bool_t
VBSearched (
    CV_typ_t type
    )
{
    ulong i;

    /* Linear scan of the list of virtual bases searched so far. */
    for (i = 0; i < pVBSearch->CurIndex; i++) {
        if (pVBSearch->dom[i] == type) {
            return (TRUE);
        }
    }
    return (FALSE);
}
Lower motor unit discharge rates in gastrocnemius lateralis, but not in gastrocnemius medialis or soleus, in runners with Achilles tendinopathy: a pilot study

Objectives Deficits in muscle performance could be a consequence of a reduced ability of a motor neuron to increase the rate at which it discharges. This study aimed to investigate motor unit (MU) discharge properties of each triceps surae (TS) muscle and TS torque steadiness during submaximal intensities in runners with Achilles tendinopathy (AT). Methods We recruited runners with (n = 12) and without (n = 13) mid-portion AT. MU discharge rate was analysed for each of the TS muscles, using high-density surface electromyography, during 10 and 20% isometric plantar flexor contractions. Results MU mean discharge rate was lower in the gastrocnemius lateralis (GL) in AT compared to controls. In AT, GL MU mean discharge rate did not increase as torque increased from 10% peak torque, 8.24 pps (95% CI 7.08 to 9.41), to 20%, 8.52 pps (7.41 to 9.63, p = 0.540); in controls, however, MU discharge rate increased as torque increased from 10%, 8.39 pps (7.25–9.53), to 20%, 10.07 pps (8.89–11.25, p < 0.001). There were no between-group differences in gastrocnemius medialis (GM) or soleus (SOL) MU discharge rates. We found no between-group differences in the coefficient of variation of MU discharge rate in any of the TS muscles, nor in TS torque steadiness. Conclusion Our data demonstrate that runners with AT may have a lower neural drive to GL, failing to increase MU discharge rate to adjust for the increase in torque demand. Further research is needed to understand how interventions focussing on increasing neural drive to GL would affect muscle function in runners with AT. Supplementary Information The online version contains supplementary material available at 10.1007/s00421-022-05089-w.

Introduction

Achilles tendinopathy (AT) is among the most prevalent running injuries, accounting for about 6.2-9.5% of all running injuries (Lagas et al. 2020; Mousavi et al. 2019). AT is an overloading injury and, although its aetiology is multifactorial (Cook et al. 2016), deficits in muscle performance are suggested to be a key factor (O'Neill et al. 2019; Mahieu et al. 2006), which seem to be maintained long after symptomatic recovery (Silbernagel et al. 2007). Several lines of evidence suggest that neural changes to the triceps surae might underpin some of these chronic motor deficits (Crouzier et al. 2020; Fernandes et al. 2021). In particular, it has been shown that individuals with AT have: (1) a lower contribution of gastrocnemius lateralis (GL) to plantar flexor force production (Crouzier et al. 2020); and (2) greater levels of intra-cortical inhibition associated with lower plantar flexor endurance during the single-leg heel raise test (Fernandes et al. 2021), when compared to controls. Collectively, these findings suggest that changes in how the central nervous system controls muscle coordination (Hug and Tucker 2017) within the triceps surae (force distribution and activation) might impact load distribution to the tendon in individuals with AT. This is of particular importance because altered triceps surae coordination (due to lower individual muscle contributions to force) could create uneven loading of the Achilles tendon and contribute to tendinopathy (Cook and Purdam 2009).
There has been some speculation about differences in recruitment strategies within the triceps surae in people with AT, with conflicting evidence about which muscle is affected. One study (O'Neill et al. 2019) suggested that soleus (SOL) would be the main muscle responsible for the strength and endurance deficits observed in these individuals. The AT group had deficits in plantar flexor torque during dynamometry testing, irrespective of knee position (knee flexed/extended), compared to controls. The authors reasoned that if the gastrocnemii were affected, deficits between groups would be larger with the knee extended and smaller with the knee flexed. However, determining force deficits in SOL relative to the gastrocnemii solely by comparing torque measures between flexed and extended knee positions provides very limited and possibly inaccurate information about muscle recruitment patterns, neglecting all the neurophysiological mechanisms that enable force production in the first place (Enoka and Duchateau 2017). Conversely, runners with acute AT (< 3 months; Crouzier et al. 2020) have about 22% lower contribution of GL during 20 and 40% of peak plantar flexor isometric torque, but no differences in gastrocnemius medialis (GM) or SOL, compared to controls. The force-sharing contribution of individual muscles of the triceps surae was estimated for each muscle based on the root mean square (RMS) of the surface EMG (electromyography) signal amplitude and other muscle characteristics (i.e. physiological cross-sectional area). Even though data from the surface EMG signal are somewhat limited for estimating changes in neural drive to a specific muscle, this result suggests that individual muscle recruitment strategies might be altered in AT. From a neurophysiological perspective, the force exerted by a muscle depends, partly, on the recruitment and discharge rates of its motor units (Enoka and Duchateau 2017). Thus, deficits in motor performance could be a result of a reduced ability to recruit motor units and/or to increase the rate at which motor neurons discharge (Enoka and Duchateau 2017). The analysis of individual motor unit discharge rates from each muscle of the triceps surae (Hug et al. 2020) is a more reliable way of investigating the central nervous system's recruitment strategy for the triceps surae (i.e. neural drive) than the typical and limited interference EMG (Souza et al. 2018). This method has also been used to estimate changes in neural drive to specific muscles in individuals with other chronic musculoskeletal conditions such as ACL injury (Nuccio et al. 2021) and patellofemoral pain. Furthermore, reduced control of the plantar flexors could create tendon overload, progressing to early stages of tendinopathy (Cook and Purdam 2009). Increased fluctuation in torque (torque steadiness) is associated with painful musculoskeletal conditions such as knee osteoarthritis or patellofemoral pain (Rice et al. 2015) or following ACL reconstruction (Telianidis et al. 2014) and could occur as a consequence of greater variation in motor unit discharge rate (Enoka and Farina 2021). The coefficient of variation of motor unit discharge represents, at an individual muscle level, the ability to effectively control muscle torque and is an important measure that can help explain motor performance (Enoka and Duchateau 2017).
Thus, this study aimed to: (i) investigate differences in neural drive to each muscle of the triceps surae during submaximal plantar flexor contractions in individuals with AT; and (ii) determine between-group differences in the coefficient of variation of motor unit discharge rate and in torque steadiness. We hypothesised that the motor unit mean discharge rate of each individual muscle of the triceps surae would be lower in the tendinopathy group, with differences between muscles of the triceps surae in the AT group. We also hypothesised that the AT group would have increased variability in motor unit discharge rate and torque steadiness compared to controls.

Methods

This was a cross-sectional study comparing runners with and without mid-portion AT. A sample size of 18 participants (9 per group) was calculated based on a similar study (Gallina et al. 2018) (GPower software parameters: effect size F = 0.40; α err prob: 0.05; power 0.95; n = 9 per group). Twenty-five endurance runners were recruited for this study: 12 with mid-portion AT (7 males, 44.3 years old ± 95% CI 6.7, 173 cm ± 5.7, 76.2 kg ± 9.3) and 13 healthy controls (7 males, 34.0 years old ± 4.2, 171 cm ± 5.6, 64.8 kg ± 7.0), all with a running routine of more than twice weekly for more than 4 months. Runners were recruited from local running clubs, via email and social media. Participants in this study are the same as in a separate study (with one more participant in the AT group) (Fernandes et al. 2021). Torque measures such as absolute and normalised plantar flexor peak isometric torque and explosive torque were not different between the groups, as has been reported previously (Fernandes et al. 2021). All volunteers were endurance runners, recruited from local running clubs around Southeast Queensland, Australia. Diagnosis of mid-portion AT was confirmed by an experienced physiotherapist during examination if patients presented with localised mid-portion Achilles tendon pain for more than 3 months, pain provoked by physical activities in a dose-dependent way, and pain on palpation at the mid-portion of the tendon. Volunteers were excluded if they presented with: insertional AT; previous rupture or surgery of the Achilles tendon; clinical findings indicating a differential diagnosis for the Achilles tendon pain (such as a tendon tear); regular participation in other sports involving high-speed running (football, rugby, AFL etc.); a VISA-A score > 90 points for the AT group or < 100 for the healthy group; any other musculoskeletal injuries of the lower limb; any neurological disorder; or mental health issues affecting consent. All participants were free of comorbidities such as cardiac, pulmonary, renal, endocrine or gastrointestinal disease and were not taking any medication for tendon pain or that would affect tendon structure (Knobloch 2016). Prior to testing, all participants read and signed a detailed informed consent document and completed the VISA-A questionnaire. The average VISA-A score was 70.1 ± 5.7 for the AT group and 100 ± 0 for the control group. The AT group had a running routine of 38.7 ± 9.1 km/week and the control group 30.4 ± 8.4 km/week; mean difference of 8.3 ± 12.2. This study was approved by the Queensland University of Technology Human Research and Ethics Committee in line with the Declaration of Helsinki. Data collection was conducted during the COVID-19 pandemic and all safety procedures followed local state government policies.
Data collection and analysis

Plantar flexor isometric peak torque was measured using an isokinetic dynamometer (Biodex Medical Systems, Shirley, New York). For the bilateral AT presentations (n = 3), the most symptomatic leg was used; for the control group, the dominant leg was tested. Leg dominance was determined by asking participants what their preferred leg was and, if they were unsure, which leg they would use to kick a ball. Participants were seated (75 degrees of hip flexion) with their knee straight and the foot perpendicular to the shank. Warm-up consisted of 2 × 4 s isometric contractions at each participant's perceived 20, 40, 60 and 80% maximal voluntary isometric contraction intensity. After warm-up, participants performed at least three maximal voluntary isometric contractions, until < 5% variation was observed between contractions, and the highest value was used. Thereafter, participants performed three trapezoidal submaximal isometric contractions at each target intensity based on their peak isometric torque (3 × 10% and 3 × 20% peak torque) in a randomised order. For each intensity, participants had four attempts to familiarise themselves with the task before recordings. The rate of torque rise and decline was standardised at 10% peak torque/s between contractions of different intensities, with a 10 s sustained plateau at the top, followed by 1 min of rest between contractions (Vecchio et al. 2019). Participants received real-time visual feedback of the trapezoidal pathway, displayed on a monitor placed 1 m away from the participant. During the plantar flexor trapezoidal contractions, HD-EMG (Sessantaquattro, OTBioelettronica, Torino, Italy) signals were recorded with OT Biolab + software (version 1.3.0., OTBioelettronica, Torino, Italy) from SOL, GM and GL. After skin preparation (shaving, light abrasion, and cleansing of the area with alcohol), electrodes were positioned following the estimated muscle fibre orientation, using a bi-adhesive layer with conductive paste to ensure good skin-electrode contact and conductivity. One 32-channel electrode grid (ELSCH032NM6, OTBioelettronica, Torino, Italy) was placed on GM, one 32-channel electrode grid on GL, and two 32-channel electrode grids on SOL, one laterally and one medially to the Achilles tendon (Fig. 1). Two electrodes were used on SOL to increase the number of identified motor units. Data from both electrodes were clustered into one file to increase motor unit yield, prior to analysis of SOL motor unit characteristics. The ground strap electrode (WS2, OTBioelettronica, Torino, Italy) was dampened and positioned around the ankle joint of the tested leg. The EMG signals were recorded in monopolar mode, amplified (256×), band-pass filtered (10-500 Hz) and converted to digital signal at 2048 Hz by a 16-bit wireless amplifier (Sessantaquattro, OTBioelettronica, Torino, Italy), before being stored for offline analysis. Since the grid adapter device (AD2 × 32SE, OTBioelettronica, Torino, Italy) has only two channels for electrode connection, each intensity of the protocol had to be performed twice, once with the electrodes connected to the gastrocnemii and a second time with the electrodes connected to SOL; this order was randomised for each intensity. The torque signal was recorded and analysed with OT Biolab + software.
The HD-EMG signal was recorded and analysed offline: it was decomposed into motor unit spike trains and converted into instantaneous discharge rates using a blind source separation decomposition technique implemented in the DEMUSE tool software (v.4.1; The University of Maribor, Slovenia). For each muscle and each intensity, the two best contractions, with the lowest deviation from the trapezoidal torque trajectory, were combined into one file and motor units were tracked across the two contractions at the same intensity for analysis. All motor units were visually inspected; erroneous discharge times were excluded and missed discharges included. Manual inspection is required to reduce automatic decomposition discharge errors and improve data reliability (Martinez-Valdes et al. 2016). Reliability of manual inspection across operators is very high for motor unit mean discharge rate and recruitment, with an intra-class correlation coefficient (ICC) of > 0.99 (Hug et al. 2021). We also calculated the ICC for our data for motor unit discharge rate across the two contractions at each intensity, per muscle and group, as shown in Table 1. Only motor units with a pulse-to-noise ratio (PNR) > 30 dB and sensitivity > 90% were used for data analysis (Vecchio et al. 2019). For participants who yielded no good-quality motor units (PNR > 30 dB) after motor unit tracking across the two contractions at the same intensity (Frančič and Holobar 2021), the best single contraction (highest PNR per motor unit) was used for analysis, with the motor unit discharge characteristics inspected as described above. Due to the reduced number of the same motor units found across intensities, motor unit tracking across intensities was not feasible and was therefore not used for analysis. The assessor who performed the motor unit analysis was not blinded to group. Motor units were collected from the 10 s isometric plateau; the first and last two seconds were excluded, and analyses of motor unit mean discharge rate, coefficient of variation of motor unit discharge rate and torque steadiness were performed over the central 6 s of the isometric plateau. Motor unit mean discharge rates and coefficients of variation of motor unit discharge rate were then calculated for each muscle (SOL, GM and GL) and intensity tested (10 and 20% peak torque). Torque steadiness was analysed as the coefficient of variation in torque for each torque intensity tested. Torque was filtered (10 Hz, 4th-order, low-pass). To reduce intra-subject variability, data for the coefficient of variation of torque were averaged across the four contractions at each of the two torque intensities tested: the two contractions recorded during gastrocnemii testing and the two contractions during SOL testing (Jakobi et al. 2020).

Statistical analysis

All analyses were performed using R Studio (version 1.3.1093). Models were fitted using the lme4 package (Bates et al. 2015). Separate linear mixed-effects models were used to compare motor unit mean discharge rates and coefficients of variation of motor unit discharge rate of identified motor units for each muscle (SOL, GM, GL), between intensities (10 and 20%) and groups (AT and control). The models used a random intercept (participant ID) and slope (recruitment threshold by intensity) for each participant in the study, to account for the influence of motor unit populations and the correlation between repeated observations within each participant.
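As an illustrative aside for readers wanting to reproduce the analysis: the paper fit these models in R (lme4/emmeans), so the Python sketch below is an assumed analogue using statsmodels, not the authors' code; the column names and long-format layout of the DataFrame are assumptions.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
# Synthetic stand-in for the real long-format data (one row per motor unit).
df = pd.DataFrame({
    "discharge_rate": rng.normal(9.0, 1.5, 200),
    "group": rng.choice(["AT", "control"], 200),
    "intensity": rng.choice(["10%", "20%"], 200),
    "recruitment_threshold": rng.uniform(2.0, 18.0, 200),
    "participant": rng.integers(1, 26, 200).astype(str),
})

# Random intercept per participant plus a random slope for recruitment
# threshold, mirroring the random-effects structure described above.
model = smf.mixedlm(
    "discharge_rate ~ group * intensity",
    data=df,
    groups=df["participant"],
    re_formula="~recruitment_threshold",
)
result = model.fit()
print(result.summary())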
The estimated marginal mean differences and 95% confidence intervals (CI) for all variables (motor unit mean discharge rate, coefficient of variation of motor unit discharge rate, and torque steadiness) between groups were determined using the emmeans package (Lenth 2016). Normality assumptions were confirmed by analysis of the histogram of residuals, the Q-Q plot, and the residual-predicted scatterplot. An independent t test was used to compare torque steadiness between groups at each torque intensity. An alpha level of 5% was set for statistical significance for all tests and, when appropriate, Bonferroni post hoc analysis was performed. Data are presented as mean ± 95% CI. Data for the total number of motor units per group are presented as mean ± SD.

Fig. 1 Schematic representation of electrode positioning used for data acquisition for all three muscles of the triceps surae (GM gastrocnemius medialis, GL gastrocnemius lateralis and SOL soleus). One electrode was placed over GM, one electrode over GL, and two electrodes over SOL, one medially and one laterally to the Achilles tendon

Motor unit identification

We found a total of 1,055 motor units: 518 motor units in the AT group (43.1 ± 7.0 per participant) and 537 motor units in the control group (41.4 ± 6.3 per participant) across all muscles and intensities. The total number, mean and standard deviation of identified motor units per group, muscle, and contraction intensity are reported in Table 2. The number of identified motor units for each participant per muscle and intensity is reported in Supplementary material 1. GL was the muscle with the fewest motor units found in single contractions, and some were lost during motor unit tracking between the two contractions of the same intensity.

Coefficient of variation of motor unit discharge rate

SOL showed no difference in coefficient of variation of motor unit discharge rate between intensities (F = 2.963, p = 0.086) or between groups (F = 0.151, p = 0.700). In the AT group, the coefficient of variation of motor unit discharge rate was 10.3% (8.8-11.8) at 10% peak torque and 9.3% (7.9-10.7) at 20% peak torque; in the control group, it was 10.2% (8.8-11.7) at 10% peak torque and 10.0% (8.6-11.5) at 20% peak torque. GM presented a difference in coefficient of variation of motor unit discharge rate between intensities (F = 51.203, p < 0.001, η²p = 0.10) but not between groups (F = 3.673, p = 0.07), with no intensity × group interaction (F = 0.872, p = 0.350). In the AT group, the coefficient of variation of motor unit discharge rate was 12.15% (11.1-13.1) at 10% peak torque and 9.16% (8.1-10.1) at 20% peak torque; in the control group, it was 10.75% (9.8-11.7) at 10% peak torque and 8.45% (7.4-9.5) at 20% peak torque. In GL, as in GM, we observed a difference between intensities (F = 5.222, p = 0.024, η²p = 0.05) but not between groups (F = 0.661, p = 0.428), and no intensity × group interaction (F = 2.779, p = 0.098).

Fig. 2 Motor unit mean discharge rate of soleus during 10 and 20% peak isometric contraction. Each dot represents a single motor unit data point, coloured by participant. Mean and 95% confidence interval are offset to the left to facilitate visualisation. pps pulse per second

Fig. 3 Motor unit mean discharge rate of gastrocnemius medialis during 10 and 20% peak isometric contraction. Each dot represents a single motor unit data point, coloured by participant. Mean and 95% confidence interval are offset to the left to facilitate visualisation. pps pulse per second
Fig. 4 Motor unit mean discharge rate of gastrocnemius lateralis during 10 and 20% peak isometric contraction. Each dot represents a single motor unit data point, coloured by participant. Mean and 95% confidence interval are offset to the left to facilitate visualisation. pps pulse per second

In the AT group, the coefficient of variation of motor unit discharge rate was 10.9% (8.5-13.2) at 10% peak torque and 11.9% (9.6-14.0) at 20% peak torque; in the control group, it was 12.7% (10.5-14.9) at 10% peak torque and 12.9% (10.5-15.2) at 20% peak torque.

Torque steadiness

There were no differences in torque steadiness between groups at either of the two intensities analysed. The mean coefficient of variation of torque at 10% peak torque was 1.06 (0.79-1.32) in the AT group and 1.13 (0.90-1.37, p = 0.656) in the control group; at 20%, it was 0.80 (0.57-1.03) in the AT group and 0.92 (0.73-1.11, p = 0.375) in the control group.

Main findings

The present study aimed to determine whether runners with chronic mid-portion AT have lower neural drive to the triceps surae and whether there are muscle-specific differences in motor unit discharge characteristics within the triceps surae. To this end, we analysed the motor unit mean discharge rate and coefficient of variation of motor unit discharge rate of each individual muscle of the triceps surae during isometric contractions of increasing intensity. We also aimed to determine whether the AT group had lower torque steadiness. Our data indicate that runners with AT have lower neural drive to GL during the increase in plantar flexor isometric torque output. We confirmed our primary hypothesis, demonstrating a muscle-specific difference in neural drive in the AT group: a lower neural drive to GL during the increase in plantar flexor torque, not observed in the control group. However, we had also hypothesised that the neural drive to the triceps surae as a whole would be lower in the AT group, yet GM and SOL were no different from controls. Furthermore, we did not confirm our second hypothesis, as we found no differences in the coefficient of variation of motor unit discharge rate in any of the muscles, nor did we find differences in triceps surae torque steadiness between groups.

Mean motor unit discharge rates

It has previously been identified that the three muscles of the triceps surae, although synergists as ankle plantar flexors, may have an independent neural drive from one another, allowing independent recruitment strategies for better joint control (Hug et al. 2020). Our study also observed independent neural drive within the triceps surae. Individuals have a unique muscle activation pattern (Hug et al. 2022). Such activation signatures have been shown to be robust for triceps surae isometric contractions across days and contractions (Hug et al. 2022; Crouzier et al. 2019). Further, we found different neural strategies between groups in only one of the three muscles of the triceps surae. Our data show that the AT group does not use GL as effectively as healthy controls to match the increase in plantar flexor torque intensity. The AT group had a lower motor unit mean discharge rate with the increase in torque, outlining a change in muscle coordination in GL that was not observed in the control group.
During a voluntary contraction, muscle force depends on the number of motor units recruited and the rate at which they discharge (Enoka and Duchateau 2017). One possible explanation for the lower discharge rate observed in GL in the AT group during the submaximal isometric contractions, without an increase in GM or SOL discharge rate while matching the same torque, is an increase in the number of motor units recruited in GM and/or SOL rather than an increase in motor unit discharge rate. It is also possible that, instead of the increase in neural drive to GL from 10 to 20% observed in the control group, in the AT group this neural drive was distributed between GM and SOL, yet not sufficiently to be observed in our sample. Another possible explanation is that the AT group used other muscles to increase plantar flexion torque, such as flexor hallucis longus (FHL), which was not measured in this study. Increased FHL EMG activity has been reported on the painful side of unilateral AT presentations during isometric submaximal plantar flexion contractions, compared to the asymptomatic side and to controls (Masood et al. 2014). Similar findings of lower GL activity have been reported in another study of runners with AT (Crouzier et al. 2020). The authors used the physiological cross-sectional area and normalised RMS EMG to calculate an index of force for each muscle and estimate individual muscles' contributions to triceps surae force production. GL had a significantly lower contribution to overall triceps surae force output, suggesting a lower neural drive compared to healthy counterparts. Muscle force depends on motor unit discharge rate, which is proportional to the neural drive to the muscle. In healthy individuals, motor unit discharge rate increases to adjust for an increase in torque intensity (Enoka and Duchateau 2017). Contrary to what was previously suggested in the literature (O'Neill et al. 2019), we found no differences in SOL motor unit mean discharge rate during the increase in torque, which suggests that, at least for the condition and type of task considered in the current study, SOL contribution to plantar flexor force is not impaired in AT. The lower neural drive to GL observed in our study seems relevant to the persistent muscle deficits observed in AT (Silbernagel et al. 2007). Perhaps current treatment strategies for AT fail to effectively rehabilitate GL function, thereby maintaining this lower neural drive and contribution to force production during ankle plantar flexion. The neural drive to each individual muscle of the triceps surae can be influenced independently by strategies such as modified foot position during plantar flexion (Hug et al. 2020). Therefore, utilising strategies to increase GL recruitment and contribution during exercise is important, as altered muscle coordination may lead to unequal loading of the Achilles tendon (Hug and Tucker 2017). Performing heel raises with the toes pointed inwards significantly increased GL motor unit discharge rate compared to a neutral toe position in healthy individuals (Hug et al. 2020). Foot position, in healthy individuals, can also selectively affect GM and GL hypertrophy (Nunes et al. 2020). Therefore, implementing different foot positions during rehabilitation could help increase GL activity during plantar flexor resistance training.
Rehabilitation programs using different foot positions during triceps surae resistance training should be studied in patients with AT to explore how this lower contribution of GL to triceps surae torque impacts AT, and whether implementing treatment strategies to increase the neural drive to GL would influence tendon pain and function in AT.

Coefficient of variation of motor unit discharge rate and torque steadiness

Torque variability was measured during the 10 and 20% relative peak isometric torque plateaus. We found no differences between groups in the coefficient of variation of motor unit discharge in any of the muscles of the triceps surae, nor did we find differences in triceps surae torque steadiness. All three muscles of the triceps surae were equally matched to controls at the two submaximal intensities tested. The coefficient of variation of motor unit discharge represents, at an individual muscle level, the ability to effectively control muscle torque and is an important measure that can help explain motor performance (Enoka and Farina 2021; Negro et al. 2009). Fluctuations in torque, coefficient of variation of motor unit discharge rate and torque steadiness are more variable at lower than at higher torque intensities, which is why the 10 and 20% intensities were used for analysis (Enoka and Farina 2021). Based on our findings, the ability of the triceps surae to control torque during submaximal contractions is not affected in runners with AT, which aligns with another study (Vallance et al. 2019). Torque steadiness is affected by pain (Rice et al. 2015), and lower torque steadiness has been reported in other chronic (Rice et al. 2015) and painful musculoskeletal conditions (Telianidis et al. 2014). In our study, we used submaximal torque intensities and none of our participants reported pain during testing; however, we cannot rule out that such changes in torque steadiness and coefficient of variation of motor unit discharge rate would occur during activities that provoke pain in this group, such as running.

Limitations

We were unable to effectively track the same motor units from 10 to 20% peak torque. We tried tracking the same motor units across the two intensities, but this markedly reduced the number of motor units left for analysis. Therefore, we cannot say with certainty that the change in motor unit discharge rate, or lack thereof, observed from 10 to 20% MVIC across muscles occurred in the same motor units. Although both intensities used in this study are considered low threshold, each motor unit is unique, and motor unit tracking would have provided more robust information about each motor unit's response to the increase in torque. The EMG device used was limited to a 2 × 32-channel adaptor, not allowing sampling of all three muscles at the same time. Our study shows preliminary evidence of a possibly lower neural drive to GL. However, it is worth mentioning the reduced number of MUs found in GL per participant, which reduced sample sizes in both groups. Thus, future studies with larger samples should consider recording all three muscles using electrodes with more channels (i.e. a 64-electrode grid) to increase the number of motor units identified during decomposition when estimating neural drive to the triceps surae, allowing tracking of the same motor units between intensities to confirm our findings. Another limitation that should be highlighted is the type of contraction used for analysis.
HD-EMG analysis provides reliable estimates of motor unit discharge rates (Martinez-Valdes et al. 2016); however, it requires isometric contractions for motor unit analysis. Thus, the observations of neural drive from this study cannot be extrapolated to dynamic tasks such as heel raises or running. Furthermore, we used submaximal intensities of relative peak isometric torque, as this facilitates motor unit identification; it is therefore possible that at higher torque intensities, which demand more torque from each individual muscle, the differences observed in this study would be greater.

Conclusion

Our data suggest that runners with mid-portion AT have a muscle-specific deficit in the triceps surae, possibly creating heterogeneous loading of the Achilles tendon and contributing to the high recurrence of AT. We observed a lower motor unit discharge rate (i.e. lower neural drive) in GL during the increase in plantar flexor torque demands, but not in GM or SOL. This deficit in neural drive to GL might be greater during activities that require greater plantar flexor torque, which could contribute to overloading the Achilles tendon. Different strategies to increase GL activation during plantar flexion resistance training could be beneficial in AT, such as adopting different foot positions during heel raises. Such rehabilitation strategies should be studied in patients with AT to further understand how the lower contribution of GL impacts Achilles tendinopathy and how implementing strategies to increase the neural drive to GL would affect patient outcomes in AT.
def train(self, X, y):
    """Count n-gram occurrences per class over the training corpus."""
    # Start an empty n-gram list for every class label.
    for c in set(y):
        self.class_words[c] = []
    for _element, _class in zip(X, y):
        # Tokenise the sample into n-grams.
        _element = self.transform_ngrams(_element)
        # print(_element)  # debug output
        for w in _element:
            # Global corpus counts.
            if w not in self.corpus_words:
                self.corpus_words[w] = 1
            else:
                self.corpus_words[w] += 1
            # Remember which class this n-gram was seen in.
            self.class_words[_class].append(w)
    self.save()
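A minimal usage sketch for the method above; the class name NgramClassifier and the presence of transform_ngrams()/save() are assumptions inferred from the attributes the method touches, not confirmed by the source.

# Hypothetical driver; assumes a class exposing the train() method above
# together with corpus_words/class_words dicts, a transform_ngrams()
# tokenizer, and save() -- none of these are shown in the source.
clf = NgramClassifier()  # assumed class name
X = ["great plot and acting", "dull and slow plot"]
y = ["pos", "neg"]
clf.train(X, y)  # builds global and per-class n-gram counts
print(clf.class_words["pos"][:5])  # n-grams observed in the "pos" class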
The Reserve Bank says its limits on low-deposit home loans will stay until late this year, dousing expectations that the temporary measures could go soon.

The central bank said that the loan-to-value ratio (LVR) limits brought in last year "are achieving their purpose", with signs the housing market is cooling down. "Without the Loan to Value Ratio restrictions, annual house-price inflation might be running some 2.5 per cent higher," Reserve Bank deputy governor Grant Spencer said today.

The limits on low-deposit loans have sharply reduced the amount of lending in that part of the market, hitting first home buyers especially. The rules mean banks have to limit low-deposit loans to no more than 10 per cent of new mortgages.

Westpac Bank economists said the Reserve Bank had been expected to change the limit soon or start talks about when the limits might be dropped. Spencer's comments were seen as meaning the LVR limits would not be changed in any way until late this year, Westpac said. But that did set an indicative timetable for when the LVRs could be removed, possibly next year, unless the housing market takes off again. Westpac chief economist Dominick Stephens said: "In our view that is unlikely to happen, with interest rates rising as they are."

Spencer said that, as a result of the LVRs, the financial system was now less vulnerable to an adverse housing shock and banks were less exposed to potential credit losses as the interest rate cycle turns upwards.

The LVRs were temporary, but Spencer said that before removing them the central bank wanted to be sure the housing market was cooling after interest rate increases, and that immigration pressures were not causing a resurgence of house price pressures. "It will take some time to gain this assurance," he said. "At this stage we consider the earliest date for beginning to remove LVRs is likely to be late in the year."

The bank has said the housing market was cooling "gradually", but risks remained, including the present high levels of migration. It also warned borrowers to expect floating mortgage rates to head towards 8 per cent in the next couple of years. But how far and fast rates would rise depends on the exchange rate and the housing market.

Spencer said today that the volume of house sales had dropped considerably across the country, other than in Canterbury, and the slowdown in volume had also been reflected in prices. The supply of homes had started to improve, with a recovery evident in residential building, he said. In Auckland, progress was being made in freeing up the supply of buildable land and improving the consenting process. In Canterbury, the replacement of severely damaged homes was well in train after a slow start. However, the housing shortage remained large, and significant increases in building were needed in Auckland and Canterbury over the next three years.

There were many parts to the housing market equation – and many risks. "Probably the major risk at present is the outlook for net immigration, in part due to reduced departures of New Zealand citizens," Spencer said. "We are forecasting net immigration to reduce gradually as economic conditions improve in Australia.

"We've started raising the official cash rate, with the aim of forestalling general inflation pressures in the broader economy.

"Floating mortgage rates could be 7 per cent to 8 per cent in two years' time, closer to their average of the past 20 years."
"The extent and timing of interest rate increases will depend on a number of uncertain variables, in particular the exchange rate and housing market pressures."
package utils

import (
	"math"
	"sort"
)

// PFunc is a reducing function over a slice of samples.
type PFunc func([]float64) float64

// Max returns the largest value in data.
func Max(data []float64) float64 {
	// Start below any representable value so all-negative inputs work too
	// (the previous math.SmallestNonzeroFloat32 sentinel broke for them).
	max := -math.MaxFloat64
	for _, n := range data {
		if n > max {
			max = n
		}
	}
	return max
}

// Min returns the smallest value in data.
func Min(data []float64) float64 {
	min := math.MaxFloat64
	for _, n := range data {
		if n < min {
			min = n
		}
	}
	return min
}

// Sum returns the sum of all values in data.
func Sum(data []float64) float64 {
	var sum float64
	for _, n := range data {
		sum += n
	}
	return sum
}

// Avg returns the arithmetic mean of data.
func Avg(data []float64) float64 {
	return Sum(data) / float64(len(data))
}

// Stdev returns the population standard deviation of data.
func Stdev(data []float64) float64 {
	a := Avg(data)
	var ret float64
	for _, n := range data {
		n -= a
		n *= n
		ret += n
	}
	ret /= float64(len(data))
	return math.Sqrt(ret)
}

// Percent returns a PFunc that computes the n-th percentile (0-100) of a slice.
func Percent(n int) PFunc {
	return func(data []float64) float64 {
		tmp := make([]float64, len(data))
		copy(tmp, data)
		sort.Slice(tmp, func(i, j int) bool {
			return tmp[i] < tmp[j]
		})
		// Scale before truncating: the previous int(float32(n)/100.)*len(tmp)
		// truncated first, which mapped every n < 100 to index 0.
		idx := int(float64(n) / 100. * float64(len(tmp)))
		if idx >= len(tmp) {
			idx = len(tmp) - 1
		}
		return tmp[idx]
	}
}
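A quick illustration, in Python for brevity, of the truncation bug fixed in Percent above: casting to int before scaling collapses every percentile below 100 to index 0.

# int() truncates toward zero, so the order of operations matters:
n, size = 95, 200
wrong = int(n / 100) * size   # int(0.95) == 0  ->  always picks index 0
right = int(n / 100 * size)   # int(190.0) == 190
print(wrong, right)           # 0 190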
#!/usr/bin/env python """ (c) 2020, <NAME> (c) 2014, <NAME>, <NAME>, <NAME>, <NAME>. shader code adapted from Vispy's example molecular_viewer.py: https://github.com/vispy/vispy/blob/master/examples/demo/gloo/molecular_viewer.py Distributed under the terms of the new BSD License. """ # external dependencies: from vispy import app, gloo, visuals from vispy.util.transforms import perspective, translate, rotate from vispy.visuals.transforms import STTransform, MatrixTransform import numpy as np import attr from urqmd_tools.pids import LOOKUP_TABLE, ALL_SORTED from urqmd_tools.parser.f14 import F14_Parser, Particle from statistics import mean, stdev import collections import argparse import sys import os import time import pickle r = np.random.RandomState(1237+80) VERT_SHADER = """ #version 120 uniform mat4 u_model; uniform mat4 u_view; uniform mat4 u_projection; uniform vec3 u_light_position; uniform vec3 u_light_spec_position; uniform float u_aspect; attribute vec3 a_position; attribute vec3 a_color; attribute float a_radius; varying vec3 v_color; varying vec4 v_eye_position; varying float v_radius; varying vec3 v_light_direction; void main (void) { v_radius = a_radius; v_color = a_color; v_eye_position = u_view * u_model * vec4(a_position,1.0); v_light_direction = normalize(u_light_position); float dist = length(v_eye_position.xyz); gl_Position = u_projection * v_eye_position; // stackoverflow.com/questions/8608844/... // ... resizing-point-sprites-based-on-distance-from-the-camera vec4 proj_corner = u_projection * vec4(a_radius, a_radius, v_eye_position.z, v_eye_position.w); // # noqa gl_PointSize = 512.0 * proj_corner.x / proj_corner.w * u_aspect; // attempt to make very far points slighly bigger: //gl_PointSize = 128.0 * log(proj_corner.x / proj_corner.w * 5 + 1); } """ FRAG_SHADER = """ #version 120 uniform mat4 u_model; uniform mat4 u_view; uniform mat4 u_projection; uniform vec3 u_light_position; uniform vec3 u_light_spec_position; varying vec3 v_color; varying vec4 v_eye_position; varying float v_radius; varying vec3 v_light_direction; void main() { // r^2 = (x - x0)^2 + (y - y0)^2 + (z - z0)^2 vec2 texcoord = gl_PointCoord* 2.0 - vec2(1.0); float x = texcoord.x; float y = texcoord.y; float d = 1.0 - x*x - y*y; if (d <= 0.0) discard; float z = sqrt(d); vec4 pos = v_eye_position; pos.z += v_radius*z; vec3 pos2 = pos.xyz; pos = u_projection * pos; // gl_FragDepth = 0.5*(pos.z / pos.w)+0.5; vec3 normal = vec3(x,y,z); float diffuse = clamp(dot(normal, v_light_direction), 0.0, 1.0); // Specular lighting. vec3 M = pos2.xyz; vec3 O = v_eye_position.xyz; vec3 L = u_light_spec_position; vec3 K = normalize(normalize(L - M) + normalize(O - M)); // WARNING: abs() is necessary, otherwise weird bugs may appear with some // GPU drivers... 
float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0); vec3 v_light = vec3(1., 1., 1.); gl_FragColor.rgba = vec4(.15*v_color + .55*diffuse * v_color + .35*specular * v_light, 1.0); } """ class HICCanvas(app.Canvas): pid_colors = {urqmdpid.id: r.uniform(low=0.2, high=1.0, size=4) for urqmdpid in ALL_SORTED} def __init__(self, pts, ts, b=7, a=23, fmps=2, cb=0.9224028, bb=0.0, sf=10, w=1920, h=1080, t='dark', c='by_pid', win=False): """ pts: particles at different timesteps ts: list of the timesteps (in fm/c) b: before a: after fmps: fm per second (relation speed of light to visualization time) cb: CMS beta bb: boost beta sf: scaling factor w: viewport width h: viewport height t: theme ('bright' or 'dark') c: coloring scheme ('by_kind' or 'by_pid') win: start in windowed mode instead of full-screen (use F11 to toggle during run) """ start = time.time() self.pts = pts # the particles in the time evolution self.ts = ts # the timesteps for self.pts self.b = b # amount of s before t0 self.a = a # amount of s after t0 self.fmps = fmps # fm / s self.cb = cb # cms beta self.bb = bb # boost beta self.sf = sf # scaling factor self.w = w # view width self.h = h # view height self.theme = t self.coloring = c self.windowed = win if self.theme not in ('bright', 'dark'): raise NotImplementedError('theme: %s' % self.theme) if self.coloring not in ('by_kind', 'by_pid'): raise NotImplementedError('coloring', self.coloring) self.print_current_particles = False self.print_current_particles_without_nucleons = True self.recent_fps = [] self.last_update = 0.0 self.last_stats_output = 0.0 app.Canvas.__init__(self, title='UrQMD Viewer', keys='interactive', size=(w, h)) # Create program self.program = gloo.Program(VERT_SHADER, FRAG_SHADER) self.ps = self.pixel_scale n = max([len(ps) for ps in self.pts]) self.n = n self.particles = np.zeros(n, [('a_position', 'f4', 3), ('a_color', 'f4', 4), ('a_radius', 'f4')]) #self.particles['a_position'] = np.random.uniform(-20, +20, (n, 3)) + 1000 self.particles['a_position'] = 1000000, 1000000, 1000000 self.particles['a_radius'] = self.sf * self.pixel_scale self.particles['a_color'] = 1, 1, 1, 0 self.translate = 40 self.view = translate((0, 0, -self.translate)) self.model = np.eye(4, dtype=np.float32) self.projection = np.eye(4, dtype=np.float32) self.theta = 0 self.phi = 0 # Time self._t = time.time() # Bind vertex buffers self.program.bind(gloo.VertexBuffer(self.particles)) self.program['u_model'] = self.model self.program['u_view'] = self.view self.program['u_aspect'] = w/h self.program['u_light_position'] = 0., 0., 2. self.program['u_light_spec_position'] = -5., 5., -5. 
self._timer = app.Timer('auto', connect=self.on_timer, start=True) self.start = time.time() self.paused = False self.update_required = True self.pause_started = 0.0 self.visuals = [] if self.theme == 'bright': text_color = (0, 0, 0, 1) if self.theme == 'dark': text_color = (1, 1, 1, 1) self.fps_fmt = '{:.1f} FPS' llt = visuals.TextVisual(self.fps_fmt.format(0.0), bold=False, pos=[10, self.physical_size[1] - 10 - 20 - 10], color=text_color, anchor_x='left', anchor_y='bottom', method='gpu', font_size=10) self.lower_left_text = llt self.visuals.append(llt) self.time_fmt = 'τ = {:.1f} fm/c' ult = visuals.TextVisual(self.time_fmt.format(0.0), bold=True, pos=[10, 10], color=text_color, anchor_x='left', anchor_y='bottom', method='gpu', font_size=20) self.upper_left_text = ult self.visuals.append(ult) #urt = visuals.TextVisual('© <NAME>', bold=True, urt = visuals.TextVisual('UrQMD Viewer by @pklaus', bold=True, pos=[self.physical_size[0] - 10, 10], color=text_color, anchor_x='right', anchor_y='bottom', method='gpu', font_size=20) self.upper_right_text = urt self.visuals.append(urt) line_pos = np.array([[0, 0, -100], [0, 0, 100], ]) ba = visuals.LineVisual(pos=line_pos, color=(.5, .5, .5, 1), width=1, method='gl', antialias=False) self.beam_axis = ba self.visuals.append(ba) beam_direction = visuals.ArrowVisual(pos=np.array([[0, 0, 0], [0, 0, 1]]), color=(.5, .5, .5, 1), width=1, method='gl', arrow_size=0.6) self.visuals.append(beam_direction) sphere = visuals.SphereVisual(radius=0.08) self.visuals.append(sphere) self.update_model() self.update_projection() if self.theme == 'dark': gloo.set_state(depth_test=True, clear_color=(0, 0, 0, 1)) elif self.theme == 'bright': gloo.set_state(depth_test=True, clear_color=(1, 1, 1, 1)) # translucent particles: gloo.set_state(blend=True, blend_func=('src_alpha', 'one')) #gloo.wrappers.set_depth_range(near=-1000.0, far=10000.0) if not self.windowed: self.fullscreen = True self.show() end = time.time() def on_key_press(self, event): if event.text == ' ': self.paused = ~self.paused if self.paused: self.pause_started = time.time() else: self.start += time.time() - self.pause_started elif event.key in ('j', 'Left'): self.start += 0.05 elif event.key in ('k', 'Right'): self.start -= 0.05 elif event.key in ('h', 'PageDown'): self.start += 0.5 elif event.key in ('l', 'PageUp'): self.start -= 0.5 elif event.key in ('e',): self.sf *= 1.1 elif event.key in ('q',): self.sf *= 0.9 elif event.key in ('p',): self.print_current_particles = True else: pass # print(event.key) self.update_required = True self.update() def on_resize(self, event): vp = (0, 0, *event.physical_size) gloo.set_viewport(*vp) #self.context.set_viewport(*vp) for v in (v for v in self.visuals if type(v) is visuals.TextVisual): v.transforms.configure(canvas=self, viewport=vp) self.upper_right_text.pos = [event.size[0] - 10, 10] self.lower_left_text.pos = [10, event.size[1] - 10 - 10 - 20] self.update_projection(*event.size) self.update() def update_projection(self, width=None, height=None): if width is None or height is None: width, height = self.size self.projection = perspective(fovy=40.0, aspect=width/height, znear=5.0, zfar=500.0) self.program['u_projection'] = self.projection self.program['u_aspect'] = width/height def on_mouse_wheel(self, event): self.translate -= event.delta[1] #self.translate = max(-1, self.translate) self.view = translate((0, 0, -self.translate)) self.program['u_view'] = self.view self.update() def _normalize(self, x_y): x, y = x_y w, h = float(self.size[0]), float(self.size[1]) 
return x/(w/2.)-1., y/(h/2.)-1. def on_mouse_move(self, event): if event.is_dragging: x0, y0 = self._normalize(event.press_event.pos) x1, y1 = self._normalize(event.last_event.pos) x, y = self._normalize(event.pos) dx, dy = x - x1, -(y - y1) button = event.press_event.button self.phi += dx*150 self.theta -= dy*150 self.update_model() self.update() #if button == 1: # pass #elif button == 2: # pass def on_draw(self, event): gloo.clear() # need to do this every time as the texts overwrites it: for v in (v for v in self.visuals if type(v) is not visuals.TextVisual): pass#v.draw() gloo.set_state(depth_test=True) self.program.draw('points') for v in (v for v in self.visuals if type(v) is visuals.TextVisual): v.draw() def update_model(self): mt = MatrixTransform() mt.rotate(120, (1, 1, 1)) mt.rotate(self.phi, (0, 1, 0)) mt.rotate(self.theta, (1, 0, 0)) self.beam_axis.transform = mt self.model = mt.matrix self.program['u_model'] = self.model def stats_output(self): if len(self.recent_fps) < 2: return if (time.time() - self.last_stats_output) > 0.5: self.last_stats_output = time.time() fps, self.recent_fps = self.recent_fps, [] self.lower_left_text.text = self.fps_fmt.format(mean(fps)) def update(self): super().update() self.recent_fps.append(1/(time.time() - self.last_update)) self.last_update = time.time() self.stats_output() def on_timer(self, event): if self.paused and not self.update_required: return self.update_required = False if self.paused: now = self.pause_started else: now = time.time() t = (now - self.start) % (self.a + self.b) - self.b t_fm = t * self.fmps # time in fm before (-) or after (+) the collision self.upper_left_text.text = self.time_fmt.format(t_fm) white = (1, 1, 1, 1) red = [1, 0.3, 0.3, 1] green = [.2, .5, 1, 1] yellow = [.8, 1, .2, 1] blue = [.1, 0.1, .9, 1] baryons = [i for i in range(1, 56)] baryons += [-baryon for baryon in baryons] mesons = [101, 106, 102, 107, 104, 108, 103, 109, 111, 110, 105, 112, 114, 113, 115, 116, 122, 121, 123, 124, 118, 117, 119, 120, 126, 125, 127, 128, 130, 129, 131, 132] mesons += [-meson for meson in mesons] if t <= self.ts[0]: t_fm_0 = self.ts[0] # initial timestep ps = self.pts[0] #self.particles = np.zeros(len(ps), [('a_position', 'f4', 3), # ('a_color', 'f4', 4), # ('a_radius', 'f4')]) self.particles['a_position'][0:len(ps)] = np.array([[p.rx, p.ry, p.rz] for p in ps]) self.particles['a_position'][:,2] += np.where( self.particles['a_position'][:,2] < 0, (t_fm-t_fm_0)*( self.cb+self.bb)/(1+self.cb*self.bb), (t_fm-t_fm_0)*(-self.cb+self.bb)/(1+self.cb*self.bb) ) # move everything else away... 
self.particles['a_position'][len(ps):] = 100000, 100000, 100000 else: # find the best suiting timestamp for interpolation: for i, ts_fm in enumerate(self.ts): if t_fm <= ts_fm: break t_fm_0 = ts_fm d_t_fm = t_fm - t_fm_0 # fetch the particle set belonging to that timestep ps = self.pts[i] #self.particles = np.zeros(len(ps), [('a_position', 'f4', 3), # ('a_color', 'f4', 4), # ('a_radius', 'f4')]) self.particles['a_position'][0:len(ps)] = np.array([[p.rx, p.ry, p.rz] for p in ps]) +\ + d_t_fm * np.array([p.beta3 for p in ps]) if self.print_current_particles: if self.print_current_particles_without_nucleons: print_ps = [p for p in ps if not (p.id == 1 and p.ncl == 0)] print("Particles currently in the scene (without any nucleons that didn't collide so far):") else: print_ps = ps print("Particles currently in the scene:") pids = [p.id for p in print_ps] for pid, freq in collections.Counter(pids).most_common(): print(" ", LOOKUP_TABLE[pid].name, " - amount currently in the scene:", freq) self.print_current_particles = False # DEBUG Output of the xyz extents of the collision # (outermost particle positions) #x_extent = tuple(func(self.particles['a_position'][0:len(ps)][0,:]) for func in (np.amin, np.amax)) #y_extent = tuple(func(self.particles['a_position'][0:len(ps)][0,:]) for func in (np.amin, np.amax)) #z_extent = tuple(func(self.particles['a_position'][0:len(ps)][0,:]) for func in (np.amin, np.amax)) #print(x_extent, y_extent, z_extent) radius_from = 'm0' #m0' if radius_from == 'm': self.particles['a_radius'][0:len(ps)] = np.array([p.m**(1/3) for p in ps]) elif radius_from == 'm0': self.particles['a_radius'][0:len(ps)] = np.array([p.m0**(1/3) for p in ps]) elif radius_from in ('E', 'Ekin'): self.particles['a_radius'][0:len(ps)] = np.array([p.E**(1/3) for p in ps]) self.particles['a_radius'][0:len(ps)] *= self.sf * self.pixel_scale #self.particles['a_radius'][0:len(ps)][[p.id == 1 for p in ps]] = 0.5 # make any particles white that are not colored otherwise #self.particles['a_color'][0:len(ps)] = 1, 1, 1, 1 a_color = np.zeros((len(ps), 4)) if self.coloring == 'by_kind': a_color[[p.id in mesons for p in ps]] = yellow # mesons a_color[[p.id in baryons for p in ps]] = green #blue # excited baryons a_color[[p.id == 1 for p in ps]] = red # nucleons (mostly projectile and target nucleons) a_color[[p.id == 1 and p.ncl > 0 for p in ps]] -= (0.1, 0.1, 0.1, 0) # collided projectile and target nucleons elif self.coloring == 'by_pid': a_color = np.array([self.pid_colors[p.id] for p in ps]) a_color[[p.ncl == 0 for p in ps]] += (0.2, 0.2, 0.2, 0) self.particles['a_color'][0:len(ps)] = a_color #self.particles['a_color'][len(ps):self.n] = 0, 0, 0, 0 #self.particles['a_color'][len(ps):] = 0, 0, 0, 0 self.particles['a_position'][len(ps):] = 100000, 100000, 100000 # special treatment for some particles # Φ (phi) phis = (109, 128, 132) self.particles['a_color'][0:len(ps)][[p.id in phis for p in ps]] = blue if round(t*4) % 2 else yellow #self.particles['a_radius'][0:len(ps)][[p.id in phis for p in ps]] = 4 if any([p.id in phis for p in ps]): print("found a Φ (phi) at", t_fm) # 27 = Lambda, 106 = K+ / K0, 108 = K* if round(t_fm*4)%2: self.particles['a_color'][0:len(ps)][[p.id in (27, 106, 108) for p in ps]] = white self.program.bind(gloo.VertexBuffer(self.particles)) self.update() def main(): parser = argparse.ArgumentParser() parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r'), help="Must be of type .f14") parser.add_argument('--after', default=40, type=float) 
    parser.add_argument('--before', default=5, type=float)
    parser.add_argument('--width', default=900, type=float)
    parser.add_argument('--height', default=600, type=float)
    parser.add_argument('--windowed', action='store_true')
    parser.add_argument('--cms-beta', default=0.9224028, type=float)
    parser.add_argument('--boost-beta', default=0.0, type=float)
    parser.add_argument('--fm-per-sec', default=3, type=float)
    parser.add_argument('--scaling-factor', default=1, type=float)
    parser.add_argument('--theme', choices=('bright', 'dark'), default='dark')
    parser.add_argument('--coloring-scheme', choices=('by_kind', 'by_pid'), default='by_pid')
    args = parser.parse_args()

    start_loading_data = time.time()
    cache_file = '.cache.' + os.path.basename(args.urqmd_file.name) + '.pickle'
    try:
        print("Trying to open a cached version of the .f14 data file.")
        with open(cache_file, 'rb') as f:
            data = pickle.load(f)
        pts = data['pts']
        ts = data['ts']
    except Exception:
        # Cache file missing or unreadable - parse the .f14 file instead.
        print("Cached version unavailable, now parsing the .f14 file.")
        for event in F14_Parser(args.urqmd_file).get_events():
            particles = [Particle(particle_properties) for particle_properties in event['particle_properties']]
            ts = sorted(list(set(p.time for p in particles)))
            pts = []
            # naive approach: bucket the particles by timestep
            for i, t in enumerate(ts):
                print(i, "out of", len(ts), "filtered")
                selection = []
                for p in particles:
                    if p.time == t:
                        selection.append(p)
                    if p.time > t:
                        break
                for p in selection:
                    particles.remove(p)
                pts.append(selection)
            break  # only read the very first event in the file
        with open(cache_file, 'wb') as f:
            pickle.dump({'ts': ts, 'pts': pts}, f, pickle.HIGHEST_PROTOCOL)
    print("Done loading particle data after {:.3f} s".format(time.time() - start_loading_data))

    if args.boost_beta:
        for ps in pts:
            for p in ps:
                p.boost(args.boost_beta)
        # The Lorentz boost changes our timesteps, too...
        # actually now every boosted particle has a different timestamp, so this is broken...
        # (the original referenced an undefined name `PS_TS`; use the boosted sets instead)
        ts = sorted(set(ps[0].time for ps in pts if ps))

    c = HICCanvas(pts, ts,
                  a=args.after,
                  b=args.before,
                  cb=args.cms_beta,
                  bb=args.boost_beta,
                  fmps=args.fm_per_sec,
                  sf=args.scaling_factor,
                  w=args.width,
                  h=args.height,
                  t=args.theme,
                  c=args.coloring_scheme,
                  win=args.windowed,
                  )
    app.run()


if __name__ == '__main__':
    main()
Election Blu-ray Review

Reviewed by Dr. Svet Atanasov, December 16, 2017

The best candidate

The dark humor that makes Alexander Payne's Election attractive comes from the same place where many of Todd Solondz's films reside. It is an odd place that can make one feel very uneasy, at times even nauseated, but there are a lot of inconvenient truths that can be uncovered there.

The film is set in Omaha, Nebraska, where a local high school is gearing up to elect a new student president. Initially, it looks like the entire process will be a formality and Tracy Flick (Reese Witherspoon) will cruise to victory. She is smart, energetic, and super-ambitious. She is also the only candidate on the ballot who is actually campaigning to get elected. So, it is only a matter of time before Tracy's dream of running the student government becomes a reality. She can even feel it -- this is her time to be a star.

But a series of unexpected events slowly begins to chip away at Tracy's perfect election campaign. One of her teachers, Mr. McAllister (Matthew Broderick), urges a dimwitted football player named Paul Metzler (Chris Klein) to run against her, and much to everyone's surprise he decides to give the challenge a shot. Then Paul's sister, Tammy (Jessica Campbell), who has recently had her heart broken by another girl, also officially enters the race while vowing to dismantle the student government that for years has been wasting everyone's precious time. However, Tracy does not panic and begins working even harder to convince undecided voters that she is the right person for the position, though the more laser-focused she becomes on her mission, the more she realizes that her opponents are gaining serious momentum.

It is painfully obvious that Election satirizes the country's electoral process, and for a number of valid reasons. For example, all of the main characters basically mirror a conventional political type that you are bound to discover in any election cycle -- Tracy is the overconfident candidate that voters have to choose because it is her time to win; Paul is the clueless plant who is in the race for the thrill, not because he actually has a vision for the future; and Tammy is the angry outsider who just wants to blow up the whole damn system because she thinks that, like her, everyone else has had enough of it. Sound familiar? Then you have the dirty games and backstabbing that heated elections are known for, though here most of the behind-the-scenes surprises are rather underwhelming. It is the familiar circus that you see covered by the news networks, but downsized for local consumption. So, all of the targets are very clear and Payne fires at them with proper ammunition.

The problem with this film is that there are rather large sections of it where the balance between the humor and the awkwardness becomes unbearably artificial, and when that happens there is typically a 'gotcha' moment that feels completely out of sync. The scattered narration and random freeze-frame shots also add an MTV-esque vibe to the story that ultimately makes other parts look as if they come from a trendy, slightly unhinged reality show.
def is_chromedriver_too_old(self):
    self.__check_scope()
    self.__fail_if_not_using_chrome("is_chromedriver_too_old()")
    # Consider any chromedriver with a major version below 73 "too old".
    major_version = int(self.get_chromedriver_version().split(".")[0])
    return major_version < 73
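A hedged usage sketch for the predicate above; the `sb` driver object here is an assumption for illustration, not part of the original method.

# Hypothetical usage: gate a warning or workaround on an old chromedriver.
# `sb` is assumed to be an instance of the class defining the method above.
if sb.is_chromedriver_too_old():
    print("chromedriver major version is below 73; consider upgrading.")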
import sys

INF = 2**31 - 1

# First line: the number of price entries (the loop below simply reads
# until a blank line / EOF, so n itself is not used afterwards).
s = sys.stdin.readline().strip()
n = int(s)

# Cheapest price seen so far for each combination of items.
data = {key: INF for key in ('A', 'B', 'C', 'AB', 'AC', 'BC', 'ABC')}

s = sys.stdin.readline().strip()
while s != '':
    price, combo = s.split()
    price = int(price)
    combo = ''.join(sorted(combo))  # normalize letter order, e.g. 'CA' -> 'AC'
    if data[combo] > price:
        data[combo] = price
    s = sys.stdin.readline().strip()

# Every way to cover {A, B, C} with the offered combinations:
prices = min(
    data['A'] + data['B'] + data['C'],
    data['AB'] + data['C'],
    data['BC'] + data['A'],
    data['AC'] + data['B'],
    data['AB'] + data['BC'],
    data['AC'] + data['AB'],
    data['BC'] + data['AC'],
    data['ABC'],
)

# If no combination of offers covers all three items, every candidate sum
# still contains at least one INF term.
print(-1 if prices >= INF else prices)
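For cross-checking the hand-enumerated sums above, here is a sketch that derives the same answer by trying every single offer, every pair, and every triple of offer keys; the function name and the offers-dict shape are assumptions mirroring the `data` dict above, not part of the original solution.

from itertools import combinations

def min_cost(offers, inf=2**31 - 1):
    # offers: dict like `data` above, mapping 'A', 'AB', ..., 'ABC' to the
    # cheapest observed price (inf if that combination was never offered).
    keys = list(offers)
    # Up to three offers always suffice, since the three singles cover everything.
    candidates = [c for r in (1, 2, 3) for c in combinations(keys, r)]
    best = inf
    for combo in candidates:
        covered = set().union(*combo)  # letters covered by this pick
        if covered >= set('ABC'):
            best = min(best, sum(offers[k] for k in combo))
    return -1 if best >= inf else best

# e.g. min_cost(data) should agree with the printed answer above.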
// add the contents of filename to allPush and allPushSources.
func add(filename string, allPush packages.PackageConfig, allPushSources map[string]string) error {
	config, err := packages.LoadPackageConfig(filename)
	if err != nil {
		// Return the error instead of exiting, consistent with the signature.
		return fmt.Errorf("Failed to load PackageConfig file %q: %s", filename, err)
	}
	for k, v := range config.Servers {
		if _, ok := allPush.Servers[k]; ok {
			return fmt.Errorf("Found duplicate push config name: %q appears in %q and %q", k, filename, allPushSources[k])
		}
		allPushSources[k] = filename
		allPush.Servers[k] = v
	}
	return nil
}
use std::{ env, fs::File, io::{BufReader, BufWriter}, path::{Path, PathBuf}, thread::sleep, time::Duration, }; use tracing::{span, Level}; use tracing_flame::FlameLayer; use tracing_subscriber::{prelude::*, registry::Registry}; static PATH: &str = "flame.folded"; fn setup_global_subscriber(dir: &Path) -> impl Drop { let (flame_layer, _guard) = FlameLayer::with_file(dir.join(PATH)).unwrap(); let subscriber = Registry::default().with(flame_layer); tracing::subscriber::set_global_default(subscriber).unwrap(); _guard } fn make_flamegraph(tmpdir: &Path, out: &Path) { println!("outputting flamegraph to {}", out.display()); let inf = File::open(tmpdir.join(PATH)).unwrap(); let reader = BufReader::new(inf); let out = File::create(out).unwrap(); let writer = BufWriter::new(out); let mut opts = inferno::flamegraph::Options::default(); inferno::flamegraph::from_reader(&mut opts, reader, writer).unwrap(); } fn main() { let out = if let Some(arg) = env::args().nth(1) { PathBuf::from(arg) } else { let mut path = env::current_dir().expect("failed to read current directory"); path.push("tracing-flame-inferno.svg"); path }; // setup the flame layer let tmp_dir = tempfile::Builder::new() .prefix("flamegraphs") .tempdir() .expect("failed to create temporary directory"); let guard = setup_global_subscriber(tmp_dir.path()); // do a bunch of span entering and exiting to simulate a program running span!(Level::ERROR, "outer").in_scope(|| { sleep(Duration::from_millis(10)); span!(Level::ERROR, "Inner").in_scope(|| { sleep(Duration::from_millis(50)); span!(Level::ERROR, "Innermost").in_scope(|| { sleep(Duration::from_millis(50)); }); }); sleep(Duration::from_millis(5)); }); sleep(Duration::from_millis(500)); // drop the guard to make sure the layer flushes its output then read the // output to create the flamegraph drop(guard); make_flamegraph(tmp_dir.path(), out.as_ref()); }
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved. #include "SceneOutlinerModule.h" #include "Modules/ModuleManager.h" #include "Widgets/DeclarativeSyntaxSupport.h" #include "Framework/Application/SlateApplication.h" #include "SceneOutlinerSettings.h" #include "SSceneOutliner.h" #include "SceneOutlinerActorInfoColumn.h" #include "SceneOutlinerGutter.h" #include "SceneOutlinerItemLabelColumn.h" #include "SceneOutlinerPublicTypes.h" /* FSceneOutlinerModule interface *****************************************************************************/ namespace SceneOutliner { void OnSceneOutlinerItemClicked(TSharedRef<ITreeItem> Item, FOnActorPicked OnActorPicked) { Item->Visit( FFunctionalVisitor() .Actor([&](const FActorTreeItem& ActorItem){ if (AActor* Actor = ActorItem.Actor.Get()) { OnActorPicked.ExecuteIfBound(Actor); } }) ); } } void FSceneOutlinerModule::StartupModule() { RegisterDefaultColumnType< SceneOutliner::FSceneOutlinerGutter >(SceneOutliner::FDefaultColumnInfo(SceneOutliner::FColumnInfo(SceneOutliner::EColumnVisibility::Visible, 0), ESceneOutlinerMode::ActorBrowsing)); RegisterDefaultColumnType< SceneOutliner::FItemLabelColumn >(SceneOutliner::FDefaultColumnInfo(SceneOutliner::FColumnInfo(SceneOutliner::EColumnVisibility::Visible, 10))); RegisterDefaultColumnType< SceneOutliner::FActorInfoColumn >(SceneOutliner::FDefaultColumnInfo(SceneOutliner::FColumnInfo(SceneOutliner::EColumnVisibility::Visible, 20))); } void FSceneOutlinerModule::ShutdownModule() { UnRegisterColumnType< SceneOutliner::FSceneOutlinerGutter >(); UnRegisterColumnType< SceneOutliner::FItemLabelColumn >(); UnRegisterColumnType< SceneOutliner::FActorInfoColumn >(); } TSharedRef< ISceneOutliner > FSceneOutlinerModule::CreateSceneOutliner( const SceneOutliner::FInitializationOptions& InitOptions, const FOnActorPicked& OnActorPickedDelegate ) const { auto OnItemPicked = FOnSceneOutlinerItemPicked::CreateStatic( &SceneOutliner::OnSceneOutlinerItemClicked, OnActorPickedDelegate ); return CreateSceneOutliner(InitOptions, OnItemPicked); } TSharedRef< ISceneOutliner > FSceneOutlinerModule::CreateSceneOutliner( const SceneOutliner::FInitializationOptions& InitOptions, const FOnSceneOutlinerItemPicked& OnItemPickedDelegate ) const { return SNew( SceneOutliner::SSceneOutliner, InitOptions ) .IsEnabled( FSlateApplication::Get().GetNormalExecutionAttribute() ) .OnItemPickedDelegate( OnItemPickedDelegate ); } /* Class constructors *****************************************************************************/ USceneOutlinerSettings::USceneOutlinerSettings( const FObjectInitializer& ObjectInitializer ) : Super(ObjectInitializer) { } IMPLEMENT_MODULE(FSceneOutlinerModule, SceneOutliner);