content
stringlengths
10
4.9M
<filename>modules/publix/app/services/publix/WorkerCreator.java package services.publix; import daos.common.BatchDao; import daos.common.worker.WorkerDao; import models.common.Batch; import models.common.workers.GeneralMultipleWorker; import models.common.workers.GeneralSingleWorker; import models.common.workers.MTSandboxWorker; import models.common.workers.MTWorker; import javax.inject.Inject; import javax.inject.Singleton; /** * Service class for JATOS Controllers (not Publix).. * * @author <NAME> */ @Singleton public class WorkerCreator { private final WorkerDao workerDao; private final BatchDao batchDao; @Inject WorkerCreator(WorkerDao workerDao, BatchDao batchDao) { this.workerDao = workerDao; this.batchDao = batchDao; } /** * Creates and persists a MTWorker or a MTSandboxWorker. */ public MTWorker createAndPersistMTWorker(String mtWorkerId, boolean mTurkSandbox, Batch batch) { MTWorker worker; if (mTurkSandbox) { worker = new MTSandboxWorker(mtWorkerId); } else { worker = new MTWorker(mtWorkerId); } batch.addWorker(worker); workerDao.create(worker); batchDao.update(batch); return worker; } /** * Create and persist a GeneralSingleWorker */ public GeneralSingleWorker createAndPersistGeneralSingleWorker(Batch batch) { GeneralSingleWorker worker = new GeneralSingleWorker(); batch.addWorker(worker); workerDao.create(worker); batchDao.update(batch); return worker; } /** * Create and persist a GeneralMultipleWorker */ public GeneralMultipleWorker createAndPersistGeneralMultipleWorker(Batch batch) { GeneralMultipleWorker worker = new GeneralMultipleWorker(); batch.addWorker(worker); workerDao.create(worker); batchDao.update(batch); return worker; } }
/** * Contains methods to validate user input in various JSF pages. This is a * request scoped CDI managed bean. * All methods throw a ValidatorException with a * customized message if something is wrong with the input. * */ @Named @RequestScoped public class Validator { @Inject private AcctCreation acctCreation; public Validator() { } /** * Checks if a username is already taken. This method internally calls * {@link AccountCreation#usernameAvailable(java.lang.String) } * @param context * @param component * @param value */ public void usernameAvailable(FacesContext context, UIComponent component , Object value){ String username = (String) value; // Here is an alternative method to get a managed bean from another managed // bean (instead of using CDI injection): //ELContext elContext = context.getELContext(); //AccountCreation acct = (AccountCreation) elContext.getELResolver() // .getValue(elContext,null,"acct"); if (!acctCreation.usernameAvailable(username)){ FacesMessage message = new FacesMessage("Error! This username is " + "already taken."); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } /** * Checks if the passwords entered match when passwords are entered (in the * account creation use case passwords are required, but in the the account * modification use case they are not). 
* * @param context * @param component * @param value */ public void passwordValidate(FacesContext context, UIComponent component , Object value){ UIInput passwordField = (UIInput) component.findComponent("password"); String password = (String) passwordField.getLocalValue(); UIInput passwordReenterField = (UIInput) component.findComponent( "password2"); String passwordReenter = (String) passwordReenterField.getLocalValue(); boolean fail1 = passwordField.isValid() && password != null && !password.equals(passwordReenter); boolean fail2 = passwordReenterField.isValid() && passwordReenter != null && !passwordReenter.equals(password); if (fail1 || fail2){ FacesMessage message = new FacesMessage("Error! Passwords don't " + "match."); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } /** * Checks if this is valid syntax for an email address. * @param context * @param component * @param value */ public void emailValidate(FacesContext context, UIComponent component , Object value){ String email = (String) value; if(!email.matches("^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$")){ FacesMessage message = new FacesMessage("Error! Invalid e-mail " + "format"); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } /** * Checks if the credit card number entered is really a number. Since this is * a mock application, it doesn't check if it follows the correct credit * card number format, but that would be easy to add (e.g. Luhn algorithm). * @param context * @param component * @param value */ public void numberValidate(FacesContext context, UIComponent component , Object value){ String number = (String) value; if (number!=null && !number.matches("[0-9]+")){ FacesMessage message = new FacesMessage("Error! 
The value " + "entered as credit card number is not a number."); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } /** * Checks if the credit card expiration date entered has already passed * * @param context * @param component * @param value */ public void expirationValidate(FacesContext context, UIComponent component , Object value){ UIInput yearField = (UIInput) component.findComponent("cardYear"); String yearString = (String) yearField.getLocalValue(); UIInput monthField = (UIInput) component.findComponent("cardMonth"); String monthString = (String) monthField.getLocalValue(); UIInput numberField = (UIInput) component.findComponent("cardNumber"); String numberString =(String) numberField.getLocalValue(); int year = Integer.parseInt(yearString); int month = Integer.parseInt(monthString); Calendar cardExp = Calendar.getInstance(); cardExp.clear(); cardExp.set(Calendar.YEAR, year); cardExp.set(Calendar.MONTH, month - 1); cardExp.set(Calendar.DATE, cardExp.getActualMaximum(Calendar.DATE)); Calendar now = Calendar.getInstance(); if (numberString!=null && cardExp.before(now)){ FacesMessage message = new FacesMessage("Error! This credit " + "card has already expired."); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } /** * Checks if a billing address has been provided. This method can be used * to validate customer address in the store checkout page. * When checking out, the customer must provide both shipping and billing * address; if they are the same, he must provide just shipping address * and select the "same as shipping" checkbox for the billing address. 
* * @param context * @param component * @param value */ public void billingAddrValidate(FacesContext context, UIComponent component , Object value){ String[] billingAddrArray = {"billingStreet","billingCity", "billingState","billingCountry", "billingZIP", "billingPhone"}; boolean sameAsShipping = (Boolean) ((UIInput) component.findComponent( "sameAsShipping")).getLocalValue(); boolean billingProvided = true; for (String s: billingAddrArray){ if (((UIInput) component.findComponent( "billingPhone")).getLocalValue()==null){ billingProvided = false; } } if (!sameAsShipping && !billingProvided){ FacesMessage message = new FacesMessage("Error! No billing address" + " provided. If same as " + "shipping, select checkbox. If different, enter below."); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } /** * Checks if a string entered as a book ISBN number is really a number * @param context * @param component * @param value */ public void isbnValidate(FacesContext context, UIComponent component , Object value){ if (value == null || !value.toString().matches("[0-9]+")){ FacesMessage message = new FacesMessage("Error! ISBN must be an" + " integer"); message.setSeverity(FacesMessage.SEVERITY_ERROR); throw new ValidatorException(message); } } }
// todo - implement these as you see fit. inline GLint glRenderMode(GLenum a0) { return GL_RENDER; }; // ymmv. should return previous mode inline GLenum glGetError() { return GL_NO_ERROR; }; inline GLboolean glIsList(GLuint a0) { return GL_TRUE; }; inline GLuint glGenLists(GLsizei a0) { return (GLuint)a0; }; inline const GLubyte* glGetString(GLenum a0) { return (const GLubyte *)"egl-xyzzy"; }; inline GLboolean glIsEnabled(GLenum a0) { return GL_TRUE; }; inline GLboolean glAreTexturesResident(GLsizei a0,const GLuint * a1,GLboolean * a2) { return GL_TRUE; }; inline GLboolean glIsTexture(GLuint a0) { return GL_TRUE; }; inline void glGetBooleanv(GLenum a0,GLboolean * a1) { *a1 = GL_TRUE; }; inline void glGetDoublev(GLenum a0,GLdouble * a1) { *a1 = 0.0; }; inline void glGetFloatv(GLenum a0,GLfloat * a1) { *a1 = 0.0f; }; inline void glGetIntegerv(GLenum a0,GLint * a1) { *a1 = 0; };
<filename>src/store/store.ts export const storeData = <Data = string | Record<string, unknown>>( key: string, value: Data, ) => { try { localStorage.setItem( `@storage_${key}`, typeof value === "string" ? value : JSON.stringify(value), ); } catch (e) { return null; } }; export const getData = <Data = string>( key: string, ): Data | null => { try { const jsonValue = localStorage.getItem(`@storage_${key}`); try { return jsonValue != null ? JSON.parse(jsonValue) as Data : null; } catch (error) { return jsonValue as unknown as Data; } } catch (e) { return null; } };
//FIXME: these two methods will be reworked to avoid reentrancy problems. // Currently, calling it may result in important messages being dropped. /// Prompt the user to chose a file to open. /// /// Blocks while the user picks the file. pub fn open_file_sync(&mut self, options: FileDialogOptions) -> Option<FileInfo> { let hwnd = self.get_hwnd()?; unsafe { get_file_dialog_path(hwnd, FileDialogType::Open, options) .ok() .map(|s| FileInfo { path: s.into() }) } }
#include <bits/stdc++.h> using namespace std; typedef long long int ll; #define MAX 10000007; // // // ll p[1000000000001]; // // ll power(ll x, ll y){ // // int res = 1; // // // x = x % mx; // // while (y > 0){ // // if (y & 1) // // res = (res * x) % MAX; // // y = y >> 1; // // x = (x * x) % MAX; // // } // // return res; // // } // // void pre(){ // // for (ll i = 1; i <= 1000000000001; i++){ // // for (ll j = 1; j <= sqrt(i); j ++) // // { // // if(i % j == 0) // // p[i] = (p[i] + j) % MAX; // // } // // } // // } // // vector<bool> p(3000,true); // // void sieve(){ // // // vector<int> primes; // // p[0] = p[1] = false; // // // int cnt = 0; // // ll row = 1; // // ll temp = 1; // // for (int i = 2; i * i <=(3000); i++){ // // if (p[i] == true){ // // for (int j = i * i; j <= 3000; j += i){ // // p[j] = false; // // } // // } // // } // // } // // ll arr[3000]; // // void precalc(){ // // for (int i = 1; i <= 3000; i++){ // // for (int j = 2; j <= i; j++){ // // if (p[j] == 1 && i % j == 0) // // arr[i]++; // // } // // } // // } // // void pattern(){ // // for (int i = 1; i <= 100000000; i++ ) // // { // // if () // // } // // } // // ll binomial(ll n, ll k){ // // ll c[n+1][k+1]; // // ll i,j; // // for (i = 0; i <= n; i++){ // // for (j = 0; j <= min(i,k); j++){ // // if (j == 0 || j == i){ // // c[i][j] = 1; // // } // // else // // { // // c[i][j] = (c[i-1][j-1] + c[i-1][j]) % MAX; // // } // // } // // } // // return c[n][k]; // // } // // ll gcd(ll a, ll b, ll &x, ll &y) { // // if (b == 0) { // // x = 1; // // y = 0; // // return a; // // } // // ll x1, y1; // // ll d = gcd(b, a % b, x1, y1); // // x = y1; // // y = x1 - y1 * (a / b); // // return d; // // } // // vector<int> arr(1000000000,0); // // void fib(){ // // arr[0] = 0; arr[1] = 1; // // for (int i = 2; i <= 1000000000; i++){ // // arr[i] = (arr[i-1] + arr[i-2]) % MAX; // // } // // } // //deterministic version to check prime // // typedef int64_t u64; // // typedef __uint128_t u128; // // 
u64 binpower(u64 base, u64 e, u64 mod) { // // u64 result = 1; // // base %= mod; // // while (e) { // // if (e & 1) // // result = (u128)result * base % mod; // // base = (u128)base * base % mod; // // e >>= 1; // // } // // return result; // // } // // bool check_composite(u64 n, u64 a, u64 d, int s) { // // u64 x = binpower(a, d, n); // // if (x == 1 || x == n - 1) // // return false; // // for (int r = 1; r < s; r++) { // // x = (u128)x * x % n; // // if (x == n - 1) // // return false; // // } // // return true; // // }; // // bool MillerRabin(int64_t n) { // returns true if n is prime, else returns false. // // if (n < 2) // // return false; // // int r = 0; // // int64_t d = n - 1; // // while ((d & 1) == 0) { // // d >>= 1; // // r++; // // } // // for (int a : {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37}) { // // if (n == a) // // return true; // // if (check_composite(n, a, d, r)) // // return false; // // } // // return true; // // } // // wheeler's factorization method // // map <long long int,long long int> v; // // void factors(long long int n){ // // while (n % 2 == 0){ // // v[2]++; // // n /= 2; // // } // // for (long long int i = 3; i <= sqrt(n); i += 2){ // // while (n % i == 0) // // { // // v[i]++; // // n /= i; // // } // // } // // if (n > 1) v[n]++; // // // return v; // // } // //euler totient function to calculate phi which is the no. 
of positive integer from [1...n] which is coprime to n; // // vector<int> phi(100000000 + 1); // // void phi_1_to_n(int n) { // // phi[0] = 0; // // phi[1] = 1; // // for (int i = 2; i <= n; i++) // // phi[i] = i - 1; // // for (int i = 2; i <= n; i++) // // for (int j = 2 * i; j <= n; j += i) // // phi[j] -= phi[i]; // // } // // int phi(int n) { // // int result = n; // // for (int i = 2; i * i < n; i++) { // // if (n % i == 0) { // // while (n % i == 0) // // n /= i; // // result -= result / i; // // } // // } // // if (n > 1) // // result -= result / n; // // return result; // // } int main() { int n,t; cin >> n >> t; string s; cin >> s; while (t--){ for (int i = 0; i < s.length();){ if (s[i] == 'B' && s[i+1] == 'G') { swap(s[i],s[i+1]); i += 2; } else { i++; } } } cout << s; }
// Iterator pattern demo.

interface IIterator {
  next(): any;
  hasNext(): any;
}

interface ICounter {
  getIterator(): IIterator;
}

/** Aggregate: wraps a collection and hands out iterators over it. */
class Counter implements ICounter {
  collection: any;

  constructor(data: any) {
    this.collection = data;
  }

  getIterator() {
    return new CounterIterator(this.collection);
  }
}

/** Concrete iterator holding a cursor into the collection. */
class CounterIterator implements IIterator {
  current: number;
  collection: any;

  constructor(data: any) {
    this.collection = data;
    this.current = 0;
  }

  next() {
    // Return the current element, then advance the cursor.
    return this.collection[this.current++];
  }

  prev() {
    return this.collection[this.current - 1];
  }

  hasNext() {
    return this.collection.length > this.current;
  }
}

let iterator = new Counter([1, 2, 3, 4, 5]).getIterator();
while (iterator.hasNext()) {
  console.log(iterator.next());
}
package testdata import ( "errors" . "github.com/dsphub/go-simple-crud-sample/model" ) type StubFailedPostStore struct{} func (s *StubFailedPostStore) Connect() error { return errors.New("failed to ping db") } func (s *StubFailedPostStore) Disconnect() error { return errors.New("failed to close db") } func (s *StubFailedPostStore) GetAllPosts() ([]Post, error) { return []Post{}, ErrorPostsAreNotFound } func (s *StubFailedPostStore) GetPostByID(id int) (Post, error) { return Post{}, ErrorPostDoesNotExist } func (s *StubFailedPostStore) CreatePost(title, text string) error { return ErrorPostIsNotCreated } func (s *StubFailedPostStore) UpdatePost(id int, title, text string) error { return ErrorPostDoesNotExist } func (s *StubFailedPostStore) DeletePost(id int) error { return ErrorPostDoesNotExist }
#include "CTcpConnection.h"
import os
import tkinter as tk
import tkinter.filedialog
import tkinter.messagebox
from PIL import Image, ImageTk


class GUI:
    """A small text-editor window: menu bar, toolbar, text area, context menus.

    The file handlers load/save whole files into the Text widget; the edit
    handlers are placeholders that only print the action name.
    """

    def __init__(self, master):
        master.geometry('800x600')
        master.title('DOGAN PYTHON & TEXT EDITOR ')
        master.resizable(width=False, height=False)
        self.master = master

        # Currently selected font for the Edit > Font radio buttons.
        self.radio_button_var = tk.StringVar()
        self.radio_button_var.set('Arial 12')

        # ---- menu bar ----
        self.menu_bar = tk.Menu(master)

        self.file_popup = tk.Menu(tearoff=0)
        self.menu_bar.add_cascade(label='File', menu=self.file_popup,
                                  font='Arial 12', underline=0)
        self.file_popup.add_command(label='Open...', command=self.file_open_handler,
                                    font='Arial 10 bold', underline=1,
                                    accelerator='Ctrl+O')
        master.bind('<Control-o>', self.file_open_handler)
        self.file_popup.add_command(label='Save As...', command=self.file_saveas_handler,
                                    font='Arial 10 bold', underline=0,
                                    accelerator='Ctrl+S')
        master.bind('<Control-s>', self.file_saveas_handler)
        # 'Close' starts disabled; it is enabled once a file has been opened.
        self.file_popup.add_command(label='Close', command=self.file_close_handler,
                                    font='Arial 10', underline=2,
                                    foreground='blue', state=tk.DISABLED)
        self.file_popup.add_separator()
        self.file_popup.add_command(label='Exit', command=self.master.quit,
                                    font='Arial 10', underline=1,
                                    accelerator='Ctrl+E')

        self.edit_popup = tk.Menu(tearoff=0)
        self.edit_popup.add_command(label='Cut', underline=0,
                                    command=self.edit_cut_handler,
                                    accelerator='Ctrl+X')
        self.edit_popup.add_command(label='Copy', underline=1,
                                    command=self.edit_copy_handler,
                                    accelerator='Ctrl+C')
        self.edit_popup.add_command(label='Paste', underline=0,
                                    command=self.edit_paste_handler,
                                    accelerator='Ctrl+V')
        self.edit_popup.add_separator()
        self.edit_font_popup = tk.Menu(tearoff=0)
        self.edit_popup.add_cascade(label='Font', menu=self.edit_font_popup)
        for font_name in ('Arial 12', 'Consolas 12', 'Verdana 12'):
            self.edit_font_popup.add_radiobutton(label=font_name,
                                                 command=self.edit_font_handler,
                                                 value=font_name,
                                                 variable=self.radio_button_var)
        self.menu_bar.add_cascade(label='Edit', menu=self.edit_popup, underline=0)
        master.config(menu=self.menu_bar)

        # ---- text area ----
        # BUGFIX: the Text widget was parented to the global `root` instead of
        # the `master` passed to this constructor.
        self.text = tk.Text(master, font='Consolas 14', bg='light green')
        self.text.place(x=0, y=64, width=800, height=600)

        # ---- toolbar ----
        self.toolbar = tk.Frame(master)
        self.toolbar.place(x=0, y=0, width=800, height=64)

        # Each button keeps a reference to its image (button.image = img) so
        # the PhotoImage is not garbage-collected while in use.
        img = tk.PhotoImage(file='open.png')
        self.toolbar_button_open = tk.Button(self.toolbar,
                                             command=self.file_open_handler,
                                             image=img, padx=0, pady=0)
        self.toolbar_button_open.image = img
        self.toolbar_button_open.place(x=0, y=0, width=64, height=64)

        img = tk.PhotoImage(file='save.png')
        self.toolbar_button_saveas = tk.Button(self.toolbar,
                                               command=self.file_saveas_handler,
                                               image=img, padx=0, pady=0)
        self.toolbar_button_saveas.image = img
        self.toolbar_button_saveas.place(x=64, y=0, width=64, height=64)

        img = tk.PhotoImage(file='close.png')
        self.toolbar_button_close = tk.Button(self.toolbar,
                                              command=self.file_close_handler,
                                              image=img, padx=0, pady=0,
                                              state=tk.DISABLED)
        self.toolbar_button_close.image = img
        self.toolbar_button_close.place(x=128, y=0, width=64, height=64)

        img = tk.PhotoImage(file='exit.png')
        self.toolbar_button_exit = tk.Button(self.toolbar,
                                             command=self.master.quit,
                                             image=img, padx=0, pady=0)
        self.toolbar_button_exit.image = img
        self.toolbar_button_exit.place(x=192, y=0, width=64, height=64)

        img = tk.PhotoImage(file='cut.png')
        self.toolbar_button_cut = tk.Button(self.toolbar,
                                            command=self.edit_cut_handler,
                                            image=img, padx=0, pady=0)
        self.toolbar_button_cut.image = img
        self.toolbar_button_cut.place(x=300, y=0, width=64, height=64)
        # Ctrl+X triggers cut.
        master.bind('<Control-x>', self.edit_cut_handler)

        img = tk.PhotoImage(file='copy.png')
        self.toolbar_button_copy = tk.Button(self.toolbar,
                                             command=self.edit_copy_handler,
                                             image=img, padx=0, pady=0)
        self.toolbar_button_copy.image = img
        self.toolbar_button_copy.place(x=364, y=0, width=64, height=64)
        # Ctrl+C triggers copy.
        master.bind('<Control-c>', self.edit_copy_handler)

        img = tk.PhotoImage(file='paste.png')
        self.toolbar_button_paste = tk.Button(self.toolbar,
                                              command=self.edit_paste_handler,
                                              image=img, padx=0, pady=0)
        self.toolbar_button_paste.image = img
        self.toolbar_button_paste.place(x=428, y=0, width=64, height=64)
        # Ctrl+V triggers paste.
        master.bind('<Control-v>', self.edit_paste_handler)

        # Logo shown at the top-right of the window.
        self.my_img = ImageTk.PhotoImage(Image.open("kartal.png"))
        self.my_label = tk.Label(image=self.my_img)
        self.my_label.place(x=700, y=0, width=64, height=64)

        # Context menu shown on right-click inside the text area.
        self.text_context_menu = tk.Menu(master, tearoff=0)
        self.text_context_menu.add_command(label='Cut', command=self.edit_cut_handler)
        self.text_context_menu.add_command(label='Copy', command=self.edit_copy_handler)
        self.text_context_menu.add_command(label='Paste', command=self.edit_paste_handler)
        self.text.bind('<Button-3>', self.text_mouse_right_press_handler)

        # Context menu shown on right-click inside the toolbar (entries are
        # placeholders without commands).
        self.toolbar_context_menu = tk.Menu(master, tearoff=0)
        self.toolbar_context_menu.add_command(label='Add')
        self.toolbar_context_menu.add_command(label='Remove')
        self.toolbar_context_menu.add_command(label='Adjust')
        self.toolbar.bind('<Button-3>', self.toolbar_mouse_right_press_handler)

    def file_open_handler(self, *args):
        """Let the user pick a file and load its contents into the text area."""
        try:
            path = tk.filedialog.askopenfilename(
                title='Dosya Seçimi',
                filetypes=[('Python Files', '*.py'), ('Text Files', '*.txt')])
            if path != '':
                with open(path) as f:
                    self.text.delete('1.0', 'end')
                    self.text.insert('1.0', f.read())
                # Swap the enabled state of Open/Close in menu and toolbar.
                self.file_popup.entryconfig(2, state=tk.NORMAL)
                self.file_popup.entryconfig(0, state=tk.DISABLED)
                self.toolbar_button_open.config(state=tk.DISABLED)
                self.toolbar_button_close.config(state=tk.NORMAL)
        except Exception as e:
            tk.messagebox.showerror(title='Error', message=str(e))

    def file_saveas_handler(self, *args):
        """Ask for a target path and write the text area contents to it."""
        try:
            path = tk.filedialog.asksaveasfilename(
                title='Dosya Seçimi',
                filetypes=[('Python Files', '*.py'), ('Text Files', '*.txt')])
            if path != '':
                # BUGFIX: removed the leftover debug print(path).
                with open(path, 'w') as f:
                    f.write(self.text.get('1.0', 'end'))
        except Exception as e:
            tk.messagebox.showerror(title='Error', message=str(e))

    def file_close_handler(self, *args):
        """Clear the text area and restore the Open/Close enabled states."""
        self.text.delete('1.0', 'end')
        self.file_popup.entryconfig(0, state=tk.NORMAL)
        self.file_popup.entryconfig(2, state=tk.DISABLED)
        self.toolbar_button_open.config(state=tk.NORMAL)
        self.toolbar_button_close.config(state=tk.DISABLED)

    def edit_cut_handler(self, *args):
        print('Cut')    # placeholder

    def edit_copy_handler(self, *args):
        print('Copy')   # placeholder

    def edit_paste_handler(self, *args):
        print('Paste')  # placeholder

    def edit_font_handler(self):
        """Apply the font chosen via the Edit > Font radio buttons."""
        self.text['font'] = self.radio_button_var.get()

    def text_mouse_right_press_handler(self, event):
        """Show the text-area context menu; 'break' stops further handling."""
        self.text_context_menu.post(event.x_root, event.y_root)
        return 'break'

    def toolbar_mouse_right_press_handler(self, event):
        """Show the toolbar context menu; 'break' stops further handling."""
        self.toolbar_context_menu.post(event.x_root, event.y_root)
        return 'break'


root = tk.Tk()
gdb = GUI(root)
root.mainloop()
// parseIPv4s returns a slice of int64 IP addresses. func parseIPv4s(ips []string) ([]int64, error) { ipv4s := make([]int64, len(ips)) for i, ip := range ips { ipv4, err := common.ParseIPv4(ip) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "invalid IPv4 address %q", ip) } ipv4s[i] = int64(ipv4) } return ipv4s, nil }
The late Steve Jobs may be as venerated within the company he founded as he is outside it. But that doesn't mean his successor is suffering by comparison. In fact, according to anonymous employee posts on the employer reviews website Glassdoor, Cook has the highest approval rating of any CEO in tech — indeed, any CEO in the U.S. — a whopping 97%. That's a small but significant step above Jobs, who garnered 95% approval during his final year in the CEO role. (Cook's year includes the months he was officially filling in for Jobs while the founder was on medical leave.) But for that extra 2%, Cook would not be the sole winner of Glassdoor's list of tech CEOs. He'd be level-pegging with Qualcomm CEO Paul Jacobs, who has had a stunning year as far as his employees are concerned, rising from 87% approval to 95%. Meanwhile, Google CEO Larry Page isn't faring so well — he's seen a 2% decline in employee satisfaction compared to his predecessor, Eric Schmidt. Given that Page has been on a tear killing projects in order to focus on a few core products, it's perhaps surprising that his number is still as high as 94%. As for Cook, there are any number of reasons why his popularity is so high. Apple shares are soaring past $600, a 50% premium since Jobs' departure. He's set up a matching program for charitable donations, something Jobs famously refused to do. And he's dealt expertly with threats to the Apple image — most recently making an impromptu visit to the Foxconn factories in China. "The products speak for themselves and the company," writes one anonymous Apple engineer on Glassdoor. "We have the best management team anywhere," says another Cupertino reviewer. Perhaps the real question should be: What's the deal with the 3% of Apple employees who don't like Tim Cook?
def ned2lists(fname):
    """Parse an OMNeT++ .ned file describing point-to-point channels.

    Returns ``(connections, n, edges, capacities)`` where ``connections[i]``
    lists the neighbours of node ``i`` ordered by local port number, ``n`` is
    derived from the largest integer seen in any link line, ``edges`` holds
    both directions of every channel, and ``capacities`` holds the matching
    per-direction channel capacities (kbps).
    """
    link_re = re.compile(r'\s+node(\d+).port\[(\d+)\]\s+<-->\s+Channel(\d+)kbps\s+<-->\s+node(\d+).port\[(\d+)\]')
    channels = []
    capacities = []
    with open(fname) as fobj:
        for line in fobj:
            match = link_re.match(line)
            if not match:
                continue
            fields = list(map(int, match.groups()))
            capacities.append(fields[2])  # channel capacity in kbps
            del fields[2]                 # keep [node_a, port_a, node_b, port_b]
            channels.append(fields)
    n = max(map(max, channels)) + 1
    # Map each node's ports to the neighbour reached through that port.
    port_maps = [{} for _ in range(n)]
    for node_a, port_a, node_b, port_b in channels:
        port_maps[node_a][port_a] = node_b
        port_maps[node_b][port_b] = node_a
    connections = [[dest for _, dest in sorted(ports.items())] for ports in port_maps]
    edges = [(a, b) for a, _, b, _ in channels] + [(b, a) for a, _, b, _ in channels]
    return connections, n, edges, capacities + capacities
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/core/layout/custom/custom_layout_fragment.h"

#include "third_party/blink/renderer/core/layout/custom/custom_layout_fragment_request.h"

namespace blink {

// A fragment produced for a CustomLayoutFragmentRequest. It is a
// ScriptWrappable (see Trace below), so its sizes are stored as doubles,
// converted once from LayoutUnit at construction.
CustomLayoutFragment::CustomLayoutFragment(
    CustomLayoutFragmentRequest* fragment_request,
    const LayoutUnit inline_size,
    const LayoutUnit block_size)
    : fragment_request_(fragment_request),
      inline_size_(inline_size.ToDouble()),
      block_size_(block_size.ToDouble()) {}

// Delegates to the originating request; the fragment itself holds no box.
LayoutBox* CustomLayoutFragment::GetLayoutBox() const {
  return fragment_request_->GetLayoutBox();
}

// Validity is likewise delegated to the request.
bool CustomLayoutFragment::IsValid() const {
  return fragment_request_->IsValid();
}

// Traces the request for garbage collection, then the ScriptWrappable base.
void CustomLayoutFragment::Trace(blink::Visitor* visitor) {
  visitor->Trace(fragment_request_);
  ScriptWrappable::Trace(visitor);
}

}  // namespace blink
<reponame>swinner2/algomart<gh_stars>0 import { PacksService } from '@algomart/shared/services' import { DependencyResolver } from '@algomart/shared/utils' import { logger } from '@api/configuration/logger' import { Model } from 'objection' export default async function handlePackAuctionCompletionTask( registry: DependencyResolver ) { const log = logger.child({ task: 'handle-pack-auction-completion' }) const packs = registry.get<PacksService>(PacksService.name) const trx = await Model.startTransaction() try { const result = await packs.handlePackAuctionCompletion(trx) log.info('handled %d completed pack auctions', result) await trx.commit() } catch (error) { await trx.rollback() log.error(error as Error, 'failed to handle completed pack auctions') } }
// Start registers API Routes and starts Server on the specified port func (s *Server) Start() error { s.log.Infof("Listening for admission reviews on %s", s.srv.Addr) s.rtr.HandleFunc("/v1/ping", s.debugHandler).Methods("GET") s.rtr.HandleFunc("/v1/api/admission/review", s.admissionHandler).Methods("POST") go func() { if err := s.srv.ListenAndServeTLS(s.certPath, s.keyPath); err != nil { s.log.Errorf("Failed to start REST Server. Err: %v", err) } else { s.log.Info("REST Server running!") } }() return nil }
Compositions for coatings for packaging products and methods of coating application FIELD: chemistry. SUBSTANCE: the invention relates to a composition for coating various substrates, including metal substrates of packaging products (for example, containers for food products or drinks), and to a method of obtaining such packaging products. The product comprises: a metal substrate of a container for food products or drinks, or a part thereof, and a coating applied to at least part of the surface of the metal substrate, the coating containing a film-forming quantity of a polyether polymer having one or more segments of the following formula I: -O-Ar-(Rn-Ar)n-O- (the n's denoting subscripts), where each Ar independently represents a phenylene group, each n independently stands for 0 or 1, and R, if present, represents a divalent organic group having a molecular weight lower than 500; the two oxygen atoms depicted are each ether oxygens. The glass transition temperature (Tg) of the polymer is at least 70°C. The coating composition is at least substantially free of bisphenol A and of the diglycidyl ether of bisphenol A, and the polyether polymer is the reaction product of ingredients that include a polyepoxide and a polyhydric phenol. EFFECT: the invention makes it possible to obtain a coating with resistance to whitening, corrosion and etching, and with high adhesion to the metal substrate. 24 cl, 4 tbl
class ExtendDefinitionType:
    """Reference to an OVAL definition extended by a criteria, plus its result.

    The ``definition_ref``/``version`` pair identifies the exact extended
    definition used during analysis. ``variable_instance`` distinguishes
    multiple evaluations of the same definition with different variable
    values (definitions without variables only ever have one instance).
    ``negate`` inverts the evaluated result (e.g. "software installed"
    becomes "software NOT installed"), and ``applicability_check`` marks the
    extend_definition as determining whether the OVAL Definition applies to
    a given system at all. ``result`` holds the evaluation outcome; see
    ``ResultEnumeration`` for the possible values.
    """
    # True when this extend_definition is used as an applicability check.
    applicability_check: Optional[bool] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
    # Id of the extended definition, e.g. "oval:org.example:def:1". Required.
    definition_ref: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
            "pattern": r"oval:[A-Za-z0-9_\-\.]+:def:[1-9][0-9]*",
        }
    )
    # Specific version of the OVAL Definition used during analysis. Required.
    version: Optional[int] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
    # Differentiates repeated evaluations of the same definition with
    # different variable bindings; defaults to the single instance "1".
    variable_instance: int = field(
        default=1,
        metadata={
            "type": "Attribute",
        }
    )
    # When True the extended definition's result is negated during analysis.
    negate: bool = field(
        default=False,
        metadata={
            "type": "Attribute",
        }
    )
    # Evaluation outcome for this extended definition. Required.
    result: Optional[ResultEnumeration] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
<gh_stars>1-10 package clash import ( "encoding/json" "github.com/ghodss/yaml" "github.com/gin-gonic/gin" mid "github.com/uouuou/ServerManagerSystem/middleware" "net/http" "os" ) // GetClashInfo 读取clash的一些设置给前端 func GetClashInfo(c *gin.Context) { var rawConfig RawConfig config, err := os.ReadFile(mid.Dir + "/config/configClash.yaml") if err != nil { mid.Log.Error(err.Error()) } toJSON, err := yaml.YAMLToJSON(config) if err != nil { mid.Log.Error(err.Error()) } err = json.Unmarshal(toJSON, &rawConfig) if err != nil { mid.Log.Error(err.Error()) } resultBody := mid.ResultBody{ Code: 2000, Data: rawConfig, Message: "查询成功", } c.JSON(http.StatusOK, resultBody) }
Soilless systems as an alternative to wild strawberry (Fragaria vesca L.) traditional open-field cultivation in marginal lands of the Tuscan Apennines to enhance crop yield and producers’ income ABSTRACT Yield and quality of wild strawberry (Fragaria vesca L.) cultivars ‘Regina delle Valli’ and ‘Alpine’ cultivated in soil, as the traditional production system, and in soilless (both in open air and protected environments), as an innovative and sustainable production system, were investigated in a marginal and inner area of the Pistoiese Apennine Mountains during two production seasons. An earlier marketable production was obtained in soilless culture, but total marketable yield was higher from plants grown in a traditional open-field environment. Fruits obtained from the protected soilless system were larger, but developed some mildew, had some visual defects, and revealed a reduced skin chroma index, flesh firmness, and total soluble solids content compared to fruits harvested from plants grown under direct sunlight, while no significant differences were observed in total titratable acidity and pH between growing systems. ‘Alpine’ was more suited to soilless protected cultivation, with a much higher yield compared to ‘Regina delle Valli’. Physicochemical properties of berries were not affected by the cultivar, but fruit quality changed with plant age and seasonal crop cycle. ‘Alpine’ fruits gained a greater sensorial preference, both in traditional soil and soilless culture.
async def checkLevel(cls:"PhaazebotDiscord", Message:discord.Message, ServerSettings:DiscordServerSettings) -> None:
	"""Grant the message author 1 EXP on this guild.

	Fetches (or lazily creates) the author's per-guild stats record, then
	bumps the EXP counter both in memory and in the database, refreshing
	the stored username/nickname along the way, and finally checks for a
	level-up. Nothing is awarded when leveling is disabled for the channel
	or for the whole guild.
	"""
	fetched:list = await getDiscordServerUsers(cls, Message.guild.id, member_id=Message.author.id)
	if fetched:
		LevelUser:DiscordUserStats = fetched[0]
	else:
		# first message we see from this member on this guild -> create a record
		LevelUser:DiscordUserStats = await newUser(cls, Message.guild.id, Message.author.id, username=Message.author.name, nickname=Message.author.nick)

	# leveling switched off for this channel or the entire guild
	if str(Message.channel.id) in ServerSettings.disabled_levelchannels: return
	if ServerSettings.owner_disable_level: return

	LevelUser.exp += 1

	cls.BASE.PhaazeDB.query("""
		UPDATE `discord_user`
		SET `exp` = `exp` + 1,
			`username` = %s,
			`nickname` = %s
		WHERE `discord_user`.`guild_id` = %s
		AND `discord_user`.`member_id` = %s""",
		(
			Message.author.name,
			Message.author.nick,
			str(LevelUser.server_id),
			str(LevelUser.member_id)
		)
	)

	await checkLevelProgress(cls, Message, LevelUser, ServerSettings)
// BuildTrackerStorage builds TrackerStorage for abstraction func BuildTrackerStorage() (TrackerStorage, error) { var ts TrackerStorage switch config.Tracker.Strategy { case "mysql": mts, err := BuildMySQLTrackerStorage() if err != nil { return nil, err } ts = mts default: return nil, errors.New("not supported data source") } if err := ts.Prepare(); err != nil { return nil, err } return ts, nil }
Ding dong over Cameron's 'British' ping pong gift to Obama that was made in China It was meant as a thoughtful gift, fondly recalling the moment when David Cameron and Barack Obama teamed up for a spot of table tennis doubles last year. But the ping-pong table given to the President as a proud example of British manufacturing this week was, in fact, made in China. The Dunlop table handed over during Mr Cameron's official visit to the U.S. has been custom finished with a Union flag at one end, and the Stars and Stripes at the other. It comes complete with bats bearing the two nations' flags and is painted with the legend 'Presented to President Obama by Prime Minister David Cameron, March 2012'. Souvenir: The customised Dunlop table tennis table given to the Obamas by the Camerons as a reminder of a game the two leaders played on the President's visit to the UK last year Doubles: President Obama and David Cameron play table tennis with students during a visit to the Globe Academy in London last May A statement from Number 10 reads: 'The gift was selected as something that the whole Obama family could enjoy and to commemorate President Obama’s state visit to the UK last May, when the President and Prime Minister teamed up to play pupils at table tennis at the Globe Academy school in south London.' 'Founded in 1886, Dunlop is a truly British company which operates in over 70 countries. 'It is the most successful sports brand of the 20th century and a fitting gift for the occasion as we approach the London 2012 Olympics.' But, according to the Sunday Mirror, though the £800 EVO 8000 was designed and finished in Britain, sports retailers confirm it rolled off the factory line in China. RELATED ARTICLES Previous 1 Next Even the President is Irish for a day! Barack O'Bama joins... 
Share this article Share Meanwhile, the blue silk scarf given to Michelle Obama may be steeped in British tradition, with Number 10 saying the pattern was'inspired by elegant Victorian wallpapers' - but it was actually made in Italy. Glasgow's Jonathan Saunders designed the scarf, which goes for £289 at Harrods. Mr Obama made sure the Camerons will take home a little taste of America, presenting them with a custom-designed speciality barbeque grill that is both steeped in symbolism and kitted out with capabilities that a gourmet chef would appreciate. He chose the gift as a way to commemorate the time that he and Mr Cameron spent grilling together on his visit to Downing Street last year, where the two men served up burgers, sausages, and Kentish lamb chops. Fully equipped: President Obama presented Prime Minister Cameron with a Engelbrecht Grills & Cookers grill, from the company's 1000 Braten series, prices of which start at $1,895 for the most basic model On that visit last May, the two men stood behind table-top grills and served 150 military servicemen and women who attended the casual affair. The new barbeque makes their prior grilling equipment look puny by comparison. It was made by a company made in Mr Obama's home state of Illinois called Engelbrecht Grills & Cookers. This particular grill comes from the company's 1000 Braten series, prices of which start at $1,895 (£1,196) for the most basic model, not to mention the custom additions that were added to Mr Cameron's. Symbolising the two countries' 'special relationship', the grill has a plaque engraved with 'friendship flags' on the utility shelf. In addition to the fact that the company is based in Illinois and has a charming story - it was started by a welder in his garage - the company is also known for its dedication to environmentally friendly practices, according to the White House press statement. 
Time amongst friends: Compared to the grills the heads of state used last year, Mr Obama's gift is much more intense The Engelbrecht website declares that Braten grills cater to a chef's primal desires. 'We all love our food cooked over a wood fire (it's our caveman instincts) - the problem is most grills can't take the head of burning wood,' the website reads. That isn't a problem for the Braten, however, as it can sustain heat of up to 1000 degrees. 'This would be brutal for most grills, as these temperatures can deform or melt steel,' Englebrecht says. Though the grill is not the one that average Americans use to fry up dinner for the family, the cost was not a burden on them either as the company donated the grill to the State Department. Also involved: Shown here at the London event in 2011, Mrs Obama and Mrs Cameron also exchanged gifts this year. In keeping with the culinary theme, Mrs Obama gave Mrs Cameron a pot of honey To make sure that Mr and Mrs Cameron are not outdone by the grill itself, the President also included custom chef jackets with their names embroidered on each along with the seal of the President. In keeping with the culinary tone of their gifts, Mrs Obama gave Mrs Cameron a crystal jar of honey collected from the White House beehives and had it engraved with 'White House Honey'. Rather than give the three Cameron children a sample of American candy - as that would not fit with Mrs Obama's healthy eating platform - they were each given customised bean bag chairs which have the seal of the President and their names stitched on. As for the Obama children, Malia and Sasha, they made out with a set of classic Penguin books, and though the titles were not announced, they are said to be 'British classics suitable for their age and in customised sleeves'.
import Controller from '@ember/controller'; import { action } from '@ember/object'; import { tracked } from '@glimmer/tracking'; import { inject as service } from '@ember/service'; import Store from '@ember-data/store'; import RouterService from '@ember/routing/router-service'; // @ts-ignore import hljs from 'highlight.js/lib/core'; import { FilterFormErrors } from 'frontend-lokaalbeslist/components/subscription-filter/filter'; enum Frequency { Dagelijks = 'dagelijks', Wekelijks = 'wekelijks', Maandelijks = 'maandelijks', } export default class SubscribeController extends Controller { @service declare store: Store; @service declare router: RouterService; queryParams = ['search', 'governanceAreas']; @tracked search: string = ""; @tracked governanceAreas: string[] = []; @tracked frequency: Frequency = Frequency.Wekelijks; @tracked errors: FilterFormErrors | undefined; constructor() { super(); } get frequencies() { return Object.keys(Frequency).filter((x) => isNaN(Number(x))); } @action changeFrequency(value: Frequency): void { this.frequency = value; } @action updateEmail(event: Event) { this.model.email = (event.target as HTMLInputElement).value; } @action setErrors(errors: FilterFormErrors) { this.errors = errors; } }
def testMknodeUmaskApplied(self):
    """mknod must apply the process umask to the created node's mode."""
    # Default creation mode is 0o666; umask 0o22 clears group/other write
    # bits, leaving 0o644.
    self.os.umask(0o22)
    self.os.mknod('nod1')
    self.assertModeEqual(0o644, self.os.stat('nod1').st_mode)
    # umask 0o27 additionally clears all "other" bits, leaving 0o640.
    self.os.umask(0o27)
    self.os.mknod('nod2')
    self.assertModeEqual(0o640, self.os.stat('nod2').st_mode)
By By Amanda Tennis Sep 6, 2010 in Crime New York - A bride faked having cancer in order to receive wedding donations from family and strangers. All the wedding expenses were paid for including the honeymoon and bridal dress. According to The couple, both 23, were featured in an April 26 Times Herald-Record article about strangers who had come together to pay for their dream wedding while Vega was still well enough to walk down the aisle. Vega’s husband, Michael O’Connell, has claimed that Vega’s doctor sent a letter to the couple saying that her condition was fake. O’Connell called the doctor’s office and discovered that his wife was never a patient in that office. Vega denies lying about her cancer diagnosis. In Toronto, Canada, last month, a young woman Jessica Vega, a New York bride, was accused by her husband to have faked terminal leukemia to receive wedding donations. She received money from strangers and family members that paid for everything from the bridal gown to the honeymoon.According to Fox News Vega’s husband, Michael O’Connell, has claimed that Vega’s doctor sent a letter to the couple saying that her condition was fake. O’Connell called the doctor’s office and discovered that his wife was never a patient in that office.Vega denies lying about her cancer diagnosis.In Toronto, Canada, last month, a young woman defrauded $20,000 from friends and locals when she told them she had cancer. She later said she was faking the entire time, earning herself an arrest and three charges of fraud. More about Crime, Cancer, Fake, Wedding More news from crime cancer fake wedding
""" this example shows how to add_grating couplers for single fiber in single fiber out (no fiber array) """ import pp from pp.routing import route_fiber_single from pp.add_labels import get_optical_text def add_grating_couplers( component, get_route_factory=route_fiber_single, optical_io_spacing=50, min_input2output_spacing=200, optical_routing_type=2, bend_factory=pp.c.bend_circular, grating_coupler=pp.c.grating_coupler_te, straight_factory=pp.c.waveguide, with_align_ports=True, layer_label=pp.LAYER.LABEL, ): """ returns component with grating ports and labels on each port can add align_ports reference structure """ component = pp.call_if_func(component) grating_coupler = pp.call_if_func(grating_coupler) c = pp.routing.add_fiber_array( component, optical_io_spacing=optical_io_spacing, bend_factory=bend_factory, straight_factory=straight_factory, grating_coupler=grating_coupler, get_route_factory=get_route_factory, optical_routing_type=optical_routing_type, min_input2output_spacing=min_input2output_spacing, ) if with_align_ports: gc_port_name = list(grating_coupler.ports.keys())[0] gci = c << grating_coupler gco = c << grating_coupler length = c.ysize - 2 * grating_coupler.xsize wg = c << straight_factory(length=length) wg.rotate(90) wg.xmin = c.xmax + optical_io_spacing - grating_coupler.ysize / 2 wg.ymin = c.ymin + grating_coupler.xsize gci.connect(gc_port_name, wg.ports["W0"]) gco.connect(gc_port_name, wg.ports["E0"]) port = wg.ports["E0"] label = get_optical_text( port, grating_coupler, 0, component_name=f"loopback_{component.name}" ) c.add_label(label, position=port.midpoint, layer=layer_label) port = wg.ports["W0"] label = get_optical_text( port, grating_coupler, 1, component_name=f"loopback_{component.name}" ) c.add_label(label, position=port.midpoint, layer=layer_label) return c def test_add_grating_couplers( c=pp.c.ring_single_bus(gap=0.3, bend_radius=5, wg_width=0.45) ): cc = add_grating_couplers(c) assert cc return cc if __name__ == "__main__": c = 
pp.c.ring_single_bus(gap=0.3, bend_radius=5, wg_width=0.45) cc = test_add_grating_couplers(c) pp.show(cc)
The Effects of International F/X Markets on Domestic Currencies Using Wavelet Networks: Evidence from Emerging Markets This paper proposes a powerful methodology wavelet networks to investigate the effects of international F/X markets on emerging markets currencies. We used EUR/USD parity as input indicator (international F/X markets) and three emerging markets currencies as Brazilian Real, Turkish Lira and Russian Ruble as output indicator (emerging markets currency). We test if the effects of international F/X markets change across different timescale. Using wavelet networks, we showed that the effects of international F/X markets increase with higher timescale. This evidence shows that the causality of international F/X markets on emerging markets should be tested based on 64-128 days effect. We also find that the effects of EUR/USD parity on Turkish Lira is higher on 17-32 days and 65-128 days scales and this evidence shows that Turkish lira is less stable compare to other emerging markets currencies as international F/X markets effects Turkish lira on shorten time scale.
<filename>src/usecases/repositories/User/IUserRepository.ts<gh_stars>0 import UserModel from "entities/User/User.model"; import { User } from "../../../infra/typeorm/entities/User/User.entity"; export default interface IUserRepository { create(user: UserModel): Promise<User>; find(): Promise<User[]>; findById(id: string): Promise<User>; generateInstance(user: UserModel): Promise<User>; }
#include <stdio.h>
#include <stdlib.h>

/*
 * Reads a 5x5 grid of integers and prints how many moves are needed to
 * bring the cell containing 1 to the center (row 2, col 2), where one move
 * shifts it one row or one column. The original nested if-chains computed
 * exactly |r-2| + |c-2|; this writes that directly.
 */
int main(void)
{
    int a[5][5];
    int i, j;
    /* Default to the center so the answer is 0 if no cell contains 1
     * (the original left r and c uninitialized in that case). */
    int r = 2, c = 2;

    for (i = 0; i < 5; i++)
        for (j = 0; j < 5; j++) {
            scanf("%d", &a[i][j]);
            if (a[i][j] == 1) {
                r = i;
                c = j;
            }
        }

    printf("%d", abs(r - 2) + abs(c - 2));
    return 0;
}
POLITICO Screen grab Laura Ingraham says she’d be ‘honored’ to be Trump’s press secretary Conservative talk radio host Laura Ingraham said she is “honored” to be under consideration to be the press secretary for President-elect Donald Trump’s presidential administration when it begins next year, she told Fox News Monday night. "We’ll see what happens," she said, adding that "I think people are getting a little far ahead of the narrative." Ingraham has been a longtime supporter of Trump’s, boosting his candidacy on her radio show and during TV appearances. She also helped Trump prepare for the presidential debates against Hillary Clinton. "It's a big decision, but I'm at the point where, if my country needs me, and if I can do something to actually, you know, advance the Trump agenda, which is stuff I have written about now for 15 years, with trade, immigration and just renewing America, then I obviously have to seriously consider that,” she said on Fox News. Press secretary is just one of an avalanche of jobs Trump must fill before he takes office in January. Thus far, he has appointed only his chief of staff, Republican National Committee Chairman Reince Priebus, and his chief strategist, former Breitbart executive Steve Bannon. Other high-profile names reported to be under consideration for posts in Trump’s White House include former New York City Mayor Rudy Giuliani, Sen. Jeff Sessions (R-Ala.), retired neurosurgeon Ben Carson and former House Speaker Newt Gingrich.
// Head returns the metadata of available entries.
//
// It fetches the latest checkpoint from the backing database and returns the
// raw checkpoint bytes together with the tree size. NOTE(review): on error
// the (possibly zero-valued) checkpoint and size are returned alongside err;
// callers should check err first.
func (l *mySQLLog) Head(ctx context.Context) ([]byte, int64, error) {
	size, cp, _, err := l.db.GetLatestCheckpoint(ctx)
	return cp, int64(size), err
}
import hashlib
from re import I  # NOTE(review): unused import; consider removing.
import sys
from functools import wraps
from time import time

from tqdm import tqdm
from einops import rearrange
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.cuda.amp import autocast

from advbench.datasets import FFCV_AVAILABLE
from sklearn.metrics import balanced_accuracy_score


def timing(f):
    """Decorator that prints the wall-clock runtime of ``f`` after each call."""
    @wraps(f)
    def wrap(*args, **kw):
        ts = time()
        result = f(*args, **kw)
        te = time()
        print(f'func:{f.__name__} took: {te-ts:.3f} sec')
        return result
    return wrap


def seed_hash(*args):
    """Derive an integer hash from all args, for use as a random seed."""
    args_str = str(args)
    return int(hashlib.md5(args_str.encode("utf-8")).hexdigest(), 16) % (2**31)


def print_full_df(df):
    """Print a DataFrame without pandas' row/column truncation."""
    with pd.option_context('display.max_rows', None, 'display.max_columns', None):
        print(df)


@torch.no_grad()
def accuracy(algorithm, loader, device):
    """Top-1 accuracy (%) of ``algorithm`` on clean inputs from ``loader``."""
    correct, total = 0, 0
    algorithm.eval()
    algorithm.export()
    for imgs, labels in tqdm(loader):
        imgs, labels = imgs.to(device), labels.to(device)
        # Mixed precision is only used when the FFCV pipeline is active.
        if FFCV_AVAILABLE:
            with autocast():
                output = algorithm.predict(imgs)
        else:
            output = algorithm.predict(imgs)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(labels.view_as(pred)).sum().item()
        total += imgs.size(0)
    algorithm.train()
    algorithm.unexport()
    return 100. * correct / total


@torch.no_grad()
def accuracy_mean_overall(algorithm, loader, device):
    """Returns (overall accuracy %, class-balanced accuracy %) on clean data."""
    correct, total = 0, 0
    true = []
    preds = []
    algorithm.eval()
    algorithm.export()
    for imgs, labels in tqdm(loader):
        imgs, labels = imgs.to(device), labels.to(device)
        if FFCV_AVAILABLE:
            with autocast():
                output = algorithm.predict(imgs)
        else:
            output = algorithm.predict(imgs)
        pred = output.argmax(dim=1, keepdim=True)
        true.append(labels.cpu().numpy())
        preds.append(pred.detach().cpu().numpy())
        correct += pred.eq(labels.view_as(pred)).sum().item()
        total += imgs.size(0)
    algorithm.train()
    algorithm.unexport()
    true = np.concatenate(true)
    preds = np.concatenate(preds)
    mean = balanced_accuracy_score(true, preds)
    return 100. * correct / total, 100. * mean


@torch.no_grad()
def accuracy_mean_overall_loss(algorithm, loader, device, max_batches=None):
    """Returns (overall acc %, balanced acc %, mean per-sample loss).

    ``max_batches`` optionally truncates evaluation to the first N batches.
    """
    correct, total = 0, 0
    true = []
    preds = []
    losses = []
    algorithm.eval()
    algorithm.export()
    for batch_idx, (imgs, labels) in tqdm(enumerate(loader)):
        if max_batches is not None and batch_idx > max_batches - 1:
            break
        imgs, labels = imgs.to(device), labels.to(device)
        if FFCV_AVAILABLE:
            with autocast():
                output = algorithm.predict(imgs)
        else:
            output = algorithm.predict(imgs)
        loss = algorithm.classifier.loss(output, labels, reduction='none')
        pred = output.argmax(dim=1, keepdim=True)
        true.append(labels.cpu().numpy())
        preds.append(pred.detach().cpu().numpy())
        correct += pred.eq(labels.view_as(pred)).sum().item()
        losses.append(loss.detach().cpu().numpy())
        total += imgs.size(0)
    algorithm.train()
    algorithm.unexport()
    true = np.concatenate(true)
    preds = np.concatenate(preds)
    loss = np.concatenate(losses)
    mean = balanced_accuracy_score(true, preds)
    return 100. * correct / total, 100. * mean, np.mean(loss)


def adv_accuracy(algorithm, loader, device, attack):
    """Top-1 accuracy (%) under ``attack``-perturbed inputs.

    The attack runs outside ``no_grad`` (it may need gradients); only the
    final prediction is gradient-free.
    """
    correct, total = 0, 0
    algorithm.eval()
    algorithm.export()
    for imgs, labels in loader:
        imgs, labels = imgs.to(device), labels.to(device)
        adv_imgs, _ = attack(imgs, labels)
        with torch.no_grad():
            if FFCV_AVAILABLE:
                with autocast():
                    output = algorithm.predict(adv_imgs)
            else:
                output = algorithm.predict(adv_imgs)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(labels.view_as(pred)).sum().item()
            total += imgs.size(0)
    algorithm.train()
    algorithm.unexport()
    return 100. * correct / total


def adv_accuracy_loss_delta(algorithm, loader, device, attack, max_batches=None):
    """Worst-case adversarial accuracy/loss plus per-sample diagnostics.

    The attack may return (adv_imgs, delta) or (adv_imgs, delta, labels)
    where the perturbed batch is S perturbations per image; per-image
    worst cases are taken over the S axis.

    Returns (worst-case acc %, mean acc % over all perturbations,
    mean worst-case loss, per-sample accs, per-sample losses, deltas).
    """
    adv_correct, correct, total, total_worst, adv_losses = 0, 0, 0, 0, 0
    losses, deltas, accs = [], [], []
    algorithm.eval()
    #algorithm.export()
    with torch.no_grad():
        for batch_idx, (imgs, labels) in tqdm(enumerate(loader)):
            if max_batches is not None and batch_idx > max_batches - 1:
                break
            imgs, labels = imgs.to(device), labels.to(device)
            if FFCV_AVAILABLE:
                with autocast():
                    attacked = attack(imgs, labels)
                    if len(attacked) == 2:
                        adv_imgs, delta = attacked
                    elif len(attacked) == 3:
                        adv_imgs, delta, labels = attacked
                    output = algorithm.predict(adv_imgs)
            else:
                attacked = attack(imgs, labels)
                if len(attacked) == 2:
                    adv_imgs, delta = attacked
                elif len(attacked) == 3:
                    adv_imgs, delta, labels = attacked
                output = algorithm.predict(adv_imgs)
            loss = algorithm.classifier.loss(output, labels, reduction='none')
            pred = output.argmax(dim=1)
            pred = rearrange(pred, '(B S) -> B S', B=imgs.shape[0])
            eq = pred.eq(labels.view_as(pred))
            accs.append(eq.view_as(loss).cpu().numpy())
            # An image counts as adversarially correct only if every
            # perturbation of it is classified correctly.
            worst, _ = eq.min(dim=1)
            adv_correct += worst.sum().item()
            correct += eq.sum().item()
            losses.append(loss.cpu().numpy())
            loss = rearrange(loss, '(B S) -> B S', B=imgs.shape[0])
            worst_loss, _ = loss.max(dim=1)
            adv_losses += worst_loss.sum().item()
            deltas.append(delta.cpu().numpy())
            total += adv_imgs.size(0)
            total_worst += imgs.size(0)
            # BUGFIX(review): removed a stray `break` here that truncated
            # evaluation to the first batch regardless of ``max_batches``.
    algorithm.train()
    #algorithm.unexport()
    adv_acc = 100. * adv_correct / total_worst
    adv_mean = 100. * correct / total
    adv_loss = adv_losses / total_worst
    return adv_acc, adv_mean, adv_loss, np.concatenate(accs, axis=0), np.concatenate(losses, axis=0), np.concatenate(deltas, axis=0)


def adv_accuracy_loss_delta_balanced(algorithm, loader, device, attack, max_batches=None):
    """Like :func:`adv_accuracy_loss_delta` but also reports class-balanced
    worst-case and mean accuracies (via sklearn's balanced_accuracy_score).

    Returns (worst acc %, mean acc %, balanced worst acc %, balanced mean
    acc %, mean worst loss, per-sample accs, per-sample losses, deltas).
    """
    adv_correct, correct, total, total_worst, adv_losses = 0, 0, 0, 0, 0
    losses, deltas, accs, worst_preds, all_labels, repeated_labels, all_preds = [], [], [], [], [], [], []
    algorithm.eval()
    algorithm.export()
    with torch.no_grad():
        for batch_idx, (imgs, labels) in tqdm(enumerate(loader)):
            if max_batches is not None and batch_idx > max_batches - 1:
                break
            imgs, labels = imgs.to(device), labels.to(device)
            all_labels.append(labels.cpu().numpy())
            if FFCV_AVAILABLE:
                with autocast():
                    attacked = attack(imgs, labels)
                    if len(attacked) == 2:
                        adv_imgs, delta = attacked
                    elif len(attacked) == 3:
                        adv_imgs, delta, labels = attacked
                    output = algorithm.predict(adv_imgs)
            else:
                attacked = attack(imgs, labels)
                if len(attacked) == 2:
                    adv_imgs, delta = attacked
                elif len(attacked) == 3:
                    adv_imgs, delta, labels = attacked
                output = algorithm.predict(adv_imgs)
            loss = algorithm.classifier.loss(output, labels, reduction='none')
            pred = output.argmax(dim=1)
            all_preds.append(pred.cpu().numpy())
            repeated_labels.append(labels.cpu().numpy())
            pred = rearrange(pred, '(B S) -> B S', B=imgs.shape[0])
            eq = pred.eq(labels.view_as(pred))
            accs.append(eq.view_as(loss).cpu().numpy())
            worst, worst_idx = eq.min(dim=1)
            adv_correct += worst.sum().item()
            correct += eq.sum().item()
            losses.append(loss.cpu().numpy())
            loss = rearrange(loss, '(B S) -> B S', B=imgs.shape[0])
            worst_loss, _ = loss.max(dim=1)
            adv_losses += worst_loss.sum().item()
            deltas.append(delta.cpu().numpy())
            total += adv_imgs.size(0)
            total_worst += imgs.size(0)
            worst_preds.append(worst.cpu().numpy())
    algorithm.train()
    algorithm.unexport()
    adv_acc = 100. * adv_correct / total_worst
    adv_mean = 100. * correct / total
    adv_loss = adv_losses / total_worst
    adv_acc_bal = 100. * balanced_accuracy_score(np.concatenate(all_labels, axis=0), np.concatenate(worst_preds, axis=0))
    adv_mean_bal = 100. * balanced_accuracy_score(np.concatenate(repeated_labels, axis=0), np.concatenate(all_preds, axis=0))
    return adv_acc, adv_mean, adv_acc_bal, adv_mean_bal, adv_loss, np.concatenate(accs, axis=0), np.concatenate(losses, axis=0), np.concatenate(deltas, axis=0)


def adv_accuracy_loss_delta_ensembleacc(algorithm, loader, device, attack):
    """Adversarial accuracy plus an ensemble accuracy over perturbations.

    When the attack yields S perturbations per image, the ensemble
    prediction is the majority vote (mean of one-hot predictions) over the
    S perturbed copies, evaluated against the original labels.

    Returns (per-perturbation acc %, per-sample accs, per-sample losses,
    deltas, ensemble acc %).
    """
    correct, ensemble_correct, total, total_ens = 0, 0, 0, 0
    losses, accs, deltas = [], [], []
    algorithm.eval()
    algorithm.export()
    with torch.no_grad():
        for imgs, labels in tqdm(loader):
            imgs, labels = imgs.to(device), labels.to(device)
            if FFCV_AVAILABLE:
                with autocast():
                    attacked = attack(imgs, labels)
            else:
                attacked = attack(imgs, labels)
            # Keep the pre-attack labels: the ensemble vote is scored per
            # original image, not per perturbation.
            old_labels = labels
            if len(attacked) == 2:
                adv_imgs, delta = attacked
            elif len(attacked) == 3:
                adv_imgs, delta, labels = attacked
            if FFCV_AVAILABLE:
                with autocast():
                    output = algorithm.predict(adv_imgs)
            else:
                output = algorithm.predict(adv_imgs)
            loss = algorithm.classifier.loss(output, labels, reduction='none')
            pred = output.argmax(dim=1, keepdim=True)
            if len(attacked) == 3:
                # One-hot each per-perturbation prediction, average over the
                # S transforms, then take the argmax as the ensemble vote.
                ensemble_preds = torch.zeros_like(output)
                ensemble_preds[torch.arange(ensemble_preds.shape[0]), pred.squeeze()] = 1
                ensemble_preds = rearrange(ensemble_preds, '(B S) C -> B S C', B=imgs.shape[0], C=output.shape[1])
                ensemble_preds = ensemble_preds.mean(dim=1)
                ensemble_preds = ensemble_preds.argmax(dim=1, keepdim=True)
            else:
                ensemble_preds = pred
            losses.append(loss.cpu().numpy())
            deltas.append(delta.cpu().numpy())
            corr = pred.eq(labels.view_as(pred))
            accs.append(corr.cpu().numpy())
            ensemble_correct += ensemble_preds.eq(old_labels.view_as(ensemble_preds)).sum().item()
            correct += corr.sum().item()
            total += adv_imgs.size(0)
            total_ens += imgs.size(0)
    algorithm.train()
    algorithm.unexport()
    acc = 100. * correct / total
    ensemble_acc = 100. * ensemble_correct / total_ens
    return acc, np.concatenate(accs, axis=0), np.concatenate(losses, axis=0), np.concatenate(deltas, axis=0), ensemble_acc


class Tee:
    """File-like object that duplicates writes to stdout and a log file."""

    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        self.stdout.write(message)
        self.file.write(message)
        self.flush()

    def flush(self):
        self.stdout.flush()
        self.file.flush()
/**
 * Present user with questions to answer from the console.
 *
 * @param questions Array of questions for the user to answer
 * @return <pre>{@code ArrayList<String> }</pre> containing the user's answers,
 *         one entry per question (an empty string when no answer was given
 *         or the read failed)
 * @see org.wildfly.bpms.frontend.application.Main#getResponses
 */
protected static ArrayList<String> askUser(String[] questions) {
    // Presize to the known number of answers.
    ArrayList<String> responses = new ArrayList<>(questions.length);
    for (String question : questions) {
        try {
            String response = FrontendUtil.readLine("Enter %s", question);
            responses.add(response == null ? "" : response);
        } catch (BpmsFrontendException e) {
            logger.error(String.format(
                    "Encountered an error while trying to read from the console. "
                            + "The error is: %s%n", e.getMessage()));
            // Keep responses index-aligned with questions even when a read
            // fails (previously nothing was added, shifting later answers).
            responses.add("");
        }
    }
    return responses;
}
The Chinese government faked financial data for years, Chinese media reports. Foreign observers have long been suspicious of China’s economic data, and those suspicions may very well be justified. The governor of Liaoning province admitted that some provincial economic figures from 2011 to 2014 were fabricated, the People’s Daily reported Wednesday. “Many cities and counties in Liaoning province reported widespread fraudulent economic figures,” Gov. Chen Qiufa revealed at the eighth meeting of the 12th People’s Congress in Liaoning. Some fiscal revenue data was inflated by as much as 20 percent. Shenyang, the capital of Liaoning province, reported a fiscal revenue of $351 million in 2013; however, the actual figure was closer to $160 million. A report from the National Audit Office showed that falsified data was rampant throughout the province. More disconcerting is that the falsification of fiscal data may not be limited to one Chinese province and could be a nationwide concern. “It’s not only happening in Liaoning, but other provinces as well, as local governments are under pressure to show positive GDP figures,” Feng Liguo, a China Enterprise Confederation expert, told the Global Times. Recent revelations cast doubt over China’s 2016 GDP growth report scheduled to be released Friday. The Chinese government asserts that it achieved 6.7 percent growth last year — the accuracy of such reports is questionable. Economists and investors often use foreign proxy indexes to analyze China’s growth. China’s economic growth has been slowing in recent years. The Chinese government refers to the slowdown as the “new normal,” regarding the deceleration as a byproduct of the country’s attempts to restructure domestic economic engines. While these changes are jarring, the government still expects and demands high growth figures, putting pressure on provincial and local officials to perform or, at least, make it look like they are performing. 
Follow Ryan on Twitter. Send tips to [email protected]. Content created by The Daily Caller News Foundation is available without charge to any eligible news publisher that can provide a large audience. For licensing opportunities of our original content, please contact [email protected].
def alphbetise_names(tree):
    """Replace numeric leaf names with fixed-width alphabetic labels.

    Leaves are sorted by their current (numeric) name and relabelled
    'A', 'B', ..., 'AA', 'AB', ... using the minimum label width that
    covers all leaves. The tree is modified in place and returned.

    NOTE(review): assumes an ete3-style tree, where len(tree) is the
    number of leaves and `tree & name` looks up a node by name -- confirm.
    NOTE(review): `key=int` assumes every leaf name parses as an integer.
    """
    # Smallest k with 26**k >= number of leaves. When len(tree) == 1 this
    # is 0, producing a single empty label -- presumably not hit in practice.
    label_len = ceil(log(len(tree)) / log(26))
    # All fixed-width uppercase labels, in lexicographic (and thus sorted) order.
    labels = [''.join(letters) for letters in itl.product(ascii_uppercase, repeat=label_len)]
    # Sort leaves numerically so leaf "10" comes after leaf "9", not after "1".
    tiplabels = list(sorted(tree.get_leaf_names(), key=int))
    for i, leaf in enumerate(tiplabels):
        node = tree&leaf
        node.name = labels[i]
    return tree
/**
 * Prints the given error message to the requested output device(s):
 * the driver station, the roboRIO console, or both.
 *
 * @param errorMessage
 *            the message to be printed.
 * @param printTarget
 *            selects which device(s) receive the message.
 */
public void printError (String errorMessage, PrintsTo printTarget)
{
    rioTime = "";
    matchTime = Hardware.driverStation.getMatchTime();
    // "Both" implies each individual destination below.
    final boolean sendToBoth =
            printTarget == ErrorMessage.PrintsTo.driverStationAndRoboRIO;
    if (sendToBoth || printTarget == ErrorMessage.PrintsTo.driverStation)
        {
        // Prepend timing/context information before reporting to the DS.
        DriverStation.reportError(appendErrorMessage(errorMessage), false);
        }
    if (sendToBoth || printTarget == ErrorMessage.PrintsTo.roboRIO)
        {
        PrintsToRIO(errorMessage);
        }
}
Thomas Jefferson: Paleontologist "Nature intended me for the tranquil pursuits of science, by rendering them my supreme delight." (Letter to DuPont de Nemours as cited in Benson, 1971). Thomas Jefferson By Thomas O. Jewett Thomas Jefferson, third President of the United States, was probably our most accomplished man in public life as well as the most versatile. President John F. Kennedy, while entertaining a group of Nobel Laureates, quipped that this was probably the greatest gathering of intellect in the White House since Jefferson dined there alone. During his lifetime, Jefferson was an infallible oracle to half the population and a dangerous demagogue to the other half, but was universally recognized as a man of science. A fine mathematician and astronomer, he could reckon latitude and longitude as well as a ship captain. He calculated the eclipse of 1778 with great accuracy and was able to make suggestions for the improvement of almanacs on the equation of time. Jefferson was considered expert in anatomy, civil engineering, physics, mechanics, meteorology, architecture, and botany. He was able to read and write Greek, Latin, French, Spanish and Italian. He was recognized as a pioneer in ethnology, geography, anthropology and our subject paleontology. Because of his wide range of knowledge, Jefferson was ahead of his time in several lines of inquiry and advanced of contemporary scientists. Even so, Jefferson never failed to acknowledge that in science he was "an amateur." As a scientific man Jefferson was interested in all lines of science, but in all rather as an enthusiastic, highly appreciative, an intelligent amateur rather than a professional. He had no time to make himself thoroughly proficient in any one line. The working out of the details he left to others, whom he assisted and encouraged, to the best of his ability. (Clark, 1943). 
Jefferson was always ready to accept new discoveries and adopt new theories, even when they might contradict his own beliefs. In the spirit of the Enlightenment, with its faith in human reason and science, he maintained an open receptive frame of mind to all discoveries and scientific speculation. He believed that science held the key to knowledge for society, and this outlook, combined with his reformist, humanitarian, and utilitarian proclivities, motivated much of his life and thought. Jefferson visualized science as essentially utilitarian. His sight focused upon the benefits that science could provide humanity. No matter what line of scientific investigation he undertook, the idea of ultimate practical application seems to always have been in his mind. He seemed never to have followed any line through mere pointless curiosity. Even in his study of fossils Jefferson appeared to have had the idea that at some time a knowledge of them would prove of value to his countrymen. One of the first glimpses of Jefferson's interest in paleontology can be found in his Notes on the State of Virginia. It is his most impressive scientific achievement, in which he recorded his observations of flora, fauna, mountains, rivers, climate, population, laws, politics, customs and fossils of his native state. In Notes Jefferson, also, refuted the contentions of Count de Buffon that the animals common to both old world and new are smaller in the new. One of the reasons Jefferson wrote and published Notes was to refute a claim by the eminent naturalist, the Comte de Buffon, that human and animal life in America was degenerative and therefore inferior to the life forms in Europe. Buffon believed, Jefferson wrote in his Notes,"that nature is less active, less energetic on one side of the globe than she is on the other." 
Jefferson added with more than a hint of sarcasm, "as if both sides were not warmed by the same genial sun," and launched into a lengthy refutation of Buffon's hypothesis with convincing evidence that animals are actually larger in America than in Europe. The mastodon, or mammoth,was his clincher; Europe had produced no animal to match this behemoth...his shipment of mastodon fossils to Paris, therefore, was not entirely Enlightenment altruism; it was also a final salvo in a scientific war. Buffon's suggestion that infant America was nature's retardate drove him to collect the ancient bones of the mammoth...When he received his fossils, he catalogued them carefully and precisely, as was his habit, sending them off to Philadelphia for admiration, and to Paris for edification. He kept a few choice specimens, however, for his Monticello museum--trophies of a sort in commemoration of his private victory in the battle of New World versus Old. (McLaughlin, 1988) The entry room at Monticello had been turned by Jefferson into a natural history museum which showed his great interest in fossils. George Ticknor, when a young man, visited Jefferson in 1815 and describes the entry hall: On one side hang the head and horns of an elk, a deer, and a buffalo; another is covered with curiosities which Lewis and Clark found in their wild and perilous expedition. On the third, among many other striking matters, was the head of a mammoth, or, as Cuvier calls it, a mastodon, containing only os frontis, Mr. Jefferson tells me, that has yet been found. (Letter by George Ticknor, 1818 as cited in Rosenberger, 1953). These fossils were from the famous cache at Big Bone Lick, Kentucky. Jefferson had commissioned William Clark of the Lewis and Clark Expedition to explore the site, at his own expense. He kept the above mentioned specimens "for a special kind of Cabinet I have at Monticello." 
Jefferson was particularly proud of this collection and considered them the prize of his natural history collection. The majority of the bones he sent on to the American Philosophical Society in Philadelphia. (McLaughlin, 1988). Evidence of Jefferson's interest in paleontology is furnished by his contributions in the form of reports and specimens to the American Philosophical Society, of which he was elected a member in 1786 and became its president in 1797. As a member, Jefferson took the lead in 1792 in raising a thousand guineas to send Andrew Michaud across the continent to find out about...the bones of a mammoth...(Curtis, 1901). On August 19, 1796, he wrote a report to the Society (his only truly scientific report) describing bones of extraordinary size found beyond the Blue Mountains in Virginia. (Clark, 1943). In Greenbriar County, Virginia, in 1796, a deposit of bones, supposed to be those of a mammoth, were found and sent to Monticello, where Jefferson set them up and pronounced them to be those of "a carnivorous clawed animal entirely unknown to science." A curious sight might have been witnessed by people who lived the route of travel between Monticello and Philadelphia when the Vice President of the United States, on his way to take the oath of office and assume the second place in the gift of the nation, carried a wagon-load of bones for his baggage. He delivered them to Dr. Wistar, the naturalist of the American Philosophical Society, with a labored report under the date of March 10, 1797, entitled, "A Memoir of the Discovery of Certain Bones of an Unknown Quadruped, of the Clawed Kind, in the Western Part of Virginia. (Curtis, 1901). The Society passed a resolution to publish the report and requested Charles Peale to put the bones in the best order for Society use. These were the bones of Megalonyx, later named "Megalonyx Jefersoni", the first giant sloth found in North America. (Clark, 1943). 
Jefferson was head of the Society when that organization financed the excavation of the bones of a mastodon in Ulster County, New York in 1801. This was during the exciting and close contest Jefferson was waging for the Presidency. Even so, we find him carrying on a learned correspondence with Dr. Wistar over the fossils. This interest in paleontology often brought him the ridicule and wrath of his political opponents to whom scientific investigation meant neglect of his proper duties. This was particularly true in 1808 when the excitement over the embargo of commerce and the complications with Great Britain were at their height, he had a wagon load of specimens sent to the White House. Here he laid them out in the unfinished East Room, nicknamed the "Bone or Mastodon Room." (Clark, 1943). "Mr. Mammoth" as Jefferson was nicknamed was also roasted in poem for his delight in fossils. Go, wretch resign thy presidential chair, Disclose thy secret measures, foul and fair, Go search with curious eye, for horned frogs, Mid the Wild Louisianian bogs: Or, where the Ohio rolls his turbid stream, Dig for huge bones, thy glory and scheme. (As cited in Clark, 1943). Paleontology seems to have been Jefferson's main interest in a pure science. Some such as Frederick Lucas and Henry Osborn have dubbed him the "Father of Paleontology". They felt that Jefferson laid the foundations of the science with his refutation of Buffon's degeneracy theory, his invention of "stratigraphical" observation which established the fundamental principle of scientific excavation (Lehmann, 1985), and his work on the Megalonyx. There are some though, who feel that Jefferson does not deserve the title. They argue that the entire basis of his beliefs about paleontology were mistaken since he denied that any animal species could ever become extinct. "Such is the economy of nature, that in no instance can be produced her having permitted any race of her animals to become extinct." (As cited in Curtis, 1901). 
It is this reasoning which allowed Jefferson to put forth the theory that there was a large herd of mammoths wandering wild in the Mississippi Valley and one of the reasons he sponsored expeditions to the West. Perhaps Jefferson's greatest contribution to paleontology is that while President he helped to make it a respectable pursuit and was largely responsible through the American Philosophical Society for bringing together the materials necessary for its advancement. As the first citizen of the young nation, Jefferson's passion brought prestige and respectability to the young science. Bibliography Benson, G. Randolph. (1971). Thomas Jefferson as Social Scientist. Fairleigh Dickinson University Press, Cranbury N.J. Clark, Austin H. (1943). "Thomas Jefferson as Scientist," Journal of the Washington Academy of Sciences. 33, no. 7. Curtis, William Elroy. (1901). The True Thomas Jefferson. A.W. Elson & Co., Philadelphia. Lehmann, Karl. (1985). Thomas Jefferson American Humanist. University Press of Virginia, Charlottesville. McLaughlin, Jack. (1988). Jefferson and Monticello. Henry Holt And Company, New York. Rosenberger, Francis Coleman. (1953). Jefferson Reader. E.P. Dutton & Company, Inc., New York.
/**
 * Add, remove or re-order any {@link PropertySource}s in this application's
 * environment.
 *
 * @param environment this application's environment
 * @param args arguments passed to the {@code run} method
 * @see #configureEnvironment(ConfigurableEnvironment, String[])
 */
protected void configurePropertySources(ConfigurableEnvironment environment,
		String[] args) {
	MutablePropertySources sources = environment.getPropertySources();
	// Default properties have the lowest precedence of all sources.
	if (this.defaultProperties != null && !this.defaultProperties.isEmpty()) {
		sources.addLast(new MapPropertySource("defaultProperties", this.defaultProperties));
	}
	if (!this.addCommandLineProperties || args.length == 0) {
		return;
	}
	String name = CommandLinePropertySource.COMMAND_LINE_PROPERTY_SOURCE_NAME;
	if (!sources.contains(name)) {
		// No command line source yet: command line args take highest precedence.
		sources.addFirst(new SimpleCommandLinePropertySource(args));
		return;
	}
	// A source with the reserved name already exists (e.g. added by the user):
	// wrap both in a composite, with the actual command line args winning.
	PropertySource<?> existing = sources.get(name);
	CompositePropertySource composite = new CompositePropertySource(name);
	composite.addPropertySource(new SimpleCommandLinePropertySource(
			"springApplicationCommandLineArgs", args));
	composite.addPropertySource(existing);
	sources.replace(name, composite);
}
import { Connection } from "./connection";
import {
  ErrorResponse,
  GenericSuccessResponse,
  GetUsersResponse,
  GetUserTodosResponse,
  Todo,
  TodoTask,
  UserAuthResponse,
  UserCreateResponse,
} from "./types";

/** Callback invoked with the payload of a subscription event. */
type Handler<Data> = (data: Data) => void;

export type Wrapper = ReturnType<typeof wrap>;

/**
 * Wraps a raw Connection in a typed query/mutation/subscribe API.
 *
 * Fix: the original wrapped every `connection.fetch` call in
 * `new Promise((resolve) => fetch(...).then(resolve))`. That pattern
 * swallows rejections -- if the fetch failed, the returned promise
 * never settled. Each call now returns the fetch chain directly, so
 * rejections propagate to the caller; resolved values keep the same
 * response types as before.
 */
export const wrap = (connection: Connection) => ({
  connection,
  query: {
    user: {
      /** Authenticate with either a password or a previously issued token. */
      auth: (username: string, password?: string, token?: string): Promise<UserAuthResponse | ErrorResponse> =>
        connection.fetch('user:auth', { username, password, token })
          .then((f) => f as UserAuthResponse | ErrorResponse),
      /** Fetch all todos belonging to the given user. */
      getTodos: (username: string): Promise<GetUserTodosResponse | ErrorResponse> =>
        connection.fetch('user:get_todos', { username })
          .then((f) => f as GetUserTodosResponse | ErrorResponse),
    },
    todo: {
      /** Register this connection for todo change events. */
      subscribe: (): Promise<GenericSuccessResponse | ErrorResponse> =>
        connection.fetch('todo:subscribe', {})
          .then((f) => f as GenericSuccessResponse | ErrorResponse),
    },
    admin: {
      /** List all users (admin only). */
      getUsers: (): Promise<GetUsersResponse | ErrorResponse> =>
        connection.fetch('admin:get_users', {})
          .then((f) => f as GetUsersResponse | ErrorResponse),
    }
  },
  mutation: {
    user: {
      create: (username: string, password: string): Promise<UserCreateResponse | ErrorResponse> =>
        connection.fetch('user:create', { username, password })
          .then((f) => f as UserCreateResponse | ErrorResponse),
    },
    todo: {
      create: (username: string, name: string, tasks: TodoTask[]): Promise<GenericSuccessResponse | ErrorResponse> =>
        connection.fetch('todo:create', { username, name, tasks })
          .then((f) => f as GenericSuccessResponse | ErrorResponse),
      update: (username: string, id: string, name: string, tasks: TodoTask[]): Promise<GenericSuccessResponse | ErrorResponse> =>
        connection.fetch('todo:update', { username, id, name, tasks })
          .then((f) => f as GenericSuccessResponse | ErrorResponse),
      delete: (username: string, id: string): Promise<GenericSuccessResponse | ErrorResponse> =>
        connection.fetch('todo:delete', { username, id })
          .then((f) => f as GenericSuccessResponse | ErrorResponse),
      /** Mark individual tasks of a todo as completed / not completed. */
      updateProgress: (id: string, tasks: Array<{ taskId: string, completed: boolean }>): Promise<GenericSuccessResponse | ErrorResponse> =>
        connection.fetch('todo:update_progress', { id, tasks })
          .then((f) => f as GenericSuccessResponse | ErrorResponse),
    }
  },
  subscribe: {
    todo: {
      update: (handler: Handler<{ op: string, data: Todo }>) =>
        connection.addListener("todo:updated", handler),
      added: (handler: Handler<{ op: string, data: Todo }>) =>
        connection.addListener("todo:added", handler),
      deleted: (handler: Handler<{ op: string, data: Todo }>) =>
        connection.addListener("todo:deleted", handler),
    }
  }
})
package com.example.sistemidigitali.enums;

import android.graphics.Color;

/**
 * Classification labels for how a face mask is being worn, together with the
 * UI colors used to display each label (background + contrasting text color).
 */
public enum WearingModeEnum {
    MRNW("Mask not Worn", ColorsEnum.RED.getColor(), Color.WHITE),
    MRCW("Mask Correctly Worn", ColorsEnum.GREEN.getColor(), Color.WHITE),
    MSFC("Mask Folded above the Chin", ColorsEnum.YELLOW.getColor(), Color.BLACK),
    MRHN("Mask Hanging from an Ear", ColorsEnum.YELLOW.getColor(), Color.BLACK),
    MRFH("Mask on the Forehead", ColorsEnum.YELLOW.getColor(), Color.BLACK),
    MRTN("Mask on the Tip of the Nose", ColorsEnum.YELLOW.getColor(), Color.BLACK),
    MRNC("Mask Under the Chin", ColorsEnum.YELLOW.getColor(), Color.BLACK),
    MRNN("Mask Under the Nose", ColorsEnum.YELLOW.getColor(), Color.BLACK),
    TEST("Test", ColorsEnum.TEST.getColor(), Color.WHITE);

    // Enum constants are immutable; fields are final (they were mutable before,
    // which is unsafe for shared enum instances).
    private final String name;
    private final int backgroundColor;
    private final int textColor;

    WearingModeEnum(String name, int backgroundColor, int textColor) {
        this.name = name;
        this.backgroundColor = backgroundColor;
        this.textColor = textColor;
    }

    /** Human-readable label for this wearing mode. */
    public String getFullName() {
        return this.name;
    }

    /** Background color used when rendering this label. */
    public int getBackgroundColor() {
        return this.backgroundColor;
    }

    /** Text color chosen to contrast with the background color. */
    public int getTextColor() {
        return this.textColor;
    }
}
/**
 * This creates an adapter for a {@link de.dc.javafx.xcore.workbench.lecture.OrderedListContent}.
 * <!-- begin-user-doc -->
 * <!-- end-user-doc -->
 * @generated
 */
@Override
public Adapter createOrderedListContentAdapter() {
	// Lazily create a single shared item provider and reuse it for every
	// OrderedListContent -- the standard generated EMF adapter-factory pattern.
	if (orderedListContentItemProvider == null) {
		orderedListContentItemProvider = new OrderedListContentItemProvider(this);
	}
	return orderedListContentItemProvider;
}
def _TimeoutTasks(self, tasks_for_timeout): if not tasks_for_timeout: return inactive_time = int(time.time() * 1000000) - self._TASK_INACTIVE_TIME for task_identifier, task in iter(tasks_for_timeout.items()): last_active_time = task.last_processing_time if not last_active_time: last_active_time = task.start_time if last_active_time < inactive_time: logging.debug('Task {0:s} is abandoned'.format(task_identifier)) self._tasks_abandoned[task_identifier] = task del tasks_for_timeout[task_identifier]
package catalog

import (
	"context"
	"encoding/xml"
	"fmt"
	"math"
	"runtime"
	"sort"
	"time"

	"github.com/airbusgeo/geocube-ingester/catalog/entities"
	"github.com/airbusgeo/geocube-ingester/common"
	"github.com/airbusgeo/geocube-ingester/interface/catalog"
	"github.com/airbusgeo/geocube-ingester/interface/catalog/creodias"
	"github.com/airbusgeo/geocube-ingester/interface/catalog/gcs"
	"github.com/airbusgeo/geocube-ingester/service"
	"github.com/airbusgeo/geocube-ingester/service/log"
	"github.com/paulsmith/gogeos/geos"
	"golang.org/x/sync/errgroup"
)

// sceneBurstsInventory loads the burst annotations of a single scene and
// appends to scene.Tiles every burst whose footprint intersects the
// (prepared) area of interest. The matching swath/tile-number mapping is
// recorded in scene.Data.TileMappings, keyed by burst source ID.
func sceneBurstsInventory(ctx context.Context, scene *entities.Scene, pareaAOI *geos.PGeometry, annotationsProviders []catalog.AnnotationsProvider) error {
	log.Logger(ctx).Debug("Load annotations of " + scene.SourceID)
	bursts, err := burstsFromAnnotations(ctx, scene, annotationsProviders)
	if err != nil {
		return err
	}
	// Check that burst AOI intersects area AOI
	for _, burst := range bursts {
		burstAOI, err := geos.FromWKT(burst.GeometryWKT)
		if err != nil {
			return fmt.Errorf("burstsInventory.FromWKT: %w", err)
		}
		intersect, err := pareaAOI.Intersects(burstAOI)
		if err != nil {
			return fmt.Errorf("burstsInventory.Intersects: %w", err)
		}
		if intersect {
			// Add burst to scene
			scene.Tiles = append(scene.Tiles, burst)
			scene.Data.TileMappings[burst.SourceID] = common.TileMapping{SwathID: burst.Data.SwathID, TileNr: burst.Data.TileNr}
		}
	}
	log.Logger(ctx).Sugar().Debugf("Found %d bursts intersecting aoi in "+scene.SourceID, len(scene.Tiles))
	return nil
}

// sceneBurstsInventoryWorker consumes scenes from jobs and runs
// sceneBurstsInventory on each. Per-scene failures are logged, not
// returned (the "//return err" below was deliberately disabled so one bad
// scene does not abort the whole inventory). When ctx is done, remaining
// jobs are drained without being processed.
func sceneBurstsInventoryWorker(ctx context.Context, jobs <-chan *entities.Scene, pareaAOI *geos.PGeometry, annotationsProviders []catalog.AnnotationsProvider) error {
	for scene := range jobs {
		select {
		case <-ctx.Done():
			// Context cancelled: keep draining the channel so the producer
			// never blocks, but skip the actual work.
		default:
			if err := sceneBurstsInventory(ctx, scene, pareaAOI, annotationsProviders); err != nil {
				log.Logger(ctx).Sugar().Errorf("%v", err)
				//return err
			}
		}
	}
	return nil
}

// BurstsInventory creates an inventory of all the bursts of the given scenes
// Returns the number of bursts
//
// Scenes that end up with no intersecting burst are dropped from the
// returned slice (which reuses the input slice's backing array).
func (c *Catalog) BurstsInventory(ctx context.Context, area entities.AreaToIngest, aoi geos.Geometry, scenes []*entities.Scene) ([]*entities.Scene, int, error) {
	// Prepare geometry for intersection. The small buffer widens the AOI
	// slightly so border bursts are not missed.
	areaAOI, err := aoi.Buffer(0.05)
	if err != nil {
		return nil, 0, fmt.Errorf("burstsInventory.Buffer: %w", err)
	}
	pareaAOI := areaAOI.Prepare()
	// Create group
	wg, ctx := errgroup.WithContext(ctx)
	jobChan := make(chan *entities.Scene, len(scenes))
	// Annotations are searched in order: catalog-level GCS bucket,
	// area-level GCS bucket, then the Creodias provider as a fallback.
	var annotationsProviders []catalog.AnnotationsProvider
	if c.GCSAnnotationsBucket != "" {
		annotationsProviders = append(annotationsProviders, gcs.AnnotationsProvider{Bucket: c.GCSAnnotationsBucket})
	}
	if area.GCSAnnotationsBucket != "" {
		annotationsProviders = append(annotationsProviders, gcs.AnnotationsProvider{Bucket: area.GCSAnnotationsBucket})
	}
	annotationsProviders = append(annotationsProviders, creodias.AnnotationsProvider{})
	// Start 10 workers (fewer if there are fewer scenes).
	for i := 0; i < 10 && i < len(scenes); i++ {
		wg.Go(func() error {
			return sceneBurstsInventoryWorker(ctx, jobChan, pareaAOI, annotationsProviders)
		})
	}
	// Push jobs. The channel is buffered with len(scenes), so this never blocks.
	for _, scene := range scenes {
		jobChan <- scene
	}
	close(jobChan)
	// Wait
	if err := wg.Wait(); err != nil {
		return nil, 0, fmt.Errorf("burstsInventory.%w", err)
	}
	// Keep areaAOI alive until the workers are done: pareaAOI is a prepared
	// view whose validity presumably depends on the parent geometry not
	// being finalized.
	runtime.KeepAlive(areaAOI)
	// Filter empty scenes and get number of bursts (in-place compaction).
	n, j := 0, 0
	for i := range scenes {
		if t := len(scenes[i].Tiles); t != 0 {
			scenes[j] = scenes[i]
			n += t
			j++
		} else {
			log.Logger(ctx).Sugar().Infof("Remove empty scene: %s", scenes[i].SourceID)
		}
	}
	return scenes[0:j], n, nil
}

// BurstsSort defines for each burst the previous and reference bursts
// Returns the number of tracks and swaths
//
// Bursts are grouped by track+swath, then pooled by similar AnxTime
// (within 5 units); within each pool, sorted by date, the oldest burst is
// the reference and each burst's predecessor is the previous one.
func (c *Catalog) BurstsSort(ctx context.Context, scenes []*entities.Scene) int {
	// Sort bursts by track and swath
	burstsPerTrackSwath := map[string][]*entities.Tile{}
	for _, scene := range scenes {
		for _, burst := range scene.Tiles {
			// First 4 characters of the burst source ID encode the track
			// (see the SourceID format built in burstsFromAnnotations).
			track := burst.SourceID[0:4]
			trackSwath := track + burst.Data.SwathID
			burstsPerTrackSwath[trackSwath] = append(burstsPerTrackSwath[trackSwath], burst)
		}
	}
	// Find previous and reference for each bursts
	for _, bursts := range burstsPerTrackSwath {
		// Sort by AnxTime
		sort.Slice(bursts, func(i, j int) bool { return bursts[i].AnxTime < bursts[j].AnxTime })
		// Create pools of burst with similar AnxTime, sort by date and find ref and prev burst
		for i, il := 0, 0; i < len(bursts); il = i {
			// Advance i to the end of the pool: bursts whose AnxTime is
			// within 5 units of the pool's first burst.
			for ; i < len(bursts) && bursts[i].AnxTime < bursts[il].AnxTime+5; i++ {
			}
			sbursts := bursts[il:i]
			sort.Slice(sbursts, func(j, k int) bool { return sbursts[j].Date.Before(sbursts[k].Date) })
			for j := 1; j < len(sbursts); j++ {
				b := sbursts[j]
				if !b.Ingested {
					b.Reference = &sbursts[0].TileLite
					b.Previous = &sbursts[j-1].TileLite
					// If the current date is more than 6 days after the previous date, log a warning
					// (the threshold used is 7 days; Sentinel-1 revisit is 6 days).
					if b.Date.After(b.Previous.Date.Add(time.Hour * 24 * 7)) {
						log.Logger(ctx).Sugar().Warnf("%s:%s No burst was found 6 days before. Found %s (%v before)", b.SceneID, b.SourceID, b.Previous.SceneID, b.Date.Sub(b.Previous.Date))
					}
				}
			}
		}
	}
	return len(burstsPerTrackSwath)
}

// burstsFromAnnotations loads bursts features (anxtime, swath and geometry) from annotation files
//
// Providers are tried in order until one succeeds; errors from earlier
// providers are merged and only reported if all providers fail. Bursts
// with an AnxTime already seen are skipped (deduplication across swaths).
func burstsFromAnnotations(ctx context.Context, scene *entities.Scene, annotationsProviders []catalog.AnnotationsProvider) ([]*entities.Tile, error) {
	var err, e error
	var annotationsFiles map[string][]byte
	for _, annotationsProvider := range annotationsProviders {
		annotationsFiles, e = annotationsProvider.AnnotationsFiles(ctx, scene)
		if err = service.MergeErrors(false, err, e); err == nil {
			break
		}
	}
	if err != nil {
		return nil, fmt.Errorf("burstsFromAnnotations.%w", err)
	}
	var burstsInventory []*entities.Tile
	anxTimes := map[int]struct{}{}
	for url, file := range annotationsFiles {
		bursts, err := burstsFromAnnotation(file, url)
		if err != nil {
			return nil, fmt.Errorf("burstsFromAnnotations.%w", err)
		}
		for anxTime, burst := range bursts {
			if _, ok := anxTimes[anxTime]; !ok {
				anxTimes[anxTime] = struct{}{}
				// Add info from scene
				burst.Date = scene.Scene.Data.Date
				burst.SceneID = scene.SourceID
				// SourceID format: <orbit-direction initial><relative orbit>_<swath>_<anxtime>
				// (BurstsSort relies on the first 4 characters being the track).
				burst.SourceID = fmt.Sprintf("%s%s_%s_%d", scene.Tags[common.TagOrbitDirection][0:1], scene.Tags[common.TagRelativeOrbit], burst.Data.SwathID, burst.AnxTime)
				burstsInventory = append(burstsInventory, burst)
			}
		}
	}
	return burstsInventory, nil
}

// burstsFromAnnotation parses a single Sentinel-1 annotation XML file and
// returns the bursts it describes, keyed by their (discretized) AnxTime.
// The footprint polygon of each burst is built from the geolocation grid
// points at the first and last pixel of the burst's first and last lines.
func burstsFromAnnotation(annotationFile []byte, annotationURL string) (map[int]*entities.Tile, error) {
	// XML GridPoint structure
	type GridPoint struct {
		Pixel     int     `xml:"pixel"`
		Line      int     `xml:"line"`
		Latitude  float64 `xml:"latitude"`
		Longitude float64 `xml:"longitude"`
	}
	// Read annotations file
	annotation := struct {
		XMLName         xml.Name    `xml:"product"`
		Swath           string      `xml:"adsHeader>swath"`
		LinesPerBurst   int         `xml:"swathTiming>linesPerBurst"`
		SamplesPerBurst int         `xml:"swathTiming>samplesPerBurst"`
		AzimuthAnxTime  []float64   `xml:"swathTiming>burstList>burst>azimuthAnxTime"`
		GridPoint       []GridPoint `xml:"geolocationGrid>geolocationGridPointList>geolocationGridPoint"`
	}{}
	if err := xml.Unmarshal(annotationFile, &annotation); err != nil {
		return nil, fmt.Errorf("readAnnotation.Unmarshal[%s] : %w", annotationFile, err)
	}
	// Position of firsts and last points, indexed by line number.
	first := map[int]GridPoint{}
	last := map[int]GridPoint{}
	for _, point := range annotation.GridPoint {
		if point.Pixel == 0 {
			first[point.Line] = point
		} else if point.Pixel == annotation.SamplesPerBurst-1 {
			last[point.Line] = point
		}
	}
	// Burst
	bursts := map[int]*entities.Tile{}
	for i, anxTime := range annotation.AzimuthAnxTime {
		// First/Last lines of the burst
		firstline := i * annotation.LinesPerBurst
		lastline := (i + 1) * annotation.LinesPerBurst
		if _, ok := first[firstline]; !ok {
			firstline-- // -1 because first and lastline sometimes shifts by 1 for some reason?
			if _, ok := first[firstline]; !ok {
				return nil, fmt.Errorf("readAnnotation: First line not found in annotation file %s", annotationURL)
			}
		}
		if _, ok := last[lastline]; !ok {
			lastline-- // -1 because first and lastline sometimes shifts by 1 for some reason?
			if _, ok := last[lastline]; !ok {
				return nil, fmt.Errorf("readAnnotation: Last line not found in annotation file %s", annotationURL)
			}
		}
		// Set bursts. The AnxTime is folded modulo the repeat-cycle period
		// (12 days over 175 orbits) and discretized to 0.1s units so bursts
		// from different passes of the same orbit slot share a key.
		intAnxTime := int(math.Round(math.Mod(anxTime, float64(12*24*60*60/175)) * 10))
		bursts[intAnxTime] = &entities.Tile{
			Data: common.TileAttrs{
				SwathID: annotation.Swath,
				TileNr:  i + 1,
			},
			AnxTime: intAnxTime,
			// Closed ring: first/last corners of the burst footprint.
			GeometryWKT: fmt.Sprintf("POLYGON((%f %f, %f %f, %f %f, %f %f, %f %f))",
				first[firstline].Longitude, first[firstline].Latitude,
				first[lastline].Longitude, first[lastline].Latitude,
				last[lastline].Longitude, last[lastline].Latitude,
				last[firstline].Longitude, last[firstline].Latitude,
				first[firstline].Longitude, first[firstline].Latitude,
			),
		}
	}
	return bursts, nil
}
/**
 * Holder attached to a request so that timing information survives the
 * multiple filter invocations that occur with asynchronous requests.
 */
static class TimingContext {

	private final Timer.Sample timerSample;

	TimingContext(Timer.Sample timerSample) {
		this.timerSample = timerSample;
	}

	/**
	 * Returns the {@code TimingContext} previously attached to the given
	 * request, or {@code null} if none was attached.
	 */
	public static TimingContext get(Message request) {
		return request.getContent(TimingContext.class);
	}

	/** Stores this context on the request content, keyed by its class. */
	public void attachTo(Message request) {
		request.setContent(TimingContext.class, this);
	}

	public Timer.Sample getTimerSample() {
		return this.timerSample;
	}

}
import random
import copy


class Hat:
    """A hat of colored balls, constructed as e.g. Hat(red=2, blue=1)."""

    def __init__(self, **kwargs):
        self.contents = self.create_contents(kwargs)

    def create_contents(self, obj):
        """Expand a {color: count} mapping into a flat list, one entry per ball."""
        return [color for color, count in obj.items() for _ in range(count)]

    def draw(self, n):
        """Remove n balls at random and return them.

        If n is at least the number of balls available, all balls are
        drawn: every ball is returned and the hat is left empty.
        """
        if n >= len(self.contents):
            # Fix: return a copy and empty the hat instead of handing out the
            # internal list. The original returned self.contents directly, so
            # callers could mutate the hat and the balls were never removed.
            removed = self.contents[:]
            self.contents.clear()
            return removed
        removed = []
        for _ in range(n):
            ball = random.choice(self.contents)
            self.contents.remove(ball)
            removed.append(ball)
        return removed


def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
    """Estimate the probability of drawing at least the expected balls.

    Each experiment draws num_balls_drawn balls from a deep copy of hat
    (so the original hat is never consumed) and counts as a success when,
    for every color in expected_balls, at least that many were drawn.

    Returns:
        float: successes / num_experiments.
    """
    successes = 0
    for _ in range(num_experiments):
        drawn = copy.deepcopy(hat).draw(num_balls_drawn)
        counts = {}
        for ball in drawn:
            counts[ball] = counts.get(ball, 0) + 1
        if all(counts.get(color, 0) >= needed
               for color, needed in expected_balls.items()):
            successes += 1
    return successes / num_experiments
def check_for_tools():
    """Probe for required external tools.

    Returns a pair (available, missing): tools that could be invoked and
    tools whose invocation raised OSError (i.e. not installed / not found).
    """
    tools = [
        utils.PLIST_BUDDY,
    ]
    available = []
    missing = []
    for tool in tools:
        try:
            utils.call(tool)
        except OSError:
            missing.append(tool)
        else:
            available.append(tool)
    return available, missing
def perp2coast(self, method='smooth', x=10):
    """Compute coast-normal and coast-perpendicular unit vectors.

    The coastline slope at each perpendicular location (self.perploc()) is
    estimated with one of several windowing schemes, converted to angles,
    and decomposed into unit-vector components stored on self.

    Args:
        method: 'local' (immediate neighbours), 'ext' (fixed +-x window),
            'smooth' (mean of centred windows 1..x-1, default), or any
            other value for a forward-looking window anchored at ii - x.
        x: half-width of the slope estimation window (forced to 1 for
            'local').

    Returns:
        dict with 'Nvector' (coast-normal components, angles, slopes) and
        'Pvector' (perpendicular components, angles, slopes).
    """
    index_perp = self.perploc()
    # Bug fix: the original guard was `method == 'local' and method == 'ext'`,
    # which is always False, so 'local' and 'ext' silently fell through to
    # the generic window branch below. It must be `or`.
    if method == 'local' or method == 'ext':
        if method == 'local':
            # 'local' uses only the immediate neighbours.
            x = 1
        slopes = np.array([slope(self.coastline[ii - x, 0], self.coastline[ii + x, 0],
                                 self.coastline[ii - x, 1], self.coastline[ii + x, 1])
                           for ii in index_perp])
    elif method == 'smooth':
        # Mean of centred slopes over widening windows 1..x-1.
        slopes = np.array([np.mean([slope(self.coastline[ii - xx, 0], self.coastline[ii + xx, 0],
                                          self.coastline[ii - xx, 1], self.coastline[ii + xx, 1])
                                    for xx in range(1, x)])
                           for ii in index_perp])
    else:
        # Any other method: forward-looking window anchored at ii - x.
        slopes = np.array([np.mean([slope(self.coastline[ii - x, 0], self.coastline[ii - x + xx, 0],
                                          self.coastline[ii - x, 1], self.coastline[ii - x + xx, 1])
                                    for xx in range(1, (2 * x - 1))])
                           for ii in index_perp])
    angles = slope2angle(slopes)
    perp_angle = angles + (np.pi / 2)
    self.x_norm = np.squeeze(np.cos(angles))
    self.y_norm = np.squeeze(np.sin(angles))
    self.x_perp = np.squeeze(np.cos(perp_angle))
    self.y_perp = np.squeeze(np.sin(perp_angle))
    # Bug fix: Nvector 'y' previously returned self.x_norm instead of
    # self.y_norm. (Key naming 'angle' vs 'angles' is kept as-is for
    # backward compatibility.)
    return {'Nvector': {'x': self.x_norm, 'y': self.y_norm,
                        'angle': angles, 'slope': slopes},
            'Pvector': {'x': self.x_perp, 'y': self.y_perp,
                        'angles': perp_angle, 'slope': -1 / slopes}}
<filename>003/main.cpp // 3. LCM of two or more positive integers // Usage: 1. Enter the number of inputs. // Usage: 2. Enter as many times as above. #include <iostream> #include "ponz_utility.h" int main(int argc, char const *argv[]) { Util::ui n, res, element; std::cin >> n >> res; for (size_t i = 1; i < n; i++) { std::cin >> element; res = Util::lcm(res, element); } std::cout << res << "\n"; return 0; }
/**
 * Validate serializing a {@link Model} to a String and then deserializing it returns the same value.
 */
@Test
public void validateGsonFactorySerde() {
    BlogOwner original = BlogOwner.builder()
        .name("Richard")
        .createdAt(new Temporal.DateTime(new Date(), 0))
        .build();
    // Round-trip the model through its JSON representation and verify
    // the result is equal to what we started with.
    String json = GsonFactory.instance().toJson(original);
    BlogOwner roundTripped = GsonFactory.instance().fromJson(json, BlogOwner.class);
    assertEquals(original, roundTripped);
}
/* Adjust markers for an insertion that stretches from FROM / FROM_BYTE
   to TO / TO_BYTE.  We have to relocate the charpos of every marker
   that points after the insertion (but not their bytepos).

   When a marker points at the insertion point, we advance it if either
   its insertion-type is t or BEFORE_MARKERS is true.  */

static void
adjust_markers_for_insert (ptrdiff_t from, ptrdiff_t from_byte,
			   ptrdiff_t to, ptrdiff_t to_byte,
			   bool before_markers)
{
  struct Lisp_Marker *m;
  bool adjusted = 0;
  /* Size of the insertion in characters and in bytes.  */
  ptrdiff_t nchars = to - from;
  ptrdiff_t nbytes = to_byte - from_byte;

  /* Walk the current buffer's marker chain.  */
  for (m = BUF_MARKERS (current_buffer); m; m = m->next)
    {
      /* Invariant: a marker's byte position is never smaller than its
	 character position, and their difference cannot exceed the
	 buffer-wide byte/char difference.  */
      eassert (m->bytepos >= m->charpos
	       && m->bytepos - m->charpos <= Z_BYTE - Z);

      if (m->bytepos == from_byte)
	{
	  /* Marker sits exactly at the insertion point: advance it only
	     when requested (insertion-type t markers, or all markers when
	     BEFORE_MARKERS).  */
	  if (m->insertion_type || before_markers)
	    {
	      m->bytepos = to_byte;
	      m->charpos = to;
	      /* Only insertion-type markers trigger the overlay fixups
		 below.  */
	      if (m->insertion_type)
		adjusted = 1;
	    }
	}
      else if (m->bytepos > from_byte)
	{
	  /* Marker lies after the insertion: shift both positions by the
	     inserted amount.  */
	  m->bytepos += nbytes;
	  m->charpos += nchars;
	}
    }

  /* Adjusting only markers whose insertion-type is t may have stretched
     overlays across the inserted text; repair overlay start/end ordering.  */
  if (adjusted)
    {
      fix_start_end_in_overlays (from, to);
      fix_overlays_before (current_buffer, from, to);
    }
}
/******************************************************************************/
/**
 * This function checks if the RX device's DisplayPort Configuration Data (DPCD)
 * indicates that the clock recovery sequence during link training was
 * successful - the RX device's link clock and data recovery unit has realized
 * and maintained the frequency lock for all lanes currently in use.
 *
 * @param	InstancePtr is a pointer to the XDp instance.
 * @param	LaneCount is the number of lanes to check.
 *
 * @return
 *		- XST_SUCCESS if the RX device's clock recovery PLL has
 *		  achieved frequency lock for all lanes in use.
 *		- XST_FAILURE otherwise.
 *
 * @note	None.
 *
*******************************************************************************/
static u32 XDp_TxCheckClockRecovery(XDp *InstancePtr, u8 LaneCount)
{
	/* Lane status bytes as previously read from the RX device's DPCD. */
	u8 *LaneStatus = InstancePtr->TxInstance.RxConfig.LaneStatusAdjReqs;

	/* Deliberate fall-through: checking 4 lanes must also verify lanes
	 * 1 and 0, and checking 2 lanes must also verify lane 0. Each lane's
	 * clock-recovery-done bit must be set, otherwise training failed. */
	switch (LaneCount) {
	case XDP_TX_LANE_COUNT_SET_4:
		if (!(LaneStatus[1] & XDP_DPCD_STATUS_LANE_3_CR_DONE_MASK)) {
			return XST_FAILURE;
		}
		if (!(LaneStatus[1] & XDP_DPCD_STATUS_LANE_2_CR_DONE_MASK)) {
			return XST_FAILURE;
		}
		/* FALLTHRU */
	case XDP_TX_LANE_COUNT_SET_2:
		if (!(LaneStatus[0] & XDP_DPCD_STATUS_LANE_1_CR_DONE_MASK)) {
			return XST_FAILURE;
		}
		/* FALLTHRU */
	case XDP_TX_LANE_COUNT_SET_1:
		if (!(LaneStatus[0] & XDP_DPCD_STATUS_LANE_0_CR_DONE_MASK)) {
			return XST_FAILURE;
		}
	default:
		break;
	}

	return XST_SUCCESS;
}
import { MatierePremiere } from './matierePrem';
// NOTE(review): MatierePremiere is imported but never referenced in this
// chunk — confirm it is used elsewhere in the file before removing.

/**
 * Shape of a product-family record as received from the API.
 */
export interface IFamille{
    id: number;
    name?: string;
    details?:string;
    description?:string;
}

/**
 * Client-side product-family model used by the UI.
 * NOTE(review): field set differs from IFamille (no `details`, adds image and
 * production counters) — confirm the two are intentionally distinct.
 */
export class Famille{
    id?: number;
    name?: string;
    description?:string;
    imageUrl? : string;
    prodSaison?: number;       // seasonal production figure — TODO confirm units
    prodSaisonAttein?:number;  // seasonal production reached — TODO confirm semantics
    nbrProd?:number;           // number of products in the family — presumably; verify
}
The LMX2594 is a high-performance, wideband synthesizer that can generate any frequency from 10 MHz to 15 GHz without using an internal doubler, thus eliminating the need for sub-harmonic filters. The high performance PLL with figure of merit of –236 dBc/Hz and high-phase detector frequency can attain very low in-band noise and integrated jitter. The high speed N-divider has no pre-divider, thus significantly reducing the amplitude and number of spurs. There is also a programmable input multiplier to mitigate integer boundary spurs. The LMX2594 allows users to synchronize the output of multiple devices and also enables applications that need deterministic delay between input and output. A frequency ramp generator can synthesize up to 2 segments of ramp in an automatic ramp generation option or a manual option for maximum flexibility. The fast calibration algorithm allows changing frequencies faster than 20 µs. The LMX2594 adds support for generating or repeating SYSREF (compliant to JESD204B standard) making it an ideal low-noise clock source for high-speed data converters. Fine delay adjustment (9-ps resolution) is provided in this configuration to account for delay differences of board traces. The output drivers within LMX2594 deliver output power as high as 7 dBm at 15-GHz carrier frequency. The device runs from a single 3.3-V supply and has integrated LDOs that eliminate the need for on-board low noise LDOs.
Experimental analysis of the shear connection effect on composite beams with concrete The subject of this research is the analysis of the shear connection effect on fiber glass reinforced composite beams. The research contains a detailed analysis of materials such as concrete and fiber glass reinforced composite. The shear connection has been tested by a push-out test experiment run in the Centre of research and innovation in construction of the Technical University of Košice. The shear connection has been tested on two types of connectors used on composite beams. The main idea of this research is to find out whether it is possible to use an alternative material that could substitute for steel. The aim of the research is to design a type of connector for fiber reinforced composite materials or other composite materials being developed in the world of material engineering. Part of the research is devoted to the FEM analysis.
FIVB Beach Volleyball Open in Lucerne, Switzerland for the first time One of six new venues the FIVB Beach Volleyball World Tour 2015 calendar, Lucerne, Switzerland will host the double-gender FIVB Lucerne Open. The Lucerne event, to be held May 12 to 17, will mark the 15th straight year a men’s competition will be held in Switzerland and 16th consecutive year that a women’s tournament will take place. Lausanne, Switzerland, is the headquarters of the International Volleyball Federation (FIVB) and this will be just the first time that Switzerland will be the site of more than one FIVB World Tour event in a single year. A total of 29 previous FIVB World Tour events have been staged in the picturesque Swiss village of Gstaad, including 14 straight men’s events and 15 consecutive women’s events. This year’s FIVB Gstaad Major Series will be third of three events that are part of the new FIVB Swatch Beach Volleyball Major Series. The FIVB World Tour will visit Gstaad from July 7 to 12. Through the 2014 event, Switzerland has hosted a total of 29 FIVB World Tour events (14 men, 15 women) with all of them being held in Gstaad with Brazil leading the medal count in both genders. In the men’s totals after 14 events, Brazil has 19 medals, followed by USA with eight, Germany with six, Switzerland three, and with one men’s medal each are Argentina, Australia, Italy, the Netherlands, Russia and Spain. After 15 FIVB World Tour women’s events held in Switzerland through 2014, Brazil leads the medal parade with 18 total medals followed by the USA with 11, China with eight, German with three and with one each women’s medals in Gstaad are Australia, Czech Republic, Italy, the Netherlands and Switzerland. The FIVB Beach Volleyball World Tour 2015 calendar will include five FIVB Grand Slams, the new FIVB Swatch Major Series of three events, six FIVB Opens and a special FIVB Swatch Beach Volleyball Season Final in the United States. 
The showcase event will be the $1 million FIVB Beach Volleyball World Championships The Netherlands 2015. Lucerne is a city in central Switzerland, in the German-speaking portion of the country. Lucerne is the capital of the Canton of Lucerne and the capital of the district of the same name. With a population of about 80,501 people, Lucerne is the most populous city in Central Switzerland, and a nexus of transportation, telecommunications and government of this region. The city's urban area consists of 17 cities and towns located in three different cantons with an overall population of about 250,000 people. Because of its location on the shore of Lake Lucerne (der Vierwaldstättersee), within sight of Mount Pilatus and Rigi in the Swiss Alps, Lucerne has long been a destination for tourists. One of the city's famous landmarks is the Chapel Bridge (Kapellbrücke), a wooden bridge first erected in the 14th century. A total of 14 countries will host events this year as the FIVB World Tour 2015 also begins the Olympic qualification process with all FIVB World Tour events (except the World Tour Finals) in 2015 up until June 13, 2016 counting towards the Olympic Ranking in order to determine the 15 vacancies for each gender that will take part in the 2016 Rio Olympic Games. The 2015 FIVB World Ranking system will again include FIVB Beach Volleyball Grand Slam and Open events along with additional pre-approved events at the inter-continental, continental and national levels. Implemented in 2013, the format of all the FIVB Beach Volleyball international tournaments – whether FIVB Beach Volleyball World Championships, FIVB Beach Volleyball Grand Slam or FIVB Beach Volleyball Open – is the same, featuring pool play followed by single elimination knockout rounds. The 2014-2016 FIVB Beach Volleyball World Continental Cup also returns this year as it starts its portion of the qualification process for the 2016 Rio Olympic Games in Brazil.
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model

print(tf.__version__)


def grad_loss(v_gt, v):
    """L1 loss plus an L1 penalty on the spatial gradients.

    :param v_gt: ground-truth tensor, shape (batch, T, H, W, C) — assumed
        from the slicing below; TODO confirm against the data pipeline
    :param v: predicted tensor, same shape as v_gt
    :return: per-sample loss, reduced over axes (1, 2, 3)
    """
    # Plain L1 term.
    loss = tf.reduce_mean(tf.abs(v - v_gt), axis=[1, 2, 3])
    # Finite-difference gradients along the two spatial axes.
    jy = v[:, :, 1:, :, :] - v[:, :, :-1, :, :]
    jx = v[:, :, :, 1:, :] - v[:, :, :, :-1, :]
    jy_ = v_gt[:, :, 1:, :, :] - v_gt[:, :, :-1, :, :]
    jx_ = v_gt[:, :, :, 1:, :] - v_gt[:, :, :, :-1, :]
    # L1 penalty on the gradient difference.
    loss += tf.reduce_mean(tf.abs(jy - jy_), axis=[1, 2, 3])
    loss += tf.reduce_mean(tf.abs(jx - jx_), axis=[1, 2, 3])
    return loss


def uNet(input, time, lat, lon, height, kernel=[5, 3, 3], nodes=[72, 144, 288, 576]):
    '''
    This function defines a U-Net architecture

    :param input: the main-input layer
    :param time: the time input layer (merged at the bottleneck)
    :param lat, lon, height: additional fields (merged after the first conv)
    :param kernel: Kernel-sizes (default = [5, 3, 3])
    :param nodes: different neuron-sizes if needed (default = [72, 144, 288, 576])
    :return: last layer of constructed model
    '''
    # Number of timesteps in the temporal dimension of every Conv3D kernel.
    TS = 3

    ##################################################### 1st Block ####################################################
    conv1 = Conv3D(filters=nodes[0], kernel_size=(TS, kernel[0], kernel[0]), activation='relu',
                   padding='same', data_format='channels_last')(input)
    # Inject the static geographic fields early.
    mergetime = Concatenate(axis=4)([conv1, lat, lon, height])
    conv1 = Conv3D(filters=nodes[0], kernel_size=kernel[0], activation='relu',
                   padding='same', data_format='channels_last')(mergetime)
    pool1 = MaxPooling3D(pool_size=(1, 2, 2))(conv1)

    ##################################################### 2nd Block ####################################################
    conv2 = Conv3D(filters=nodes[1], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                   padding='same', data_format='channels_last')(pool1)
    conv2 = Conv3D(filters=nodes[1], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                   padding='same', data_format='channels_last')(conv2)
    pool2 = MaxPooling3D(pool_size=(1, 2, 2))(conv2)

    ##################################################### 3rd Block ####################################################
    conv3 = Conv3D(filters=nodes[2], kernel_size=(TS, kernel[2], kernel[2]), activation='relu',
                   padding='same', data_format='channels_last')(pool2)
    conv3 = Conv3D(filters=nodes[2], kernel_size=(TS, kernel[2], kernel[2]), activation='relu',
                   padding='same', data_format='channels_last')(conv3)
    pool3 = MaxPooling3D(pool_size=(1, 2, 2))(conv3)

    ##################################################### 4th Block ####################################################
    conv4 = Conv3D(filters=nodes[3], kernel_size=(TS, kernel[2], kernel[2]), activation='relu',
                   padding='same', data_format='channels_last')(pool3)
    conv4 = Conv3D(filters=nodes[3], kernel_size=(TS, kernel[2], kernel[2]), activation='relu',
                   padding='same', data_format='channels_last')(conv4)

    ####################################################### TIME #######################################################
    # Merge time-layer at the bottleneck (spatial size here is PS/8).
    mergetime = Concatenate(axis=4)([conv4, time])

    ################################################### UP 3rd Block ###################################################
    up3 = UpSampling3D(size=(1, 2, 2))(mergetime)
    up3 = Conv3D(filters=nodes[2], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                 padding='same', kernel_initializer='he_normal')(up3)
    # Skip connection
    merge3 = Concatenate(axis=4)([conv3, up3])
    conv3 = Conv3D(filters=nodes[2], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                   padding='same', data_format='channels_last')(merge3)
    conv3 = Conv3D(filters=nodes[2], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                   padding='same', data_format='channels_last')(conv3)

    ################################################### UP 2nd Block ###################################################
    up2 = UpSampling3D(size=(1, 2, 2))(conv3)
    up2 = Conv3D(filters=nodes[1], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                 padding='same', kernel_initializer='he_normal')(up2)
    # Skip connection
    merge2 = Concatenate(axis=4)([conv2, up2])
    conv2 = Conv3D(filters=nodes[1], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                   padding='same', data_format='channels_last')(merge2)
    conv2 = Conv3D(filters=nodes[1], kernel_size=(TS, kernel[1], kernel[1]), activation='relu',
                   padding='same', data_format='channels_last')(conv2)

    ################################################### UP 1st Block ###################################################
    up1 = UpSampling3D(size=(1, 2, 2))(conv2)
    up1 = Conv3D(filters=nodes[0], kernel_size=(TS, kernel[0], kernel[0]), activation='relu',
                 padding='same', kernel_initializer='he_normal')(up1)
    merge1 = Concatenate(axis=4)([conv1, up1])
    conv1 = Conv3D(filters=nodes[0], kernel_size=(TS, kernel[0], kernel[0]), activation='relu',
                   padding='same', data_format='channels_last')(merge1)
    conv1 = Conv3D(filters=nodes[0], kernel_size=(TS, kernel[0], kernel[0]), activation='relu',
                   padding='same', data_format='channels_last')(conv1)

    # last layer is the output
    output = conv1
    return output


def get_model(PS=32, loss=grad_loss, optimizer='adam', nodes=[72, 144, 288, 576], residual=False):
    '''
    This function creates the DCN-architecture (residual = False) or
    RPN-architecture (residual = True).

    :param PS: Patch size (should be divisible by 8 so the three pooling
        stages and the time input line up)
    :param loss: loss function (default = grad_loss)
    :param optimizer: optimizer (default = 'adam')
    :param nodes: different neuron-sizes if needed (default = [72, 144, 288, 576])
    :param residual: boolean toggeling between RPN (True) and DCN (False)
    :return: compiled Model
    '''
    # Input layers.
    main_input = Input(shape=(3, PS, PS, 1))
    # FIX: use integer division — PS/8 yields a float, but Keras Input shapes
    # must be integers.
    time = Input(shape=(3, PS // 8, PS // 8, 1))
    lat = Input(shape=(3, PS, PS, 1))
    lon = Input(shape=(3, PS, PS, 1))
    height = Input(shape=(3, PS, PS, 1))

    # Load U-Net
    unet = uNet(main_input, time, lat, lon, height, nodes=nodes)

    # Output layer collapses the 3 timesteps to 1 (valid padding, kernel 3).
    temp_out = Conv3D(filters=1, kernel_size=(3, 1, 1), activation='linear',
                      padding='valid', data_format="channels_last")(temp_in := unet) if False else \
        Conv3D(filters=1, kernel_size=(3, 1, 1), activation='linear',
               padding='valid', data_format="channels_last")(unet)

    # residual layer
    if residual:
        # FIX: slice with 1:2 to keep the time axis, giving shape
        # (batch, 1, PS, PS, 1) which matches temp_out; the original
        # main_input[:, 1, :, :] dropped the axis and the shapes of the Add
        # inputs did not line up.
        temp_out = Add()([main_input[:, 1:2, :, :, :], temp_out])

    # create model with the defined Layers
    model = Model(inputs=[main_input, time, lat, lon, height], outputs=temp_out)

    # compile with defined loss and optimizer
    model.compile(loss=loss, optimizer=optimizer, metrics=['mse', 'mae', 'mape'])
    return model


def main():
    model = get_model()             # DCN
    # model = get_model(residual=True)  # RPN
    # model.summary()


if __name__ == '__main__':
    main()
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// fileAccess provides read access to a source directory tree rooted at srcPath.
type fileAccess struct {
	srcPath string
}

// fileSource describes one entry (file or directory) found under the root.
type fileSource struct {
	fullPath string      // original path on disk as given by the walk
	subPath  string      // path relative to the access root
	info     os.FileInfo // metadata for the entry
}

// newFileAccess builds a fileAccess with a normalized root path.
// NOTE(review): the error from isDirectory is discarded — on failure the path
// is normalized as a non-directory; confirm this is intended.
func newFileAccess(srcPath string) (fa *fileAccess) {
	fa = new(fileAccess)
	isDir, _ := isDirectory(srcPath)
	fa.srcPath = normalizePath(srcPath, isDir)
	return
}

// newFileSource wraps one walked entry together with its root-relative path.
func newFileSource(srcPath string, subPath string, info os.FileInfo) (fs *fileSource) {
	fs = new(fileSource)
	fs.fullPath = srcPath
	fs.subPath = subPath
	fs.info = info
	return
}

// SourceAccess

// EachSource walks the tree under srcPath and invokes callback once per entry.
// The first error (from the walk or the callback) aborts the walk and is
// returned; callback errors are also logged to stdout.
func (fa *fileAccess) EachSource(callback FileSourceFunc) error {
	return filepath.Walk(fa.srcPath, func (fullPath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		subPath := toSubPath(fa.srcPath, fullPath)
		err = callback(newFileSource(fullPath, subPath, info))
		if err != nil {
			fmt.Println("EachSource return error:", err)
		}
		return err
	})
}

// FileSource

// SubPath returns the entry's path relative to the access root.
func (fs *fileSource) SubPath() string {
	return fs.subPath
}

// IsDir reports whether the entry is a directory.
func (fs *fileSource) IsDir() bool {
	return fs.info.IsDir()
}

// Reader opens the underlying file for reading; the caller must Close it.
func (fs *fileSource) Reader() (io.ReadCloser, error) {
	return os.Open(fs.fullPath)
}
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { HTTP_INTERCEPTORS, HttpClientModule } from '@angular/common/http';

import { HttpCredentialInterceptor } from './interceptors/http.credential.interceptor';
import { HttpTokenInterceptor } from './interceptors/http.token.interceptor';
import { StorageService } from './services/storage.service';
import { RepositoryService } from './services/repository.service';
import { AuthService } from './services/auth.service';
import { StudentService } from './services/student.service';
import { CourseService } from './services/course.service';
import { AuthGuard } from './services/auth.guard';

/**
 * Core module: registers the application's HTTP interceptors and the shared
 * data/auth services in one place. Intended to be imported once, from the
 * root module.
 */
@NgModule({
  declarations: [],
  imports: [
    CommonModule,
    HttpClientModule
  ],
  providers: [
    // Both interceptors are registered on the multi-provider token; they are
    // listed token-first, then credential.
    { provide: HTTP_INTERCEPTORS, useClass: HttpTokenInterceptor, multi: true },
    { provide: HTTP_INTERCEPTORS, useClass: HttpCredentialInterceptor, multi: true },
    RepositoryService,
    StorageService,
    AuthService,
    StudentService,
    CourseService,
    AuthGuard
  ]
})
export class CoreModule { }
Email is a huge time and efficiency suck. It’s a perpetuating cycle of call and response, a vortex of distraction and a black hole for productivity. But there is a way out… In the movie Every Day’s a Holiday, Mae West famously remarked, “Let’s get out of these wet clothes and into a dry martini.” That’s how I feel about email. The problem is that it’s not a holiday. It’s Monday morning and I won’t have my first martini until lunch. Okay, even if you’re not trading emailing for martinis during the day, who is guilty of hopping right back into their inboxes after work? Checking your email on your smartphone en route to dinner? Sound familiar? Just stop. The benefits are clear. Communicating smarter saves time, increases efficiency, and makes work more enjoyable for everyone. It all starts with process Email is just one of the tools at our disposal. Organizations can tailor communication based on different needs by using the myriad technologies available. For example, no one is going to read your 4,000 word email, no matter how pretty your embedded charts look. Instead, create a wiki or a white paper to disseminate important information that will be repeatedly accessed by your team. Then use Dropbox or Google Drive to make it available for future reference. Likewise, high-level conversations about team progress, performance, and satisfaction should not be circulated via email. That method is not just annoying and frustrating, it taxes the resources of the entire company. Important stuff inevitably falls through cracks. There is a need to structure a weekly process for stakeholders to communicate these essential focal points. I know this because of the overwhelming response to an internal question from our early days. (Yes, we at 15Five use our own reporting system every week.) We were asked for “one suggestion to improve your role, team, or the organization”. Almost every answer involved streamlining how we communicate. 
The consensus: Keep the most important conversations out of email. Period. Otherwise, you have to juggle multiple team members, all of whom will respond at different times. And if an important response actually does surface, it will be lost in a sea of potentially pointless drivel. Try getting your important tasks completed when ten different people are blowing up your inbox all day long. Loop me out, damnit! Have you ever taken a few days off work only to return to hundreds of emails in your inbox? After filtering out the crap and repeatedly asking, ‘Why am I cc’d on this?’, you are left with a mere handful of useful and important conversations. Increase efficiency by creating a hierarchy of people who will own certain tasks while others report directly to them. The people who are working together at one level can communicate among themselves, and then pass the information up as needed. You can schedule a quick meeting to define the detailed structure and tailor the process to your team’s specific needs. Put your email where your mouth is When used wisely, verbal communication can be far more efficient: – Schedule a one-on-one. Anticipate the need for a longer conversation based on how individual members of your team process things. Several short meetings in the conference room can save the team from hours of drowning in group threads. – Gather your key players. Are you ideating and iterating? While reading 312 emails from a 12-member team sounds like a really valuable use of your time, it is far better to meet and instantly hash-out suggestions and responses. If your team is spread out or works remotely, meetings can have a secondary benefit of building connections and overall employee satisfaction. – Face Time. Our entire team checks in for 15 minutes every morning via Google Hangout. Two or three members of the team will stay on after the rest of the group logs off. Those few minutes of face-to-face contact make the rest of the day flow so much better. 
– Get on a call. I usually reserve emailing for situations where I can clearly state a need to a consultant or team-member, and I don’t anticipate a response other than “Got it” or “I’ll have that for you by EOD”. Once my list of things to discuss hits three or more, I change strategies and resolve the issue with a five-minute call. Sure, there is sometimes resistance. The person on the line is wondering why I just called rather than emailing them first to schedule the call. I explain that five minutes right now will save an hour of typing back and forth throughout the day. Email can be a valuable tool when we take the time to do it properly. So, before furiously tapping your fingers and hitting send, take three minutes to review. Does it make sense? Does the recipient have enough information to proceed? Can I remove any recipients? If the email is a reply, make sure you have adequately responded to the original questions, because the alternative is getting stuck in an endless wormhole of confused responses and nervous requests for more information. How do you keep the communication channels flowing smoothly? Leave a comment below or send us a tweet.
def unset_alias(self, alias):
    """Remove *alias* from this port.

    :param alias: alias string previously set on the port; it is UTF-8
        encoded before being handed to the JACK C API.
    :raises: whatever `_check` raises when `jack_port_unset_alias` reports
        failure (with the message 'Error unsetting port alias').
    """
    _check(_lib.jack_port_unset_alias(self._ptr, alias.encode()), 'Error unsetting port alias')
/* * going to be a standard treatment capacity transition */ public class SusceptiblesToRecovered extends AbstractTransitionFunction { StateNode unVaccinatedNode; StateNode recoveredNode; public SusceptiblesToRecovered(String name, IStateNode fromNodeName, IStateNode toNode, AbstractStateTransitionNetwork network) { super(name, fromNodeName, toNode, network, MovementType.PROPHYLAXED); addAttribute(NodeGroupConsts.TREATMENT, ""); addAttribute(NodeGroupConsts.SEIR_TO_IMMUNE, ""); unVaccinatedNode = (StateNode) fromNode; recoveredNode = (StateNode) toNode; } public double getP(int t) { if (t > 0) { // if ((fromNode.getNumberThatHaveBeenInNodeForNHours(t) > 0.0) // && network.isInTreatmentMode()) { // ArrayList<String> groups = new ArrayList<String>(); // // if (fromNode // .hasAttribute(NodeGroupConsts.WILL_TAKE_PROPHYLAXIS_BEFORE_CERTAIN // )) // groups // .add(NodeGroupConsts.WILL_TAKE_PROPHYLAXIS_BEFORE_CERTAIN); // // if (network.getState().isOutbreakCertain()) { // if (groups.size() == 0) // groups // .add(NodeGroupConsts.WILL_TAKE_PROPHYLAXIS_BEFORE_CERTAIN); // groups // .add(NodeGroupConsts.WILL_TAKE_PROPHYLAXIS_AFTER_CERTAIN); // // } // Double rate = network.getResponseCapacityForGroup(groups, // fromNode, t); Double vaccCap = ((InfluenzaStateTransitionNetwork) network).getVaccinationCapacity(t); Double vaccEfficacy = ((InfluenzaStateTransitionNetwork) network).getVaccinationEfficacy(); Double numToTreat = fromNode.getNumberOfTreatablePeopleInTheEntireNode(t); Double rawRate = (vaccCap > numToTreat ? 1d : vaccCap / numToTreat); Double rate = vaccEfficacy * rawRate; // Of the available capacity, what percentage will go to the recovered bin? // -- include the vaccine efficacy in this calculation return rate; } else { return 0.0; } } }
/**
 * Read-only value object for one movie entry (populated elsewhere, e.g. by a
 * JSON deserializer — fields have no setters).
 *
 * Created by Htet Aung Hlaing on 12/21/2017.
 */
public class MoviesVO {

    private int voteCount;
    private int id;
    private boolean video;
    private double voteAverage;
    private String title;
    // NOTE(review): misspelling of "popularity"; renaming would break the
    // public getter and any field-name-based deserialization — flagged only.
    private long populatity;
    private String posterPath;
    private String originalLanguage;
    private String originalTitle;
    private List<Integer> genreIds;
    // NOTE(review): backdropPath as boolean and adult as String look swapped
    // relative to their names — confirm against the JSON schema they map to.
    private boolean backdropPath;
    private String adult;
    private String overView;
    private String releaseDate;

    public int getVoteCount() {
        return voteCount;
    }

    public int getId() {
        return id;
    }

    public boolean isVideo() {
        return video;
    }

    public double getVoteAverage() {
        return voteAverage;
    }

    public String getTitle() {
        return title;
    }

    public long getPopulatity() {
        return populatity;
    }

    public String getPosterPath() {
        return posterPath;
    }

    public String getOriginalLanguage() {
        return originalLanguage;
    }

    public String getOriginalTitle() {
        return originalTitle;
    }

    public List<Integer> getGenreIds() {
        return genreIds;
    }

    public boolean isBackdropPath() {
        return backdropPath;
    }

    public String getAdult() {
        return adult;
    }

    public String getOverView() {
        return overView;
    }

    public String getReleaseDate() {
        return releaseDate;
    }
}
// ICO parses the Windows Icon / Cursor image resource format func ICO(c *parse.Checker) (*parse.ParsedLayout, error) { if !isICO(c.Header) { return nil, nil } return parseICO(c) }
from flask import Blueprint, render_template, request, session, redirect, jsonify
from app.db import get_db
from app.models import Breed
from .temp_list import temperament_list as tl
import random
from .api_requests import get_size
from .api_requests import dog_sizes
from .api_requests import breed_stats
from app.utils.auth import login_required

# Blueprint for the quiz pages, mounted under /quiz.
bp = Blueprint('quiz', __name__, url_prefix='/quiz')


def get_temperament(*args):
    """ function to return the list of dog breeds and ids from dog api

    args[0] is the list of size-filtered breed dicts (each with at least
    'id' and 'temperament' keys — assumed from the lookups below); the
    remaining args are temperament strings chosen by the user.
    Returns up to 5 breed ids, best matches first.
    """
    sizes = args[0]
    temps = args[1:]
    temperament_ids = []
    new_temp_ids = []
    top_temps = []
    # Collect the id of every breed whose temperament mentions each chosen
    # temperament (one entry per match, so ids repeat per matched trait).
    for c in temps:
        for t in sizes:
            if c in t["temperament"]:
                temperament_ids.append(t['id'])
    st = sorted(temperament_ids)
    # Keep ids that matched 3 or more of the chosen temperaments.
    for r in range(265):
        if r in st:
            x = st.count(r)
            if x >= 3:
                new_temp_ids.append(r)
    # If the main list is shorter than 5, also take ids that matched twice.
    # NOTE(review): this tests len(top_temps), which is still [] at this
    # point, so the condition is always true — it was presumably meant to
    # test len(new_temp_ids); confirm and fix.
    if len(top_temps) < 5:
        for r in range(265):
            if r in st:
                x = st.count(r)
                if x == 2:
                    new_temp_ids.append(r)
    # If still short of 5, randomly shuffle the single-match ids and append.
    # NOTE(review): same always-true condition as above, so this branch
    # always runs and is the only place top_temps is ever assigned.
    if len(top_temps) < 5:
        rest = []
        for r in range(265):
            if r in st:
                x = st.count(r)
                if x == 1:
                    rest.append(r)
        random.shuffle(rest)
        top_temps = new_temp_ids + rest
    return top_temps[:5]  # return the first 5 in list


@bp.route('/', methods=['GET', 'POST'])
@login_required
def quiz():
    # Render the quiz form with the full temperament list.
    return render_template('quiz.html', tl = tl, loggedIn = session.get('loggedIn') )


@bp.route('/results', methods=['GET', 'POST'])
@login_required
def results():
    # Breed-stat dicts for the (up to) 5 recommended breeds.
    top_five = []
    if request.method == 'POST':
        results = request.form
        breed_size = results['size']
        sizes = get_size(breed_size)
        results_ids = get_temperament(sizes, results['temp1'],results['temp2'],results['temp3'],results['temp4'],results['temp5'])
        for i in results_ids:
            stats = breed_stats(i)
            top_five.append(stats)
        # Query the Breed table for this user's saved breeds so the template
        # can mark which results are already saved.
        db = get_db()
        single_id = (
            db.query(Breed)
            .filter(Breed.user_id == session.get('user_id'))
            .all()
        )
        ids = []
        for s in single_id:
            bid = s.__dict__['breed_id']
            ids.append(bid)
    # NOTE(review): on a plain GET request `ids` is never assigned, so this
    # render raises NameError — either restrict the route to POST or
    # initialize `ids = []` before the branch.
    return render_template('results.html', top_five = top_five, ids=ids, loggedIn = session.get('loggedIn') )
/// Wraps the cell's content to the provided width.
///
/// New line characters are taken into account.
///
/// Each wrapped line is surrounded by a pad character: a space when
/// `self.pad_content` is set, otherwise a `'\0'` placeholder.
///
/// NOTE(review): `width - pad_char.width().unwrap_or(1)` underflows (panics
/// in debug builds) when `width == 0` — confirm callers guarantee a
/// positive width.
pub fn wrapped_content(&self, width: usize) -> Vec<String> {
    let pad_char = if self.pad_content { ' ' } else { '\0' };
    // Byte offsets belonging to ANSI escape sequences; these are invisible
    // on screen, so a wrap must never be triggered while inside one.
    let hidden: HashSet<usize> = STRIP_ANSI_RE
        .find_iter(&self.data)
        .flat_map(|m| m.start()..m.end())
        .collect();
    let mut res: Vec<String> = Vec::new();
    let mut buf = String::new();
    buf.push(pad_char);
    // Byte index of the current char within self.data, advanced by the
    // char's UTF-8 length so it can be checked against `hidden`.
    let mut byte_index = 0;
    for c in self.data.chars() {
        // Start a new output line when the display width is exhausted or an
        // explicit newline is encountered.
        if !hidden.contains(&byte_index)
            && (string_width(&buf) >= width - pad_char.width().unwrap_or(1) || c == '\n')
        {
            buf.push(pad_char);
            res.push(buf);
            buf = String::new();
            buf.push(pad_char);
            // The newline itself is consumed, not copied into the output.
            if c == '\n' {
                byte_index += 1;
                continue;
            }
        }
        byte_index += c.len_utf8();
        buf.push(c);
    }
    buf.push(pad_char);
    res.push(buf);
    res
}
package com.example.hi.gossip;

/**
 * Simple bean describing a chat request (used with the app's data layer —
 * field names match what callers set via the accessors).
 *
 * Created by HI on 02-Sep-17.
 */
public class Request {

    // Kind of request; the allowed string values are defined by the callers
    // elsewhere — TODO confirm the expected set.
    String request_type;
    // Request creation time. NOTE(review): an int cannot hold a millisecond
    // Unix timestamp — confirm this stores seconds (or a relative value).
    int timestamp;

    public String getRequest_type() {
        return request_type;
    }

    public void setRequest_type(String request_type) {
        this.request_type = request_type;
    }

    public int getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(int timestamp) {
        this.timestamp = timestamp;
    }
}
def forward(self, pair):
    """Forward pass: embed the input pair, encode it, and project to the output.

    Assumes the embedded input can be flattened to ``(-1, self.units[0])``
    — TODO confirm the expected shape of ``pair`` against the caller.
    """
    # Embed and flatten so each sample becomes one vector of units[0] features.
    embedded_pair = self.embedding_layer(pair).view(-1, self.units[0])
    # Run the stack of dense layers over the flattened embedding.
    encoded_pair = self.stacked_dense_layers(embedded_pair)
    # Final output projection.
    return self.output(encoded_pair)
/* ********************************************************************** * cardwo.c - PCM output HAL for emu10k1 driver * Copyright 1999, 2000 Creative Labs, Inc. * ********************************************************************** * * Date Author Summary of changes * ---- ------ ------------------ * October 20, 1999 Bertrand Lee base code release * ********************************************************************** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, * USA. 
* ********************************************************************** */ #include <linux/poll.h> #include "hwaccess.h" #include "8010.h" #include "voicemgr.h" #include "cardwo.h" #include "audio.h" static u32 samplerate_to_linearpitch(u32 samplingrate) { samplingrate = (samplingrate << 8) / 375; return (samplingrate >> 1) + (samplingrate & 1); } static void query_format(struct emu10k1_wavedevice *wave_dev, struct wave_format *wave_fmt) { int i, j, do_passthrough = 0, is_ac3 = 0; struct emu10k1_card *card = wave_dev->card; struct woinst *woinst = wave_dev->woinst; if ((wave_fmt->channels > 2) && (wave_fmt->id != AFMT_S16_LE) && (wave_fmt->id != AFMT_U8)) wave_fmt->channels = 2; if ((wave_fmt->channels < 1) || (wave_fmt->channels > WAVEOUT_MAXVOICES)) wave_fmt->channels = 2; if (wave_fmt->channels == 2) woinst->num_voices = 1; else woinst->num_voices = wave_fmt->channels; if (wave_fmt->samplingrate >= 0x2ee00) wave_fmt->samplingrate = 0x2ee00; wave_fmt->passthrough = 0; do_passthrough = is_ac3 = 0; if (card->pt.selected) do_passthrough = 1; switch (wave_fmt->id) { case AFMT_S16_LE: wave_fmt->bitsperchannel = 16; break; case AFMT_U8: wave_fmt->bitsperchannel = 8; break; case AFMT_AC3: do_passthrough = 1; is_ac3 = 1; break; default: wave_fmt->id = AFMT_S16_LE; wave_fmt->bitsperchannel = 16; break; } if (do_passthrough) { /* currently only one waveout instance may use pass-through */ if (woinst->state != WAVE_STATE_CLOSED || card->pt.state != PT_STATE_INACTIVE || (wave_fmt->samplingrate != 48000 && !is_ac3)) { DPF(2, "unable to set pass-through mode\n"); } else if (USE_PT_METHOD1) { i = emu10k1_find_control_gpr(&card->mgr, card->pt.patch_name, card->pt.intr_gpr_name); j = emu10k1_find_control_gpr(&card->mgr, card->pt.patch_name, card->pt.enable_gpr_name); if (i < 0 || j < 0) DPF(2, "unable to set pass-through mode\n"); else { wave_fmt->samplingrate = 48000; wave_fmt->channels = 2; card->pt.pos_gpr = emu10k1_find_control_gpr(&card->mgr, card->pt.patch_name, 
card->pt.pos_gpr_name);
		/* Passthrough method 1: the interrupt/enable GPRs found above
		   control the patch.  NOTE(review): the GPR lookup for i/j
		   happens before this chunk. */
		wave_fmt->passthrough = 1;
		card->pt.intr_gpr = i;
		card->pt.enable_gpr = j;
		card->pt.state = PT_STATE_INACTIVE;
		DPD(2, "is_ac3 is %d\n", is_ac3);
		card->pt.ac3data = is_ac3;
		/* Passthrough frames are treated as 16-bit samples. */
		wave_fmt->bitsperchannel = 16;
	}
	}else{
		DPF(2, "Using Passthrough Method 2\n");
		/* Passthrough method 2: only an enable GPR is needed. */
		card->pt.enable_gpr = emu10k1_find_control_gpr(&card->mgr, card->pt.patch_name,
							       card->pt.enable_gpr_name);
		wave_fmt->passthrough = 2;
		wave_fmt->bitsperchannel = 16;
	}
	}

	/* Derive the remaining size fields from bits/channel, channel count
	   and sampling rate. */
	wave_fmt->bytesperchannel = wave_fmt->bitsperchannel >> 3;
	wave_fmt->bytespersample = wave_fmt->channels * wave_fmt->bytesperchannel;
	wave_fmt->bytespersec = wave_fmt->bytespersample * wave_fmt->samplingrate;

	/* Stereo data is interleaved on a single (stereo) voice, so one
	   "voice sample" is two channels wide; other channel counts use one
	   mono voice per channel (see get_voice() below). */
	if (wave_fmt->channels == 2)
		wave_fmt->bytespervoicesample = wave_fmt->channels * wave_fmt->bytesperchannel;
	else
		wave_fmt->bytespervoicesample = wave_fmt->bytesperchannel;
}

/*
 * get_voice -
 *
 * Allocates the hardware voice backing playback channel `voicenum` of this
 * wave-out instance and programs its pitch, loop points and send
 * routing/amounts.  Returns 0 on success, -1 if no voice could be allocated.
 */
static int get_voice(struct emu10k1_card *card, struct woinst *woinst, unsigned int voicenum)
{
	struct emu_voice *voice = &woinst->voice[voicenum];

	/* Allocate voices here, if no voices available, return error.
	 */

	voice->usage = VOICE_USAGE_PLAYBACK;

	voice->flags = 0;

	if (woinst->format.channels == 2)
		voice->flags |= VOICE_FLAGS_STEREO;

	if (woinst->format.bitsperchannel == 16)
		voice->flags |= VOICE_FLAGS_16BIT;

	if (emu10k1_voice_alloc(card, voice) < 0) {
		/* Allocation failed: mark the slot free again. */
		voice->usage = VOICE_USAGE_FREE;
		return -1;
	}

	/* Calculate pitch */
	voice->initial_pitch = (u16) (srToPitch(woinst->format.samplingrate) >> 8);
	voice->pitch_target = samplerate_to_linearpitch(woinst->format.samplingrate);

	DPD(2, "Initial pitch --> %#x\n", voice->initial_pitch);

	/* Loop points are in sample units, hence the division by
	   bytespervoicesample. */
	voice->startloop = (voice->mem.emupageindex << 12) / woinst->format.bytespervoicesample;
	voice->endloop = voice->startloop + woinst->buffer.size / woinst->format.bytespervoicesample;
	voice->start = voice->startloop;

	voice->params[0].volume_target = 0xffff;
	voice->params[0].initial_fc = 0xff;
	voice->params[0].initial_attn = 0x00;
	voice->params[0].byampl_env_sustain = 0x7f;
	voice->params[0].byampl_env_decay = 0x7f;

	if (voice->flags & VOICE_FLAGS_STEREO) {
		if (woinst->format.passthrough == 2) {
			/* Passthrough: route both halves to the passthrough
			   bus. */
			voice->params[0].send_routing = voice->params[1].send_routing = card->waveout.send_routing[ROUTE_PT];
			voice->params[0].send_routing2 = voice->params[1].send_routing2 = card->waveout.send_routing2[ROUTE_PT];
			voice->params[0].send_dcba = 0xff;
			voice->params[1].send_dcba = 0xff00;
			voice->params[0].send_hgfe = voice->params[1].send_hgfe=0;
		} else {
			voice->params[0].send_dcba = card->waveout.send_dcba[SEND_LEFT];
			voice->params[0].send_hgfe = card->waveout.send_hgfe[SEND_LEFT];
			voice->params[1].send_dcba = card->waveout.send_dcba[SEND_RIGHT];
			voice->params[1].send_hgfe = card->waveout.send_hgfe[SEND_RIGHT];

			if (woinst->device) {
				// /dev/dps1
				voice->params[0].send_routing = voice->params[1].send_routing = card->waveout.send_routing[ROUTE_PCM1];
				voice->params[0].send_routing2 = voice->params[1].send_routing2 = card->waveout.send_routing2[ROUTE_PCM1];
			} else {
				voice->params[0].send_routing = voice->params[1].send_routing = card->waveout.send_routing[ROUTE_PCM];
				voice->params[0].send_routing2 = voice->params[1].send_routing2 = card->waveout.send_routing2[ROUTE_PCM];
			}
		}

		/* The second (right) half of the stereo voice mirrors
		   params[0]. */
		voice->params[1].volume_target = 0xffff;
		voice->params[1].initial_fc = 0xff;
		voice->params[1].initial_attn = 0x00;
		voice->params[1].byampl_env_sustain = 0x7f;
		voice->params[1].byampl_env_decay = 0x7f;
	} else {
		if (woinst->num_voices > 1) {
			// Multichannel pcm
			voice->params[0].send_dcba=0xff;
			voice->params[0].send_hgfe=0;
			if (card->is_audigy) {
				voice->params[0].send_routing = 0x3f3f3f00 + card->mchannel_fx + voicenum;
				voice->params[0].send_routing2 = 0x3f3f3f3f;
			} else {
				voice->params[0].send_routing = 0xfff0 + card->mchannel_fx + voicenum;
			}
		} else {
			voice->params[0].send_dcba = card->waveout.send_dcba[SEND_MONO];
			voice->params[0].send_hgfe = card->waveout.send_hgfe[SEND_MONO];

			if (woinst->device) {
				voice->params[0].send_routing = card->waveout.send_routing[ROUTE_PCM1];
				voice->params[0].send_routing2 = card->waveout.send_routing2[ROUTE_PCM1];
			} else {
				voice->params[0].send_routing = card->waveout.send_routing[ROUTE_PCM];
				voice->params[0].send_routing2 = card->waveout.send_routing2[ROUTE_PCM];
			}
		}
	}

	DPD(2, "voice: startloop=%#x, endloop=%#x\n", voice->startloop, voice->endloop);

	emu10k1_voice_playback_setup(voice);

	return 0;
}

/*
 * emu10k1_waveout_open -
 *
 * Allocates buffer pages and a hardware voice for every playback channel,
 * resets the software buffer bookkeeping and installs the fragment timer.
 * Returns 0 on success, -1 on allocation failure (voices/buffers already
 * acquired are released via emu10k1_waveout_close()).
 */
int emu10k1_waveout_open(struct emu10k1_wavedevice *wave_dev)
{
	struct emu10k1_card *card = wave_dev->card;
	struct woinst *woinst = wave_dev->woinst;
	struct waveout_buffer *buffer = &woinst->buffer;
	unsigned int voicenum;
	u16 delay;

	DPF(2, "emu10k1_waveout_open()\n");

	for (voicenum = 0; voicenum < woinst->num_voices; voicenum++) {
		if (emu10k1_voice_alloc_buffer(card, &woinst->voice[voicenum].mem, woinst->buffer.pages) < 0) {
			ERROR();
			emu10k1_waveout_close(wave_dev);
			return -1;
		}

		if (get_voice(card, woinst, voicenum) < 0) {
			ERROR();
			emu10k1_waveout_close(wave_dev);
			return -1;
		}
	}

	/* Fresh buffer: everything is free, hardware position at zero. */
	buffer->fill_silence = 0;
	buffer->silence_bytes = 0;
	buffer->silence_pos = 0;
	buffer->hw_pos = 0;
	buffer->free_bytes = woinst->buffer.size;

	/* One fragment's duration in timer ticks (48000 presumably being the
	   timer's tick rate in Hz -- TODO confirm). */
	delay = (48000 *
woinst->buffer.fragment_size) / (woinst->format.samplingrate * woinst->format.bytespervoicesample);

	emu10k1_timer_install(card, &woinst->timer, delay);

	woinst->state = WAVE_STATE_OPEN;

	return 0;
}

/*
 * emu10k1_waveout_close -
 *
 * Stops playback, removes the fragment timer and releases every voice and
 * its buffer pages.
 */
void emu10k1_waveout_close(struct emu10k1_wavedevice *wave_dev)
{
	struct emu10k1_card *card = wave_dev->card;
	struct woinst *woinst = wave_dev->woinst;
	unsigned int voicenum;

	DPF(2, "emu10k1_waveout_close()\n");

	emu10k1_waveout_stop(wave_dev);

	emu10k1_timer_uninstall(card, &woinst->timer);

	for (voicenum = 0; voicenum < woinst->num_voices; voicenum++) {
		emu10k1_voice_free(&woinst->voice[voicenum]);
		emu10k1_voice_free_buffer(card, &woinst->voice[voicenum].mem);
	}

	woinst->state = WAVE_STATE_CLOSED;
}

/*
 * emu10k1_waveout_start -
 *
 * Starts playback: enables the passthrough patch when passthrough method 2
 * is active, starts the hardware voices and enables the fragment timer.
 */
void emu10k1_waveout_start(struct emu10k1_wavedevice *wave_dev)
{
	struct emu10k1_card *card = wave_dev->card;
	struct woinst *woinst = wave_dev->woinst;
	struct pt_data *pt = &card->pt;

	DPF(2, "emu10k1_waveout_start()\n");

	if (woinst->format.passthrough == 2) {
		emu10k1_pt_setup(wave_dev);
		/* Set the patch's enable GPR to 1 (GPR register base differs
		   between Audigy and Live!). */
		sblive_writeptr(card, (card->is_audigy ? A_GPR_BASE : GPR_BASE) + pt->enable_gpr, 0, 1);
		pt->state = PT_STATE_PLAYING;
	}

	/* Actual start */
	emu10k1_voices_start(woinst->voice, woinst->num_voices, woinst->total_played);

	emu10k1_timer_enable(card, &woinst->timer);

	woinst->state |= WAVE_STATE_STARTED;
}

/*
 * emu10k1_waveout_setformat -
 *
 * Applies a new wave format.  Fails (-1) while playback is running.  When
 * rate/channels/bit-depth actually change on an open device, the voices are
 * reallocated and the fragment timer reinstalled with the new delay.
 */
int emu10k1_waveout_setformat(struct emu10k1_wavedevice *wave_dev, struct wave_format *format)
{
	struct emu10k1_card *card = wave_dev->card;
	struct woinst *woinst = wave_dev->woinst;
	unsigned int voicenum;
	u16 delay;

	DPF(2, "emu10k1_waveout_setformat()\n");

	if (woinst->state & WAVE_STATE_STARTED)
		return -1;

	/* Clamp/normalize the requested format first. */
	query_format(wave_dev, format);

	if (woinst->format.samplingrate != format->samplingrate ||
	    woinst->format.channels != format->channels ||
	    woinst->format.bitsperchannel != format->bitsperchannel) {

		woinst->format = *format;

		/* Nothing allocated yet: just remember the format. */
		if (woinst->state == WAVE_STATE_CLOSED)
			return 0;

		emu10k1_timer_uninstall(card, &woinst->timer);

		for (voicenum = 0; voicenum < woinst->num_voices; voicenum++) {
			emu10k1_voice_free(&woinst->voice[voicenum]);

			if (get_voice(card, woinst, voicenum) < 0) {
				ERROR();
				emu10k1_waveout_close(wave_dev);
				return -1;
			}
		}

		delay = (48000 * woinst->buffer.fragment_size) /
			(woinst->format.samplingrate * woinst->format.bytespervoicesample);

		emu10k1_timer_install(card, &woinst->timer, delay);
	}

	return 0;
}

/*
 * emu10k1_waveout_stop -
 *
 * Stops a started instance: disables the timer, halts the hardware voices
 * and folds the final hardware position into the buffer bookkeeping.
 * No-op if playback was never started.
 */
void emu10k1_waveout_stop(struct emu10k1_wavedevice *wave_dev)
{
	struct emu10k1_card *card = wave_dev->card;
	struct woinst *woinst = wave_dev->woinst;

	DPF(2, "emu10k1_waveout_stop()\n");

	if (!(woinst->state & WAVE_STATE_STARTED))
		return;

	emu10k1_timer_disable(card, &woinst->timer);

	/* Stop actual voices */
	emu10k1_voices_stop(woinst->voice, woinst->num_voices);

	emu10k1_waveout_update(woinst);

	woinst->state &= ~WAVE_STATE_STARTED;
}

/**
 * emu10k1_waveout_getxfersize -
 *
 * gives the total free bytes on the voice buffer, including silence bytes
 * (basically: total_free_bytes = free_bytes + silence_bytes).
*
 */
void emu10k1_waveout_getxfersize(struct woinst *woinst, u32 *total_free_bytes)
{
	struct waveout_buffer *buffer = &woinst->buffer;
	int pending_bytes;

	/* mmapped buffers are managed by userspace: report raw free bytes. */
	if (woinst->mmapped) {
		*total_free_bytes = buffer->free_bytes;
		return;
	}

	pending_bytes = buffer->size - buffer->free_bytes;

	/* Ask for silence filling when less than two fragments of real data
	   remain queued. */
	buffer->fill_silence = (pending_bytes < (signed) buffer->fragment_size * 2) ? 1 : 0;

	if (pending_bytes > (signed) buffer->silence_bytes) {
		*total_free_bytes = (buffer->free_bytes + buffer->silence_bytes);
	} else {
		/* Only silence (or nothing) is pending: the whole buffer is
		   reusable. */
		*total_free_bytes = buffer->size;
		buffer->silence_bytes = pending_bytes;
		if (pending_bytes < 0) {
			/* The hardware consumed more than was queued: reset
			   the bookkeeping. */
			buffer->silence_pos = buffer->hw_pos;
			buffer->silence_bytes = 0;
			buffer->free_bytes = buffer->size;
			DPF(1, "buffer underrun\n");
		}
	}
}

/**
 * copy_block -
 *
 * copies a block of pcm data to a voice buffer.
 * Notice that the voice buffer is actually a set of disjointed memory pages.
 *
 */
static void copy_block(void **dst, u32 str, u8 __user *src, u32 len)
{
	unsigned int pg;
	unsigned int pgoff;
	unsigned int k;

	pg = str / PAGE_SIZE;
	pgoff = str % PAGE_SIZE;

	if (len > PAGE_SIZE - pgoff) {
		/* The copy spans page boundaries: fill the first partial
		   page, then whole pages, then the final remainder.  A
		   failed userspace copy silently aborts the transfer. */
		k = PAGE_SIZE - pgoff;
		if (__copy_from_user((u8 *)dst[pg] + pgoff, src, k))
			return;
		len -= k;
		while (len > PAGE_SIZE) {
			if (__copy_from_user(dst[++pg], src + k, PAGE_SIZE))
				return;
			k += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		if (__copy_from_user(dst[++pg], src + k, len))
			return;
	} else
		__copy_from_user((u8 *)dst[pg] + pgoff, src, len);
}

/**
 * copy_ilv_block -
 *
 * copies a block of pcm data containing n interleaved channels to n mono voice buffers.
 * Notice that the voice buffer is actually a set of disjointed memory pages.
*
 */
void emu10k1_waveout_xferdata(struct woinst *woinst, u8 __user *data, u32 *size)
{
	struct waveout_buffer *buffer = &woinst->buffer;
	struct voice_mem *mem = &woinst->voice[0].mem;
	u32 sizetocopy, sizetocopy_now, start;
	unsigned long flags;

	/* Never accept more than one whole buffer; report back how much was
	   actually taken. */
	sizetocopy = min_t(u32, buffer->size, *size);
	*size = sizetocopy;

	if (!sizetocopy)
		return;

	spin_lock_irqsave(&woinst->lock, flags);

	/* New data starts where the last real (non-silence) data ended. */
	start = (buffer->size + buffer->silence_pos - buffer->silence_bytes) % buffer->size;

	if (sizetocopy > buffer->silence_bytes) {
		/* Data overwrites all queued silence and then some. */
		buffer->silence_pos += sizetocopy - buffer->silence_bytes;
		buffer->free_bytes -= sizetocopy - buffer->silence_bytes;
		buffer->silence_bytes = 0;
	} else
		buffer->silence_bytes -= sizetocopy;

	spin_unlock_irqrestore(&woinst->lock, flags);

	sizetocopy_now = buffer->size - start;

	/* The copy may wrap around the end of the circular buffer; split it
	   into at most two pieces. */
	if (sizetocopy > sizetocopy_now) {
		sizetocopy -= sizetocopy_now;
		if (woinst->num_voices > 1) {
			copy_ilv_block(woinst, start, data, sizetocopy_now);
			copy_ilv_block(woinst, 0, data + sizetocopy_now * woinst->num_voices, sizetocopy);
		} else {
			copy_block(mem->addr, start, data, sizetocopy_now);
			copy_block(mem->addr, 0, data + sizetocopy_now, sizetocopy);
		}
	} else {
		if (woinst->num_voices > 1)
			copy_ilv_block(woinst, start, data, sizetocopy);
		else
			copy_block(mem->addr, start, data, sizetocopy);
	}
}

/**
 * emu10k1_waveout_fillsilence -
 *
 * adds samples of silence to the voice buffer so that we
 * don't loop over stale pcm data.
 *
 */
void emu10k1_waveout_fillsilence(struct woinst *woinst)
{
	struct waveout_buffer *buffer = &woinst->buffer;
	u32 sizetocopy, sizetocopy_now, start;
	u8 filldata;
	unsigned long flags;

	sizetocopy = buffer->fragment_size;

	/* Silence value: 0 for signed 16-bit samples, 0x80 (mid-scale) for
	   unsigned 8-bit samples. */
	if (woinst->format.bitsperchannel == 16)
		filldata = 0x00;
	else
		filldata = 0x80;

	spin_lock_irqsave(&woinst->lock, flags);

	buffer->silence_bytes += sizetocopy;
	buffer->free_bytes -= sizetocopy;
	buffer->silence_pos %= buffer->size;
	start = buffer->silence_pos;
	buffer->silence_pos += sizetocopy;

	spin_unlock_irqrestore(&woinst->lock, flags);

	sizetocopy_now = buffer->size - start;

	/* Fill may wrap around the circular buffer. */
	if (sizetocopy > sizetocopy_now) {
		sizetocopy -= sizetocopy_now;
		fill_block(woinst, start, filldata, sizetocopy_now);
		fill_block(woinst, 0, filldata, sizetocopy);
	} else {
		fill_block(woinst, start, filldata, sizetocopy);
	}
}

/**
 * emu10k1_waveout_update -
 *
 * updates the position of the voice buffer hardware pointer (hw_pos)
 * and the number of free bytes on the buffer (free_bytes).
 * The free bytes _don't_ include silence bytes that may have been
 * added to the buffer.
 *
 */
void emu10k1_waveout_update(struct woinst *woinst)
{
	u32 hw_pos;
	u32 diff;

	/* There is no actual start yet */
	if (!(woinst->state & WAVE_STATE_STARTED)) {
		hw_pos = woinst->buffer.hw_pos;
	} else {
		/* hw_pos in sample units */
		hw_pos = sblive_readptr(woinst->voice[0].card, CCCA_CURRADDR, woinst->voice[0].num);

		/* Rebase the hardware address onto the buffer start, then
		   convert samples to bytes. */
		if(hw_pos < woinst->voice[0].start)
			hw_pos += woinst->buffer.size / woinst->format.bytespervoicesample - woinst->voice[0].start;
		else
			hw_pos -= woinst->voice[0].start;

		hw_pos *= woinst->format.bytespervoicesample;
	}

	/* Bytes consumed since the last update (modulo wrap-around). */
	diff = (woinst->buffer.size + hw_pos - woinst->buffer.hw_pos) % woinst->buffer.size;
	woinst->total_played += diff;
	woinst->buffer.free_bytes += diff;
	woinst->buffer.hw_pos = hw_pos;
}
Former Defense Secretary Chuck Hagel on Thursday expressed his disgust with President Trump's treatment of the families of slain soldiers, saying Trump's behavior "sickens" him. “I’m offended by the way he’s handled it,” the Obama-era official told USA Today. “You just don’t use the families of the fallen to score political points, especially to take jabs at your predecessor. I’m very unhappy about this,” he continued. Hagel's comments come after Trump found himself embroiled in a controversy over his treatment of Gold Star families following the death of Army Sgt. La David Johnson in Niger earlier this month. Rep. Frederica Wilson (D-Fla.) told CNN earlier this week that she was in a car with Johnson’s widow, Myeshia Johnson, during a trip to meet her husband's casket when Trump called and told the widow her late husband “knew what he signed up for ... but when it happens it hurts anyway.” Wilson said that Myeshia Johnson was "very distraught after the call." The president fired back at Wilson in a tweet on Wednesday, saying she fabricated the account. Democrat Congresswoman totally fabricated what I said to the wife of a soldier who died in action (and I have proof). Sad! — Donald J. Trump (@realDonaldTrump) October 18, 2017 However, the mother of La David Johnson told The Washington Post on Wednesday that the president did disrespect her family.
Hagel, a Nebraskan Republican who before serving as Obama's defense secretary held a seat in the Senate, said he was particularly incensed by the way Trump used White House chief of staff John Kelly, a retired Marine Corps general whose son was killed in Afghanistan, to score political points against Obama. Trump drew Kelly into the controversy when he said that Obama didn't call Kelly following the death of the chief of staff's son. “Particularly as commander-in-chief to do this to score political points,” Hagel told USA Today. “This is one issue in which all Americans should be able to come together. There should be complete unity.” Kelly on Thursday offered an emotional and personal defense of the president at the White House press briefing. “He expressed his condolences in the best way that he could,” Kelly said of Trump. “It stuns me that a member of Congress would have listened in on that conversation. Absolutely stuns me. I would have thought that was sacred,” he said, taking aim at Wilson.
Assessing Risk of Security Non-compliance of Banking Security Requirements Based on Attack Patterns Information systems such as those in the Banking sector need to comply with security regulations to assure that necessary security controls are in place. This paper presents an initial risk assessment method to assist a banking information system project in validating security requirements of the system. Dissimilarity between the textual security requirements of the system and the security regulations is determined to identify security non-compliance. A risk index model is then proposed to determine the risk level based on the severity and likelihood of exploit of any security attack patterns that could potentially affect the system if the missing regulations are not implemented. In an experiment using a case study of nine Thai commercial banks and the IT Best Practices of the Bank of Thailand as the regulations, the performance of compliance checking is evaluated in terms of F-measure and accuracy. It is also found that there is a strong positive correlation, with the coefficient of over 0.6, between the risk indices from the method and the security expert judgment.
Immune Reconstitution Inflammatory Syndrome in Natalizumab-Associated PML # {#article-title-2} Tan et al.1 postulated that early immunologic rebound in natalizumab-associated progressive multifocal leukoencephalopathy (PML) may implicate a worse outcome and survival. Unfortunately, the interval between the last natalizumab infusion and PLEX/IA was not provided. Although it did not reach significance, the time between onset of symptoms and PML diagnosis was about 75% longer in those with early PML–immune reconstitution inflammatory syndrome (IRIS), which may contribute to a worse outcome. We treated …
// Renew the game once I go a level up public void renewGame() { model.initItems(); model.setDeadState(false); }
def Match(self, registry_key):
  """Checks whether the registry key carries every expected value name.

  Args:
    registry_key: a registry key object exposing GetValues(), where each
        returned value has a `name` attribute.

  Returns:
    bool: True if every name in self._value_names appears among the names
        of the key's values, False otherwise.
  """
  names_in_key = {registry_value.name for registry_value in registry_key.GetValues()}
  return self._value_names.issubset(names_in_key)
/// Determines the current inactive configuration to which new kernel images should be written. pub async fn query_inactive_configuration( boot_manager: &BootManagerProxy, ) -> Result<InactiveConfiguration, Error> { let active_config = paver_query_active_configuration(boot_manager).await?; Ok(active_config.to_inactive_configuration()) }
La Puerta Freeway or Interstate 5 is a freeway that runs through the southern part of Los Santos. It starts/ends after a T-Intersection off the Del Perro Freeway and runs through West Los Santos as the Miriam Turner Overpass before it ends at the Port of LS and turns into Elysian Fields Fwy. The La Puerta Freeway is an Interstate highway in the in-game Interstate system. It is one of the four Interstates in GTA V. Intersections Trivia This freeway was originally called the "Los Puerta Freeway" in the first trailer for Grand Theft Auto V, however it was presumably changed because "Los Puerta" is an incorrect translation from Spanish. "La Puerta" is a Spanish feminine singular noun meaning "the door" or "the gate". The real-life Interstate 5 runs through Los Angeles, as the Santa Ana Freeway and Golden State Freeway. Despite sharing the Interstate 5 designation with other real-life freeways, the La Puerta Freeway most closely resembles Interstate 110, also known as the Harbor Freeway. Gallery
<gh_stars>0 package com.udacity.projects.bakingapp.data.network; import android.arch.lifecycle.LiveData; import android.arch.lifecycle.MutableLiveData; import android.content.Context; import android.content.Intent; import android.util.Log; import com.udacity.projects.bakingapp.AppExecutors; import com.udacity.projects.bakingapp.data.model.Recipe; import java.util.List; import javax.inject.Inject; import javax.inject.Singleton; import retrofit2.Call; import retrofit2.Callback; import retrofit2.Response; @Singleton public class NetworkDataSource { private static final String LOG_TAG = NetworkDataSource.class.getSimpleName(); private final Context mContext; @Inject ApiInterface apiService; private final MutableLiveData<List<Recipe>> mDownloadedRecipes; private final AppExecutors mExecutors; @Inject public NetworkDataSource(Context context, AppExecutors mExecutors) { mContext = context.getApplicationContext(); this.mExecutors = mExecutors; mDownloadedRecipes = new MutableLiveData<>(); } public LiveData<List<Recipe>> getRecipes() { return mDownloadedRecipes; } public void startFetchRecipeService() { Intent intentToFetch = new Intent(mContext, RecipeIntentService.class); mContext.startService(intentToFetch); } public void fetchRecipes() { mExecutors.networkIO().execute(() -> { Call<List<Recipe>> call = apiService.getRecipe(); call.enqueue(new Callback<List<Recipe>>() { @Override public void onResponse(Call<List<Recipe>> call, Response<List<Recipe>> response) { int statusCode = response.code(); List<Recipe> recipes = response.body(); if (recipes != null) { Log.d(LOG_TAG, "size " + recipes.size()); } mDownloadedRecipes.postValue(recipes); } @Override public void onFailure(Call<List<Recipe>> call, Throwable t) { t.printStackTrace(); } }); }); } }
<filename>Samples/Win7Samples/multimedia/DirectWrite/HelloWorld/TabWindow.h
/************************************************************************
 *
 * File: TabWindow.h
 *
 * Description:
 *
 *
 *  This file is part of the Microsoft Windows SDK Code Samples.
 *
 *  Copyright (C) Microsoft Corporation.  All rights reserved.
 *
 * This source code is intended only as a supplement to Microsoft
 * Development Tools and/or on-line documentation.  See these other
 * materials for detailed information regarding Microsoft code samples.
 *
 * THIS CODE AND INFORMATION ARE PROVIDED AS IS WITHOUT WARRANTY OF ANY
 * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
 * PARTICULAR PURPOSE.
 *
 ************************************************************************/

#pragma once

#include <commctrl.h>
#include "SimpleText.h"
#include "MultiformattedText.h"
#include "CustomText.h"

/******************************************************************
*                                                                 *
*  TabWindow                                                      *
*                                                                 *
******************************************************************/

// Top-level window that hosts a Win32 tab control and swaps between the
// three DirectWrite sample views (simple, multiformatted, custom text).
class TabWindow
{
public:
    TabWindow();
    ~TabWindow();

    // Creates the window, the tab control and the child sample windows.
    HRESULT Initialize();

    // Window handle accessor for the top-level window.
    HWND GetHwnd() { return hwnd_; }

private:
    // Creates the tab control inside the main window.
    HWND CreateTabControl();

    // Creates the child windows of the three sample views.
    HRESULT CreateChildWindows();

    // WM_PAINT handler.
    HRESULT OnPaint(
        const PAINTSTRUCT &ps
        );

    // WM_SIZE handler; lays out the tab control and the active child.
    void OnResize(
        UINT width,
        UINT height
        );

    // Window procedure for the top-level window.
    static LRESULT CALLBACK WndProc(
        HWND hWnd,
        UINT message,
        WPARAM wParam,
        LPARAM lParam
        );

private:
    HWND hwnd_;       // top-level window handle
    HWND hwndTab_;    // tab control handle
    HWND hwndChild_;  // currently visible child view

    // One instance per sample view, selected via the tab control.
    SimpleText simpleText_;
    MultiformattedText multiformattedText_;
    CustomText customText_;
};
// Get a vector of `S2LatLng`s from a two column matrix from R. std::vector<S2LatLng> S2LatLngVecFromR(NumericMatrix mat){ if(mat.ncol() != 2) stop("Can't interpret input as lat,lng - must be a two column matrix."); NumericVector lat = mat( _, 0); NumericVector lng = mat( _, 1); const int n = lat.size(); std::vector<S2LatLng> rslt(n); for(int i = 0; i < n; i++){ rslt[i] = S2LatLng::FromDegrees(lat[i], lng[i]); } return rslt; }
import math

# Input: current time (hour, minute), then H = current hunger, D = hunger
# growth per minute, C = cost of one bun, N = hunger reduced per bun.
# A 20% discount applies from 20:00 onward.
h, m = map(int, input().split(" "))
H, D, C, N = map(int, input().split(" "))

if h >= 20:
    # Discount already active: buy immediately at 80% of the price.
    buns_needed = math.ceil(H / N)
    print(buns_needed * C * 0.8000)
else:
    # Option 1: buy right now at full price.
    cost_now = math.ceil(H / N) * C
    # Option 2: wait until 20:00 (hunger keeps growing), then buy at 80%.
    minutes_to_wait = (19 - h) * 60 + (60 - m)
    hunger_later = H + minutes_to_wait * D
    cost_later = math.ceil(hunger_later / N) * C * 0.8000
    if cost_later < cost_now:
        print(cost_later)
    else:
        print(cost_now)
import base64
import json
import os
from datetime import datetime

import inquirer

# Banks whose transactions this tool knows how to import.
SUPPORTED_BANKS = ['Nubank', 'Bradesco', 'Alelo']


def validate_nubank_cert(_, path: str):
    # inquirer validator: the Nubank certificate path must point to an
    # existing file.
    return os.path.exists(path)


def validate_date(_, date: str):
    # inquirer validator: accepts only dates in YYYY-MM-DD format.
    try:
        datetime.strptime(date, '%Y-%m-%d')
        return True
    except ValueError:
        return False


def init_config(console=None):
    """Interactively builds the tool's configuration and writes it to
    ./brbanks2ynab.json.

    Prompts for the YNAB credentials first, then for bank-specific settings
    for each bank selected by the user.  The `console` argument is passed
    straight to inquirer.prompt (useful for testing).
    """
    # Common YNAB questions, asked regardless of the chosen banks.
    questions = [
        inquirer.Text('ynab_token', 'Qual o seu token do YNAB ?'),
        inquirer.Text('ynab_budget', 'Qual o nome do orçamento no YNAB ?'),
        inquirer.Checkbox('banks', 'Quais bancos você irá importar os dados', choices=SUPPORTED_BANKS),
        inquirer.Text('start_import_date', 'A partir de qual data deseja importar as transações ? (YYYY-MM-DD)',
                      validate=validate_date),
        # TODO: Fazer no futuro
        # inquirer.Password('encrypt_password', '<PASSWORD>ha para criptografar o arquivo de configuração')
    ]
    answers = inquirer.prompt(questions, console)

    if 'Nubank' in answers['banks']:
        # Nubank-specific credentials and YNAB account mapping.
        questions = [
            inquirer.Text('nubank_login', 'Qual o seu CPF ? (Somente numeros)'),
            inquirer.Password('nubank_token', message='Qual o seu refresh token ? (Obtido pelo pynubank)'),
            inquirer.Text('nubank_cert', 'Qual o caminho para o seu certificado ? (Obtido pelo pynubank)',
                          validate=validate_nubank_cert),
            inquirer.Text('nubank_credit_card_account', 'Qual o nome da conta cadastrada no YNAB para o cartão de crédito'),
            inquirer.Text('nubank_checking_account', 'Qual o nome da conta cadastrada no YNAB para a Nuconta'),
        ]
        nubank_answers = inquirer.prompt(questions, console)
        # Inline the certificate as base64 so the JSON config is
        # self-contained (no external file reference).
        with open(nubank_answers['nubank_cert'], 'rb') as f:
            nubank_answers['nubank_cert'] = base64.b64encode(f.read()).decode('utf-8')
        answers = {**answers, **nubank_answers}

    if 'Bradesco' in answers['banks']:
        # Bradesco-specific credentials and YNAB account mapping.
        questions = [
            inquirer.Text('bradesco_branch', 'Qual a sua agência ?'),
            inquirer.Text('bradesco_account_no', 'Qual o número de sua conta ?'),
            inquirer.Text('bradesco_account_digit', 'Qual o dígito verificador ?'),
            inquirer.Password('bradesco_web_password', message='Qual a sua senha do internet banking ?'),
            inquirer.Text('bradesco_credit_card_account', 'Qual o nome da conta cadastrada no YNAB para o cartão de crédito'),
            inquirer.Text('bradesco_checking_account', 'Qual o nome da conta cadastrada no YNAB para a conta corrente'),
        ]
        bradesco_answers = inquirer.prompt(questions, console)
        answers = {**answers, **bradesco_answers}

    if 'Alelo' in answers['banks']:
        # Alelo-specific credentials and YNAB account mapping.
        questions = [
            inquirer.Text('login', 'Qual seu CPF (somente números) ?'),
            inquirer.Text('alelo_password', 'Qual a sua senha para logar no app ?'),
            inquirer.Text('alelo_flex_account', 'Qual o nome da conta cadastrada para o cartão Flex'),
            inquirer.Text('alelo_refeicao_account', 'Qual o nome da conta cadastrada para o cartão Refeição'),
            inquirer.Text('alelo_alimentacao_account', 'Qual o nome da conta cadastrada para o cartão Alimentação'),
        ]
        alelo_answers = inquirer.prompt(questions, console)
        answers = {**answers, **alelo_answers}

    # Persist everything as UTF-8 JSON next to the current directory.
    with open('./brbanks2ynab.json', 'w') as f:
        json.dump(answers, f, ensure_ascii=False, indent=2)


if __name__ == '__main__':
    init_config()
def binary_score_func(candidates: cp.ndarray, target: cp.ndarray) -> int:
    """Returns the index of the candidate column with the lowest mean
    binary cross-entropy against `target`.

    `candidates` holds one probability vector per column; `target` holds the
    corresponding 0/1 labels.  NOTE(review): candidates are assumed to lie
    strictly in (0, 1), otherwise cp.log produces -inf/nan.
    """
    labels = target[:, cp.newaxis]
    losses = -(labels * cp.log(candidates) + (1 - labels) * cp.log(1 - candidates)).mean(axis=0)
    return losses.argmin()
/** * Simple implementation of the EnvScreenshotEngine interface. * * @author Marvin Froehlich (aka Qudus) */ class EnvScreenshotEngineImpl implements EnvScreenshotEngine { private final Xith3DEnvironment env; /** * {@inheritDoc} */ public void takeScreenshot( Canvas3D canvas, File file, boolean alpha ) { if ( env.getOperationScheduler() == null ) { System.out.print( "taking screenshot..." ); canvas.takeScreenshot( file, true ); System.out.println( "screenshot: " + file.getAbsolutePath() + " saved." ); } else { final ScheduledScreenshot schededShot = SchedOpsPool.allocateSchededScreenshot( canvas, file, alpha ); env.getOperationScheduler().scheduleOperation( schededShot ); } } /** * {@inheritDoc} */ public void takeScreenshot( File file, boolean alpha ) { if ( env.getCanvas() == null ) throw new NullPointerException( "No Canvas3D added to the Environment. Cannot take screenshot." ); env.getCanvas().takeScreenshot( file, alpha ); } /** * {@inheritDoc} */ public File takeScreenshot( Canvas3D canvas, String filenameBase, boolean alpha ) { return ( canvas.takeScreenshot( filenameBase, alpha ) ); } /** * {@inheritDoc} */ public File takeScreenshot( Canvas3D canvas, boolean alpha ) { return ( canvas.takeScreenshot( "screenshot", alpha ) ); } /** * {@inheritDoc} */ public File takeScreenshot( String filenameBase, boolean alpha ) { if ( env.getCanvas() == null ) throw new NullPointerException( "No Canvas3D added to the Environment. Cannot take screenshot." ); return ( env.getCanvas().takeScreenshot( filenameBase, alpha ) ); } /** * {@inheritDoc} */ public File takeScreenshot( boolean alpha ) { return ( takeScreenshot( "screenshot", alpha ) ); } EnvScreenshotEngineImpl( Xith3DEnvironment env ) { this.env = env; } }
Trump Is Going to Regret Not Having a Grand Strategy Throughout the presidential campaign and since Donald Trump’s election, former diplomats, retired generals, and foreign-policy analysts have attempted to decipher, explain, and predict his foreign-policy strategy. Will he pursue the big-stick model of Teddy Roosevelt? Embrace a neo-Nixonian “madman” strategy? Is Trump actually a champion of foreign-policy realism , or perhaps no realist at all? But all those questions make the same mistake — they assume the incoming administration has an incipient grand strategy at all. In reality, the president-elect’s foreign-policy approach is explicitly anti-strategic. Rather, Trump’s worldview suggests the outlines of a doctrine of “tactical transactionalism” — a foreign-policy framework that seeks discrete wins (or the initial tweet-able impression of them), treats foreign relations bilaterally rather than multidimensionally, and resists the alignment of means and ends that is necessary for effective grand strategy. The Trump administration seems determined to muddle through its foreign policy without initial guiding principles, benchmarks for progress, or the means of adjudicating between competing objectives, and with a wildly improvisational leadership style that has no precedent in recent history. Such an approach is dangerously nearsighted and presents an exceptionally high risk of failure — not only in achieving his few stated foreign-policy goals, from the defeat of the Islamic State to the containment of China, but also in assuring basic peace and prosperity for the American people. The Strategic Imperative A grand strategy is a coherent theory of national security based on the careful linkage of means and ends: It establishes priorities, accounts for trade-offs among those priorities, and aligns available resources accordingly. 
The United States has political, economic, and security interests that span the globe, as well as the unmatched military and economic capabilities to shape or respond to an extraordinary range of international challenges. A grand strategy, in theory, disciplines the use of diplomatic, military, and economic power, marshaling it in service of specific objectives. Without some semblance of a grand strategy in a complex and competitive international environment, any country is adrift. In assessing the importance of grand strategy, it is equally important to understand what it is not. Grand strategy is not the same as strategy writ large. Anyone can have a strategy to achieve a desired objective. Presidents constantly engage in strategic interaction when they negotiate with Congress, wrangle their cabinet members and staff, and seek approval from voters. A presidential administration may even have carefully considered strategies for discrete foreign-policy issues that nevertheless fail to account for the interaction among priorities and resources, thereby undermining the possibility of grand strategy. Moreover, grand strategy is not merely a conceptual exercise — rather, the articulation and implementation of how one guides the ship of state in ways that are consequential for the daily management and execution of foreign policy. Grand strategy provides an essential framework for the vast national security bureaucracy, serving as a policy lodestar that facilitates the implementation of the commander in chief’s agenda absent daily White House direction on every issue. For decades, a bipartisan strategic vision has sought to maintain America’s status as the world’s lead diplomatic, military, and economic actor and extend the reach of the liberal international order. Yet as stresses build on the post-World War II order and an increasingly multipolar distribution of power emerges, inertia alone will not sustain the trajectory of progress toward those goals. 
A well-defined and carefully constructed American grand strategy is more necessary today than it has been in decades. The next administration will face a choice between preserving the contours of existing grand strategy using shrewd statecraft or pursuing a new vision for the United States’ role in the world. Alternatively, in the absence of a grand strategy, the Trump White House will allow the country’s competitors to determine what the country’s new international role should be. The Trump Doctrine The Trump Doctrine, as gleaned from his pre-inaugural statements about world affairs, is not a grand strategy. Rather, it is a collection of principles — some operational, some philosophical — that will likely guide U.S. foreign policy over the next four years. These principles are united by three core attributes: first, a focus on short-term tactical wins rather than longer-term foresight; second, a “zero-sum” worldview where all gains are relative and reciprocity is absent; third, a transactional view of American foreign policy that is devoid of moral or ethical considerations. We dub this emergent approach “tactical transactionalism.” Trump’s decision-making style is famously improvisational, open to sudden inexplicable shifts and rooted in gut instinct. While tactical transactionalism is designed to allow Trump to triumph in discrete strategic interactions — for example against a political opponent or a counterparty in a negotiation — when applied to foreign policy, such an approach is fundamentally at odds with the careful analysis and planning required for grand strategy. For major foreign-policy issues and decisions, which require policymakers to make judgments despite imperfect information and persistent uncertainty, careful analysis and deliberation make rash and counterproductive outcomes less likely. 
Trump’s principles do not amount to a coherent conception of the United States’ role in the world, Washington’s core interests, and the appropriate uses of American power. Although the president-elect is fond of historical slogans — like “America First” or “Peace Through Strength” — he seems to prefer such taglines for their marketing value, rather than as shorthand for a set of strategic assumptions. (Indeed, anyone who has studied American history recognizes that the strategic assumptions associated with the slogans cited above are utterly incompatible with each other.) Leading Indicators The pitfalls of Trump’s strategic incoherence become quickly apparent upon considering his two most prominent foreign-policy actions since winning the election. First, Trump signaled his willingness to enter into a nuclear arms race with unnamed foreign adversaries. On Dec. 22, the president-elect tweeted, “The United States must greatly strengthen and expand its nuclear capability until such time as the world comes to its senses regarding nukes.” After his advisors attempted to soften and reinterpret this statement, Trump doubled down, telling MSNBC: “Let it be an arms race. We will outmatch them at every pass and outlast them all.” Yet there is no apparent logic to Trump’s nuclear saber-rattling, beyond the assertion of American strength and stamina. Perhaps he only meant his tweet as an extension of the critique, stated repeatedly during the campaign, that the U.S. nuclear arsenal “doesn’t work” anymore. But presidential rhetoric has strategic consequences, especially in the nuclear realm, which is why semantics tend to be carefully parsed by foreign governments. Whether Trump intended it or not, his words sent a threatening message about American intentions. Even interpreted modestly, these pronouncements herald important shifts in American nuclear policy. 
Changes of this magnitude would typically be carefully deliberated through a Nuclear Posture Review, of the kind undertaken by the past three administrations to evaluate the strategic imperatives and budgetary constraints governing their approaches to nuclear weapons. If the Trump administration elects to conduct such a review, Trump’s personal tweets and comments — if taken as policy guidance — would prejudge important deliberations, undermining civilian and military experts’ ability to make strategically prudent recommendations. Second, Trump weighed in on the most expensive and controversial military procurement program, the Joint Strike Fighter, or F-35. On Dec. 22, he tweeted: “Based on the tremendous cost and cost overruns of the Lockheed Martin F-35, I have asked Boeing to price-out a comparable F-18 Super Hornet!” This followed an earlier tweet, which stated: “The F-35 program and cost is out of control. Billions of dollars can and will be saved on military (and other) purchases after January 20th.” Setting aside the irregularity of a president-elect — let alone a sitting president — directly intervening in federal contracting, these statements further demonstrate how Trump’s desire for tactical wins overshadows long-term strategic considerations. Most significantly, there is no F-18 model comparable to the F-35. The F-35, unlike the F-18, is designed with a stealth profile, which enables it to evade enemy radar and attack ground targets. Although cost overruns for the F-35 program are a legitimate concern, the decision to procure fighter jets without stealth capabilities has long-term implications for U.S. national security that merit serious consideration. Discontinuation of the F-35 would also be highly disruptive to the 11 American allies that have already purchased or plan to purchase the platform. These statements may be explained away as tactical maneuvering by Trump, creating bargaining space when he can avoid full accountability for his words. 
After all, the president-elect himself assigns great value to unpredictability. In his major foreign-policy address during the campaign, Trump pointed to secrecy as the basis of his counter-Islamic State policy: “We must as a nation be more unpredictable. We are totally predictable. We tell everything. We’re sending troops. We tell them. We’re sending something else. We have a news conference. We have to be unpredictable. And we have to be unpredictable starting now.” But while unpredictability may be tactically useful, it is strategically vacuous — and deeply at odds with grand strategy. It is also nearly impossible to operationalize given the logistical requirements of U.S. foreign-policy implementation. Until Trump assumes office, it will be impossible to judge whether these statements will translate into official U.S. policy. Even Trump’s advisors seem unsure whether to take his words literally, seriously, or symbolically. Nevertheless, Trump’s positions on national security issues consistently demonstrate an inclination toward tactical moves that create the appearance of leverage. This approach resists prioritization or acknowledgement of trade-offs, the hallmarks of sound grand strategy. Transactionalism Trumps Grand Strategy This tactical emphasis flows from Trump’s transactional view of international relations. Importing his real estate deal-making mentality to conducting U.S. foreign policy, he envisions foreign relations as 193 individually crafted bilateral deals with every other nation in the world. Trump appears to consider these deals to be zero-sum and lacking moral content. This attitude is most marked in his long-standing antipathy to (certain) American alliances. 
In his 1990 Playboy interview, Trump summed up his view: “We Americans are laughed at around the world for losing a hundred and fifty billion dollars year after year, for defending wealthy nations for nothing, nations that would be wiped off the face of the earth in about 15 minutes if it weren’t for us. Our ‘allies’ are making billions screwing us.” Trump is hardly alone in complaining about allies’ free-riding on U.S. military power, but he is unique in his fixation on the need for financial compensation. During the campaign, Trump went so far as to suggest that security guarantees would be conditional on NATO allies’ defense spending, and he even touched the third rail of American politics when he suggested he would cut military aid to Israel. The desire to negotiate winning deals apparently overrides broader and more fundamental strategic objectives, like deterring adversaries, assuring regional stability, and preventing the spread of nuclear weapons. Trump seems perfectly comfortable calling the reliability of the U.S. extended deterrent into question, even if the result is nuclear proliferation by close allies and partners like Japan, South Korea, and Saudi Arabia. The robustness of these views for Trump, in the face of countervailing evidence — namely the relative cost savings and security gains that result from overseas basing — indicates that bilateral, zero-sum transactionalism trumps strategic considerations in his thinking. Within this transactional framework, Trump has no compunction about cutting a grand bargain with Russia. Above all else, the president-elect professes to admire Putin’s admiration for him, and both share a mutual worldview that favors power while eschewing international norms. Trump disputed evidence of Russian interference in the U.S. election and shares Putin’s dismissive attitude toward American exceptionalism. 
Putin’s antagonism toward NATO is not terribly worrying for Trump given his aforementioned indifference to the alliance, except insofar as it can serve as a protection racket. Although the contours of such a deal remain unclear, Trump views the fight against the Islamic State as the cornerstone of a U.S.-Russia rapprochement. From Words to Deeds Will a different doctrine take shape once Trump assumes the obligations of the Oval Office? Will the new administration demonstrate a knack for strategy heretofore obscured by the president-elect’s Twitter storms? Confirmation hearings will yield early clues into the prospects for a Trump grand strategy. In particular, one should look for clues in the testimony by Secretary of State nominee Rex Tillerson, Secretary of Defense nominee Jim Mattis, and CIA Director nominee Mike Pompeo, and the extent to which they express a common view of foreign-policy challenges (such as credibly supporting treaty allies) and opportunities (such as sustaining the international consensus behind the Iran nuclear deal). But looking beyond Inauguration Day, three decisions in the first 100 days of the Trump administration will provide crucial insight. First, how will national security decision-making be structured? Donald Trump Jr. reportedly said during the campaign that the vice president in a Trump administration would be charged with both domestic and foreign policy. Trump’s delegation of regular intelligence briefings to Vice President-elect Mike Pence suggests there may be truth to this promise. Yet much about the incoming administration’s decision-making procedures remains unknown. 
In the first 100 days, Trump will likely release the customary presidential directive outlining the organization of the National Security Council (NSC) system, which reveals the formal arrangements for creating and executing national security policy, including the role to be played by the president, vice president, national security advisor, and other NSC principals. Second, will Trump change his communication style once in office? His press secretary-designate, Sean Spicer, indicated that the president will continue his personal use of Twitter. The extent to which seemingly off-the-cuff tweets are intended and interpreted as declaratory government policy will have important implications for U.S. foreign relations. In particular, it will become clear in the first 100 days whether presidential statements align with concrete policy decisions. Typically, new presidents are loath to backtrack on campaign commitments because they fear backlash during their early-term “honeymoon period” and seek to affirm their credibility domestically as well as for international audiences. But this is a governance question Trump has not yet confronted. Third, to what extent will the foreign policies pursued by the Trump administration accord with campaign commitments? Early political appointments, legislative priorities, and budget request documents will provide insight into the flexibility with which the new administration interprets the president’s prior promises. These actions will signal whether Trump’s policy pronouncements will be subject to revision within the framework of strategic reviews, such as the National Security Strategy, or a possible Nuclear Posture Review. Each of these decisions will have consequences for the new administration’s ability to achieve discrete foreign-policy objectives, let alone articulate an overarching framework for its statecraft. 
But given the consistency with which Trump has espoused a doctrine of tactical transactionalism, it is doubtful that a grand strategy will emerge after Jan. 20. The president may feel that the absence of strategy empowers him personally. But it will inevitably obscure the United States’ vital national interests, confuse allies and partners, and blunt the exercise of American power. Photo credit: WIN MCNAMEE/Getty Images
def do_trace(self, line):
    """Handle the 'trace' debug command and its sub-commands.

    Sub-commands:
      flush            -- discard any samples currently in the trace buffer
      start [--period P] [VAR ...]
                       -- optionally set the sample period and the variables
                          to trace, then flush the buffer and begin tracing
      download [--separator S] [--dest FILE]
                       -- save the captured trace data to a file
      graph ARGS...    -- plot the trace (delegated to TraceGraph)
      current          -- show active trace variables, period and sample count

    NOTE(review): indentation was not preserved in this view of the file;
    the placement of the final FLUSH/START commands at the end of the
    'start' branch (i.e. running even when no variables were given) follows
    the most plausible reading -- confirm against the canonical source.
    """
    # Tokenize like a shell so quoted arguments survive.
    cl = shlex.split(line)
    if len(cl) < 1:
        print("Error, please specify the trace command to run.")
        print(self.do_trace.__doc__)
        return
    if cl[0] == "flush":
        # Drop everything currently buffered on the device side.
        SendCmd(OP_TRACE, [SUBCMD_TRACE_FLUSH])
    elif cl[0] == "start":
        parser = CmdArgumentParser("trace start")
        parser.add_argument("--period", type=int)
        parser.add_argument("var", nargs="*")
        args = parser.parse_args(cl[1:])
        # The device supports only a fixed number of trace slots.
        if len(args.var) > TRACE_VAR_CT:
            print(f"Can't trace more than {TRACE_VAR_CT} variables at once.")
            return
        # An omitted (or zero/falsy) period defaults to 1.
        if args.period:
            period = Split32(args.period)
            SendCmd(OP_TRACE, [SUBCMD_TRACE_SET_PERIOD] + period)
        else:
            SendCmd(OP_TRACE, [SUBCMD_TRACE_SET_PERIOD] + Split32(1))
        if args.var:
            # Pad with empty names so every slot is (re)assigned; names not
            # present in varDict map to id -1, which clears that slot.
            var_names = args.var + [""] * (TRACE_VAR_CT - len(args.var))
            for (i, var_name) in enumerate(var_names):
                var_id = -1
                if var_name in varDict:
                    var_id = varDict[var_name].id
                var = Split16(var_id)
                SendCmd(OP_TRACE, [SUBCMD_TRACE_SET_VARID, i] + var)
        # Start tracing from a clean buffer.
        SendCmd(OP_TRACE, [SUBCMD_TRACE_FLUSH])
        SendCmd(OP_TRACE, [SUBCMD_TRACE_START])
    elif cl[0] == "download":
        parser = CmdArgumentParser(prog="trace download")
        parser.add_argument(
            "--separator", type=str, default=" ", help="field separator in file"
        )
        parser.add_argument(
            "--dest",
            type=str,
            default="trace.dat",
            help="filename to save the trace to",
        )
        args = parser.parse_args(cl[1:])
        # Nothing to download unless at least one variable is being traced.
        tv = TraceActiveVars()
        if len(tv) < 1:
            print("No active trace variables")
            return
        dat = TraceDownload()
        TraceSaveDat(dat, fname=args.dest, separator=args.separator)
    elif cl[0] == "graph":
        TraceGraph(cl[1:])
    elif cl[0] == "current":
        # Report the device-side trace configuration and buffer state.
        print("Traced variables:")
        for var in TraceActiveVars():
            print(" - %s" % var.name)
        dat = SendCmd(OP_TRACE, [SUBCMD_TRACE_GET_PERIOD])
        period = Build32(dat)[0]
        print("Trace period: %i" % period)
        dat = SendCmd(OP_TRACE, [SUBCMD_TRACE_GET_NUM_SAMPLES])
        samples = Build32(dat)[0]
        print("Samples in buffer: %i" % samples)
    else:
        print("Unknown trace sub-command %s" % cl[0])
        return
// The configuration of this bean is in the spring application context. public abstract class BeanShellScriptStepFactory extends AbstractSingleFileScriptStepFactory { @Override protected boolean canCreateStepWith(final File aFile) { return aFile.getName().toLowerCase(Locale.getDefault()).endsWith(".beanshell"); } }
module Main where

import Default
import Expr
import NonDefault
import Universal

import Test.Tasty

-- | Entry point: run every test group of the package under a single
-- \"all\" umbrella group.
main :: IO ()
main = defaultMain allTests
  where
    allTests =
        testGroup
            "all"
            [ test_default
            , test_nonDefault
            , test_universal
            , test_expr
            ]
The Aspergillus nidulans MAPK Module AnSte11-Ste50-Ste7-Fus3 Controls Development and Secondary Metabolism The sexual Fus3 MAP kinase module of yeast is highly conserved in eukaryotes and transmits external signals from the plasma membrane to the nucleus. We show here that the module of the filamentous fungus Aspergillus nidulans (An) consists of the AnFus3 MAP kinase, the upstream kinases AnSte7 and AnSte11, and the AnSte50 adaptor. The fungal MAPK module controls the coordination of fungal development and secondary metabolite production. It lacks the membrane docking yeast Ste5 scaffold homolog; but, similar to yeast, the entire MAPK module's proteins interact with each other at the plasma membrane. AnFus3 is the only subunit with the potential to enter the nucleus from the nuclear envelope. AnFus3 interacts with the conserved nuclear transcription factor AnSte12 to initiate sexual development and phosphorylates VeA, which is a major regulatory protein required for sexual development and coordinated secondary metabolite production. Our data suggest that not only Fus3, but even the entire MAPK module complex of four physically interacting proteins, can migrate from plasma membrane to nuclear envelope. Introduction Eukaryotic organisms communicate between cell surface and nucleus to respond to environmental signals. The mitogenactivated protein kinase (MAPK) module consisting of a cascade of three protein kinases represents a highly conserved eukaryotic signal transduction system present from yeast to man. MAP3K phosphorylates a second kinase, MAP2K which itself phosphorylates the MAPK. This final kinase phosphorylates nuclear target proteins to activate appropriate gene expression . The sexual pathway of the budding yeast Saccharomyces cerevisiae represents a paradigm for signal transduction in eukaryotes . This MAP kinase pathway responds to pheromones and induces differentiation processes which trigger sexual mating of yeast . 
The central complex of MAP3K Ste11, MAP2K Ste7 and MAPK Fus3 is assembled on the scaffold protein Ste5 as a hub to keep these kinases in a close proximity for enhanced relay of phosphorylation and thereby controls the flow of information . Binding of pheromone to the transmembrane receptors Ste2 or Ste3, which are coupled to guanine nucleotide binding proteins (G protein, G protein coupled receptor: GPCR), initiates signal transduction. This induces the release of the Gbc subunit from the trimeric Gabc protein. The Ste5 RING domain binds to activated free Gbc complex and recruits the MAP kinase module Ste11-Ste7-Fus3 to the membrane in close distance to the p21 activated kinase (PAK) Ste20. Preactivated Ste20 is localized in the membrane and initiates the kinase cascade system by phosphorylating the MAP3K Ste11 . Ste50 represents a second adaptor which binds to the Opy2 membrane anchor and provides membrane association of the entire MAPK module. Ste50 mediated membrane localization is required for Ste11 activation . The information is transmitted as phosphate signal from Ste11 via Ste7 to the MAPK Fus3. According to the current model phosphorylated Fus3 is released from the Ste5 scaffold complex and leaves the membrane associated complex . Phosphorylated Fus3 crosses the cytoplasm and enters the nucleus where it phosphorylates target transcription factors as Ste12. Ste12 is necessary to activate the sexual pathway and also controls developmental processes . Pheromone pathway genes have been studied in various fungi and are not only involved in sexual reproduction but also in fungal pathogenicity . The Fus3 MAPK module is highly conserved in filamentous fungi with the exception that homologs for Ste5 are absent . 
In the self-fertile model fungus Aspergillus nidulans, the Ste11 MAP3K homolog SteC (AnSte11) , the Fus3 MAPK homolog MpkB (AnFus3) , and the Ste12 homolog for the transcription factor SteA (AnSte12) are necessary for sexual fruiting body formation, suggesting that there are similarities in the molecular function of the MAPK signal transduction as in yeast. A. nidulans grows vegetatively as a filament. When placed on a surface, after germination of the spores at least 12 hours of growth is required to establish developmental competence in response to external signals . There are two developmental options: light supports the asexual and inhibits the sexual developmental pathway ( Figure 1A). AnFus3 is not only required for sexual development but also for the control of secondary metabolism which is a typical feature of many filamentous fungi . Sexual development of A. nidulans is coordinated with the production of secondary metabolites, including mycotoxins. This coordination requires velvet domain proteins which are common for filamentous fungi but absent in yeast . The velvet heterodimers VeA-VelB and VosA-VelB have different developmental functions. VeA-VelB heterodimer promotes sexual development whereas VelB-VosA dimer inhibits asexual differentiation. Association of the putative methyltransferase LaeA with the VelB-VeA heterodimer, which makes the VelB-VeA-LaeA trimeric complex, coordinates development and secondary metabolism . Comparison of the intracellular molecular mechanism of signal transduction of Fus3 MAPK of yeast and A. nidulans revealed that AnFus3 MAPK can reach the nuclear envelope in a complex with other proteins of the MAPK module, including the adaptor protein AnSte50. Only AnFus3 enters the nucleus and phosphorylates VeA, which elucidates a novel link between MAPK and velvet domain proteins that act as control elements at the interface of fungal development and secondary metabolism. Results The A. 
nidulans Fus3 MAP kinase of the mating pathway phosphorylates the velvet domain protein VeA, and VelB-VeA complex formation is reduced in Anfus3 deletion S. cerevisiae Fus3 interacts with transcription factor Ste12 that activates the mating pathway. The A. nidulans MAP kinase AnFus3 also controls sexual development . Tagged AnFus3 recruited the transcription factor AnSte12 by tandem affinity purification (TAP) only when the fungus was induced for sexual development but not during vegetative filamentous growth or asexual development ( Figure 1B, Table S1). Endogenously expressed AnFus3::sGFP was functional ( Figure S1) and immunoprecipitation of the fusion protein was able to enrich the SteA protein in a sexually induced culture (Table S1). The AnFus3-SteA interaction was further verified by bimolecular fluorescence complementation (BiFC) and was observed in fungal nuclei ( Figure 1C). This corroborates that the interaction between kinase and transcription factor is conserved from yeast to filamentous fungi. Due to their similar roles in development and secondary metabolism , we examined whether AnFus3 interacts with the velvet domain proteins and LaeA. AnFus3 interacted in vivo in a BiFC assay with LaeA and subsequently with VeA, but not with VelB. In addition, AnFus3 interacted with VosA ( Figure 1D). VosA is part of the VosA-VelB heterodimer which represses asexual development . These results suggest that distinct velvet domain proteins or LaeA may include targets of MAPK phosphorylation. AnFus3 was immunoprecipitated from vegetatively grown fungal cells as sGFP fusion protein ( Figure 2A) to identify direct substrates of AnFus3 in in vitro kinase assays. VeA expressed and purified from E. coli was the only tested protein which could be specifically phosphorylated by AnFus3, whereas bacterially produced VosA, LaeA or VelB were not phosphorylated. 
Further phosphorylation experiments performed with phospho-specific serine and threonine antibodies further supported that VeA was phosphorylated by AnFus3 and treatment of phosphorylated samples with lambda protein phosphatase (l-PP) resulted in loss of phosphorylation signal ( Figure 2B). VeA bridges VelB and LaeA in the trimeric VelB-VeA-LaeA complex. We addressed whether AnFus3 activity affects complex formation. VeA protein levels ( Figure 2C) were similar in wild type and mpkB mutant strains. velB RNA was unchanged whereas laeA transcripts were downregulated as previously reported ( Figure S2A) . TAP purification of natively expressed VeA::cTAP revealed that under conditions where sexual development was normally promoted, only significantly reduced amounts of VelB and LaeA proteins were enriched by tagged VeA in the absence of MpkB ( Figure 2D, Tables S2 and S3). The MAP kinase does not affect VeA nuclear import, because the interaction of VeA with the importin KapA was not significantly affected in mpkB mutant. Consistently, nuclear import of the subunits of the trimeric VelB-VeA-LaeA complex was not affected in a mkkB mutant lacking the upstream MAP2K AnSte7 ( Figure S2B). Lack of laeA normally causes enhanced VeA and VelB expression as well as enhanced complex formation . This suggests that decreased VeA-VelB association is not a result of the reduced levels of LaeA in mpkB mutants. These results suggest that AnFus3 phosphorylates VeA in vitro and interacts with VeA in vivo. Furthermore, AnFus3 is required for enhanced association of VeA with VelB which are components of the VelB-VeA-LaeA velvet complex. MAP2K AnSte7 is required for sexual development of A. nidulans MAPKKK (SteC) and MAPK (MpkB) are necessary for sexual development in A. nidulans . Yeast Fus3 receives the phosphorylation signal from MAP2K Ste7. The corresponding filamentous fungus homolog has not yet been described. The ANID_03422 (mkkB) locus of A. 
nidulans encodes a protein, which is Author Summary Mitogen activated protein (MAP) kinase cascades are conserved from yeast to man to transmit an external signal to the nucleus and induce an appropriate cellular response. The yeast Fus3 MAP kinase module represents a textbook paradigm for signal transduction. The pathway is activated by external sexual hormones triggering several kinases that transmit the signal at the plasma membrane to Fus3. Phosphorylated Fus3 is released from the membrane-associated module, crosses the cytoplasm, and enters the nucleus to activate transcription factors for sexual development. We describe here the Fus3 MAPK pathway of a filamentous fungus that controls sexual development as well as secondary metabolism, which are coordinated processes in filamentous fungi. Aspergillus nidulans is able to release Fus3 as a complex from the membrane. Complexes of Fus3 can include two additional kinases and an adaptor protein, and these complexes can migrate from the membrane to the nuclear envelope where only A. nidulans Fus3 can enter the nucleus to control nuclear regulators. Revealing specific functions of cellular Aspergillus Fus3 complexes in signal transduction to control fungal development and secondary metabolism will be a fascinating future task. conserved in different Aspergilli ( Figure S3) and has 25% identity to yeast Ste7 . AnSte7 is also related to N. crassa MAP2K and human MAP2K1 . Overexpressing the corresponding mkkB gene resulted in two fold increase in the number of fruiting bodies and supported a role in sexual development ( Figure S4A-S4C). mkkB deletion mutants had a slow growth phenotype and were blocked in early sexual development, which resulted in nest-like structures containing clumps of Hülle cells (yellow arrows, Figure 3A, 3B). Hülle cells support sexual development as specialized nursing cells for the growing fruiting body . 
AnSte7 is required for hyphal fusion as one of the initial steps of fruiting body formation. Hyphal fusion of wild type strains marked with either synthetic cytoplasmic green fluorescent protein (sGFP) or with nuclear monomeric red fluorescent protein (mRFP) resulted in hyphae with green cytoplasm and red nuclei (heterokaryon) ( Figure 3C). In contrast, a mkkB deletion strain was unable to fuse with the wild type strain. We found the same hyphal fusion defect for the steCD strain as in the mkkB mutant ( Figure 3C, 3D). This further supports that AnSte11 and AnSte7 act in a common pathway. The analysis of putative additional functions of AnSte7 in later phases of sexual development required a by-pass of initial hyphal fusions. Therefore, heterokaryons were artificially produced by fusing protoplasts. An intact mkkB copy of the wild type strain allowed the development of mature fruiting bodies (red arrows), when wild type and mkkB mutant protoplasts were fused. In contrast, two mkkB mutants forced to form heterokaryons were impaired in fruiting body maturation and produced only early structures of development (yellow arrow, Figure 3D). This suggests several functions of MAP2K AnSte7 during sexual development presumably in concert with AnFus3. AnSte50-Ste11-Ste7-Fus3 form a physically interacting module that is required for sexual development We determined whether the A. nidulans kinases may replace functions of its yeast counterparts. Plasmids containing Anste7 and Anfus3 genes expressed under yeast promoters were transformed into ste7 and fus3 deletion strains. mkkB and mpkB did not alleviate the defects in pheromone response of the yeast mutants ( Figure S4D). However, MpkB moderately suppressed the defects in pheromone response of a fus3 kss1 double mutant, showing that the MpkB is partially able to take over functions of the MAP kinase pair Fus3/Kss1. This suggests a partial overlap of the functions of the MAPK pathways of these two organisms. 
Ste50 functions as an adaptor for membrane recruitment of Ste11 in yeast . Deletion of the corresponding steD in A. nidulans caused a defect in fruiting body formation ( Figure S1A). Similar to the other MAPK mutants, steD mutant could not produce heterokaryons in outcrossings (not shown). Thus, the adaptor AnSte50 is as important for accurate fungal development as the other components of the MAPK module. A. nidulans AnSte50 was enriched by AnSte7::TAP in wild type, but not in the steCD strain indicating that AnSte11 is required for the AnSte50-Ste7 interaction ( Figure 4A, 4B). These data suggest a physical interaction of AnSte50 and two MAPK module components in a AnSte50-Ste11-Ste7 complex. Interaction partners of AnSte50 were identified to explore the entire fungal MAPK mating module. A functional steD::ctap ( Figure S1A, S1B) recruited the MAP3K AnSte11 and the MAPK AnFus3 but not the MAP2K AnSte7 ( Figure 4C, 4D and Table S6). This further supports that AnSte50-Ste11-Ste7-Fus3 forms a module similar to yeast Ste5-Ste50-Ste11-Ste7-Fus3 with the exception that a counterpart for the yeast Ste5 scaffold is missing in A. nidulans. AnSte50-Ste11-Ste7-Fus3 represents an active MAP kinase module required for sexual development and secondary metabolite synthesis We analysed whether AnSte11 and AnSte7 act upstream of MAPK AnFus3. MAPK phosphorylation was monitored by a phospho-specific antibody against the MAPK Thr182XTyr184 motif. Phosphorylated AnFus3 was permanently detectable in vegetative wild type cultures ( Figure 4E). In contrast, modified AnFus3 was absent in mutants lacking AnSte11 or AnSte7, whereas the absence of AnSte12 did not change levels of phosphorylated AnFus3. In the absence of AnSte50, reduced phosphorylation of AnFus3 indicates some residual activity of the untethered AnSte11-Ste7 complex. This supports an active A. nidulans MAPK module consisting of AnSte50-Ste11-Ste7-Fus3 which controls fungal sexual development. 
The role of AnSte50-Ste11-Ste7-Fus3 for secondary metabolism was examined. Impaired secondary metabolism had only been described for the mpkB mutants . The mycotoxin sterigmatocystin (ST) levels were drastically reduced in the sterile steC, steD, mkkB, or mpkB mutants whereas ST levels in the sterile steAD were similar to wild type ( Figure 4F-4G). Similarly, the expression of the biosynthesis genes for ST (stcU) and terrequinone (tdiA and tdiB), and the expression of laeA and the transcription factor encoding aflR, both required for expression of secondary metabolite genes, were distinctly reduced in each mutant of the MAPK module ( Figure 4H). These data corroborate that active , LaeA and velvet domain proteins. Germination of spores leads to tubelike vegetative filaments (hyphae) which become competent for environmental signals after at least 12 hours of growth. Exposure of developmentally competent hyphae to light (or aeration) leads to asexual development (conidiophores and asexual spores ) in 24 hours. VosA-VelB inhibits asexual differentiation. Incubation in dark (96 hours) induces the sexual cycle with sexual fruiting bodies (cleistothecia) which are nursed by globose Hülle cells. LaeA is required for Hülle cell formation. VelB-VeA supports sexual development together with AnSte12 . The VelB-VeA-LaeA trimeric complex coordinates development with secondary metabolism. Co; conidia, S; stalk, Cl; cleistothecium, Hc; Hülle cells. (B) A silver stain treated 5-14% gradient SDS polyacrylamide gel of AnFus3 ::cTAP from vegetatively, asexually (on plates, under light) and sexually (on plates, in the dark) grown cultures at 30°C for 20 hours. Identified proteins from the excised lanes (Table S1) In vitro phosphorylation of VeA by AnFus3 in a radioactive kinase assay. 
Left panels; autoradiograph of dried SDS gel run for phosphorylation reactions (30 µl of total 45 µl reaction volume), Coomassie stain of the proteins from phosphorylation reaction (10 µl of total 45 µl reaction). VeA protein in AnFus3 reaction tube is shown with red rectangle. Right panels; ponceau staining of the immunoprecipitated (immobilized to the GFP trap sepharose) AnFus3::sGFP and only sGFP protein. Immunodetection of the fusion protein and free sGFP by the α-GFP. (B) Confirmation of specific VeA phosphorylation by a non-radioactive method. All recombinant proteins (10 µg each) were treated with both AnFus3 and GFP. AnFus3 treated samples were additionally incubated with the lambda protein phosphatase (λ-PP). Proteins were immunodetected by α-Penta His, α-GST. After AnFus3 treatment, VeA showed a 3–5 kDa molecular weight shift (red arrow) that disappeared after λ-PP treatment. Only VeA treated with MAPK was recognized by P-Ser/Thr-specific antibody. (C) Protein levels of VeA in the wild type and mpkB mutant background. VeA::cTAP signals were normalized to the internal actin levels. VeA protein levels did not change in the absence of MpkB. (D) Reduced velvet complex formation in the mpkB mutant. The VeA-associated proteins from the cultures of the wild type and mpkBD strains grown in the darkness sexually at 30°C for 20 hours. Three independent experiments were performed and the associated proteins were identified. The ratio of the peptides from VelB and LaeA to the VeA protein was drastically reduced in the MAPK mutant, whereas alpha importin KapA interaction slightly increased. Black bars represent the standard error. *LaeA was only found in one of the three purifications in mpkBD strain, thus no error bar is assigned. doi:10.1371/journal.pgen.1002816.g002 AnSte50-Ste11-Ste7-Fus3 MAPK is not only required for sexual development but also for secondary metabolite production. 
The components of the fungal MAPK module exhibit distinct localization patterns at hyphal tip, nuclear envelope, and septa The yeast mating MAPK module transmits a signal from the plasma membrane to the nucleus by releasing MAPK Fus3 from the Ste5 scaffold at the membrane . We analysed how the signal is transmitted through the filament of A. nidulans to nuclear factors as AnSte12 or VeA. Time course immunoblotting ( Figure S1C) showed that AnFus3 was constantly expressed during development. The mkkB mRNA for the upstream MAPKK was also present throughout all stages ( Figure S5C). The corresponding protein AnSte7::sGFP was present in vegetative as well as in the initial phases of asexual and sexual development, but decreased afterwards ( Figure S5C). Similarly, the AnSte50::sGFP ( Figure S1C) seems to be degraded because the protein disappeared during mid and late asexual development. Confocal spinning disc microscopy revealed that functional AnSte7::sGFP fusion protein expressed under native locus promoter was localised during early phase of growth throughout the cytoplasm, but never found in the nucleus (not shown). After becoming competent for differentiation (16 hours after germination), AnSte7::sGFP accumulated not only at the hyphal tip but also at the plasma membrane and at the septa of hyphae or spore forming cells (white arrows in Figure 5A). The AnSte7 signal was also present on the nuclear envelope. The AnSte7 localization pattern did not change in the absence of the MAP3K AnSte11 (not shown). Like AnSte7, a functional Ste50::sGFP fusion never entered the nucleus. AnSte50 was cytoplasmic and accumulated at later stages of vegetative growth at the hyphal tip, the septa of spore forming cells, the plasma membrane and the nuclear envelope (arrows in Figure 5B, 5D). 
A functional AnFus3::sGFP expressed under the native promoter accumulated at the hyphal tip and was present both in the cytoplasm and in the nucleus in vegetative and spore forming cells ( Figure 5C, 5D). This suggests a dynamic and complex distribution of MAPK module subunits from the fungal membrane to the nucleus. It also revealed that the MAPK AnFus3 is, like yeast Fus3, the only subunit with the potential to enter the nucleus. The entire MAPK module colocalizes and interacts at the hyphal tip and nuclear envelope ( Figure 6). Most of the GFP signals of AnSte7 and Ste50 merge with the RFP signal of MpkB at the fungal tip, the plasma membrane and at the nuclear envelope where they might form dynamic protein complexes. Exclusively at hyphal tips we found two types of co-localizations of kinase pairs. In addition to direct co-localizations, similar to plasma membrane or nuclear envelope, there were extended colocalization patterns at the hyphal tip. This could reflect that a fraction of kinases is localized in vesicles at the hyphal tip. Bimolecular fluorescence complementation (BiFC) was applied to examine whether there are direct transient in vivo interactions between AnSte7 and Fus3, which could not be found by TAP purification ( Figure 7B). Similar to the yeast localization of the Ste5-Ste11-Ste7-Fus3 MAPK module at the membrane, AnSte11-Ste7 and AnSte7-Fus3 interacted at the plasma membrane and also at the hyphal tip ( Figure 7). There was an additional strong interaction of AnSte11-Ste7 at septa which border cellular segments as well as at septa of spore forming cells and spores ( Figure 7C-7E). Quantification of the fluorescence intensity from the bright enhanced yellow fluorescent protein (EYFP) spots of AnSte11-Ste7 and Ste7-Fus3 pairs revealed that they emit up to 10-fold more yellow fluorescence than single EYFP molecules ( Figure S6), suggesting that the kinase pairs form multimeric complexes. 
Consistently to the yeast situation, the transcription factor AnSte12 as well as fungus specific factors VeA and LaeA specifically interacted with the MAPK Fus3 in the nucleus ( Figure 1C, 1D). AnSte50 also interacted with the kinases at the plasma membrane and hyphal tip ( Figure 8). Only AnSte11-Ste7 strongly interacted at the septa but there was hardly any interaction between AnSte7-Fus3 or between the AnSte50 and any of the kinases at the septa ( Figure 7C and 7D, Figure 8D-8F). The entire MAPK module components migrate to the nuclear envelope to deliver AnFus3 into the nucleus Yeast Ste7-Ste5-Fus3 migrates to tips of mating projections in pheromone treated cells. Only Fus3 travels to the nucleus upon activation by Ste7 . A. nidulans is a homothallic fungus, which does not require a mating partner. Time lapse images revealed that MAPK module components AnSte7 and Ste50::sGFP can move within the fungal cell along the membrane. During the cellular movements, these molecules shortly touched the membrane then hit the nucleus. Sometimes, fusion protein moved back after contacting the nucleus in the opposite direction. (Figure 9A and 9B, Videos S1 and S2). The dynamics of the protein interactions of the BiFC expressing strains were further analysed by time lapse movies (Videos S3, S4, S5, S6). The AnSte7-Fus3 pair moved together along the plasma membrane (Video S3, Figure S7A) towards the first nucleus, then they advanced to the next nucleus while some other spots did not move distinctly. Likewise, AnSte50-Fus3 complexes left one nuclear envelope, touched the membrane and moved to the next nucleus (Video S4, Figure S7B). Similar movements were also observed for other complexes of the MAPK module (not shown). AnSte11-Ste7 can dissociate from the plasma membrane, cross the cytoplasm and reach the nuclear envelope (Videos S5 and S6, Figure S7C, S7D). The major difference to the yeast situation is that the MAPK module of A. 
nidulans travels from the outer border of the fungal cell through the cytoplasm to the nuclear envelope. The AnSte7-Fus3 pair as well as pairs of AnSte50 with all three kinases interacted at the nuclear envelope ( Figure 7 and Figure 8). These data suggest significant differences in the molecular mechanism how a MAPK signal is transmitted in yeast in comparison to a filamentous fungus. AnSte50 is required for efficient membrane attachment of MAPK complexes The interactions of the AnSte11-Ste7 and AnSte7-Fus3 complexes were examined in steDD strain to examine AnSte50 function for cellular location of the module. The interaction of the three kinases at the plasma membrane of wild type (Figure 7) was abolished for AnSte11-Ste7 and drastically reduced for AnSte7-Fus3 in the steD mutant ( Figure 9C, 9D). Plasma membrane localizations of the AnSte7 and Fus3::sGFP fusions were also reduced in the steD mutant (not shown). Contrastingly, the localization of the entire module at the hyphal tip or for the partial module AnSte11-Ste7 at the septum seems to be mediated by a mechanism which is largely independent of AnSte50. These data suggest that AnSte50 supports association of the A. nidulans MAPK module with the plasma membrane but it does not affect the hyphal tip and septum localizations. Discussion We describe here the A. nidulans Fus3 MAPK module which is involved in sexual development and the control of secondary metabolism and releases AnFus3 into the nucleus. Our data suggest a provocative additional hypothesis: AnFus3 is able to travel along the membrane and to cross the cytoplasm to the nuclear envelope in complexes with AnSte7 MAP2K, AnSte11 MAP3K and the adaptor protein AnSte50. In the nucleus AnFus3 interacts with transcription factor AnSte12 for sexual development. The additional interaction of AnFus3 with VeA or yet unidentifed targets may promote VeA-VelB formation which is required for coordinated development and secondary metabolism ( Figure 9E). The A. 
nidulans Fus3 MAP kinase module is preferentially assembled at distinct intracellular locations, such as the hyphal tip, the septa, the plasma and nuclear membranes. Membrane localisation of the module is presumably relevant to perceive external signals as in yeast. Sexual development is defective when membrane localization of the module is impaired as in strains without intact AnSte50. Tip localisation could be important for hyphal fusions and cell-cell contacts. MAPKK AnSte7 and MAP3K AnSte11 but not other components interact at septa suggesting additional phosphorylation functions at septa independent of AnFus3. Corresponding mutants displayed strong deformations in the septa between developing asexual spores and spore forming cells but did not show any abnormal septation pattern in vegetative hyphae (not shown). This suggests a possible additional link between kinases of the module and regulators of asexual development. Intracellular distances in a filamentous fungus are significantly larger than in yeast. Several steps can be distinguished for signal transduction from surface to nucleus of A. nidulans. (i) From hyphal tip to plasma membrane: AnSte50 is primarily required for efficiently anchoring the MAPK module to membranes, but not to hyphal tips. AnSte50 might also contribute like in yeast to Ste11 MAP3K activation. The essential function of AnSte50 for signal transduction is supported by the defect of sexual development and lack of AnFus3 phosphorylation in a steD mutant. The AnSte50 independent localization at the hyphal tip suggests an additional yet unknown anchoring function for the AnFus3 module at the hyphal tip. The anchoring mechanism could include small membrane bound vesicles at the Spitzenkörper which could explain some of our localization results ( Figure 6B, Figure 7, Figure 8). The lack of AnSte11 did not cause any changes in the subcellular localization of AnSte7, indicating that AnSte11 is not required for proper AnSte7 localization. 
The lack of AnSte50 had a drastic effect on the localization of MAPK module complexes. AnSte50 interacts with all components of the MAPK module and might provide a binding platform for the other MAPK components which even works when AnSte11 is absent ( Figure 8). (ii) In yeast Fus3 dissociates from the Ste5 tethered pheromone pathway module and enters into the nucleus . Transport of the AnFus3 in the AnSte50-Ste11-Ste7 complex (or subcomplexes) to the nuclear envelope as additional signal transmission step in A. nidulans might secure that AnFus3 can be kept active over larger distances until it finally reaches the nucleus. It will be interesting to analyse phosphorylation states of kinases at different cellular locations during signal transduction. (iii) Import of AnFus3 from nuclear envelope into nucleus: AnFus3 presumably dissociates from the kinase module at the nuclear envelope in a mechanism wihich is unknown. After entry into the nucleus, AnFus3 interacts with AnSte12, and presumably phosphorylates it. AnFus3 phosphorylates the velvet protein VeA, which efficiently associates with VelB and LaeA. It is yet unclear whether there are additional AnFus3 targets which support VelB-VeA complex formation. VelB-VeA then contributes with AnSte12 to sexual fruiting body development and the trimeric VelB-VeA-LaeA concomitantly promotes expression of distinct genes for secondary metabolites ( Figure 9E). These include the production of the mycotoxin sterigmatocystin or antitumor agent terrequinone but not the antibiotic penicillin synthesis. The MAPK module of A. nidulans is presumably involved in integrating multiple signals and enabling an adequate cellular response. Oxylipins represent currently the only known pheromones of Aspergilli but the receptors are unknown . In yeast nitrogen starvation induces the same kinase module as pheromones, and part of the components are also involved in response to osmotic stress. 
It is likely that the AnSte50-Ste11-Ste7-Fus3 and the septal AnSte11-Ste7 modules have additional targets other than AnSte12 and VeA, which remain to be identified. An interesting open question is whether other organisms also transport their Fus3 MAPK counterpart together with the entire module from surface to nuclear envelope. This results in questions about transport control points and module attachment sites on the nuclear envelope where future work in A. nidulans could deepen insights into the molecular mechanism of information transfer through the cell. Manipulation of nucleic acids Circular and linear DNA molecules were created based on the standard recombinant DNA technology protocols in detail . Plasmids and oligonucleotides applied and constructed in this study are given in Table S8 and Table S9. Hybridization techniques and analysis of nucleic acids Southern and Northern hybridizations were carried out as explained in detail according to protocols . Immunoblotting Immunoblotting experiments for recognition of GFP, TAP fusion, VeA, and actin in protein extracts was performed according to described protocols . a-phospho 44/42 (4377, CELL SIGNALING TECHNOLOGY INC) was used for detection of the phosphorylated AnFus3 . For the detection of the phosphorylated proteins, a-phosphoserine/threonine (ab17464, ABCAM) was employed. Manufacturers protocols were followed for incubation times and buffer applications of phosphospecific antibodies. Expression of recombinant proteins Proteins were expressed in Rosetta 2 (DE3) using ZYM5052 media supplemented with 30 mg/ml Chloramphenicol and 100 mg/ml Ampicillin (GST-LaeA91) or 30 mg/ml Kanamycin (Velvet proteins) at 16uC. Cells were harvested by centrifugation, resuspended in lysis buffer (30 mM HEPES pH 7.4, 400 mM NaCl, 30 mM Imidazol) and lysed by passing through a Microfluidics Fluidizer at 0.55 MPa. The lysate was cleared by centrifugation at 300006g for 30 minutes. 
His-tagged proteins were purified with a 5 ml NiNTA-Sepharose (GE HEALTHCARE) and GST-tagged LaeA91 with a 5 ml GSH-Sepharose (GE HEALTHCARE) column connected to an Ä KTA Prime chromatography system. After washing with 10 column volumes with lysis buffer, proteins were eluted with elution buffer plus 400 mM Imidazol or 30 mM reduced Glutathione. Velvet proteins were desalted with a HiPrep Desalting 26/10 column (GE HEALTHCARE) into storage buffer (10 mM HEPES pH 7.4, 400 mM NaCl). GST-LaeA91 was cleaved with PreScission Protease at 4uC for 16 h and further purified by gel-filtration using a Superdex 200 26/60 and a final 5 ml GSH-Sepharose column both equilibrated in gelfiltration buffer (10 mM HEPES pH 7.4, 150 mM NaCl). All proteins were shock-frozen in liquid nitrogen and stored at 280uC until further use. Protein immunoprecipitation In order to immunoprecipitate GFP fusion proteins, protein crude extracts were prepared from vegetatively grown cultures. 100 ml GFP-Trap sepharose (CHROMOTEK) was washed twice with 1 ml protein extraction buffer (50 mM Tris pH 7.5, 100 mM KCl, 10 mM MgCl 2 , 0.1% NP40, 10% Glycerol, 20 mM b-glycerophosphate, 2 mM Na 3 VO 4 , 5 mM NaF, 0.5 mM PMSF, 1 mM benzamidine, 1 mM EGTA, 1 mM DTT). 20 ml (150 mg total) protein crude extract was incubated with 100 ml GFP-Trap sepharose (CHROMOTEK) at 4uC for 2 hours on a rotating platform. Afterwards, sepharose-extract mixture was centrifuged at 4000 rpm at 4uC for 1 min. Crude extract was removed with a 5 ml pipette. The sepharose was washed twice with 20 ml of protein buffer and centrifuged at 4000 rpm at 4uC for 1 min. This step was repeated one more time. Finally, 1 ml of protein buffer was added and the sepharose was resuspended. Each of the 200 ml sepharose buffer mixture was transferred into 1.5 ml eppendorf cups and centrifuged at 4000 rpm at 4uC for 1 min and supernatant was removed. 
Immunoprecipitated proteins were washed three times with 1 ml kinase reaction buffer (KRB; 20 mM Tris pH 7.5, 10 mM MgCl2, 1 mM DTT, 1 mM benzamidine, 1 mM Na3VO4, 5 mM NaF, 0.1 µCi [γ-32P]-ATP). In vitro phosphorylation and dephosphorylation assay In vitro phosphorylation assay was performed with modifications according to protocol given in . For the in vitro phosphorylation experiment, 30 µl KRB, containing 0.1 µCi [γ-32P]-ATP and 10 µg recombinant protein, were added to the sepharose beads and incubated at 30°C for 35 minutes with periodic resuspension every five minutes. Afterwards, reaction tubes were centrifuged at 4000 rpm at R/T for 1 min and supernatants containing phosphorylated proteins were transferred into new Eppendorf cups. Supernatants and sepharose containing immunoprecipitated proteins were mixed with 3× protein loading dye (30 µl supernatant and 15 µl loading dye) and incubated at 95°C for 10 min. 30 µl of the supernatant fraction was run on a 4-15% gradient SDS gel that was dried for 2 h and exposed to Kodak X-omat film for 5 hours. 10 µl of the reaction was used for visualization of the proteins with Coomassie staining. 2 µl of sepharose was used for immunoblotting and Ponceau staining for validation of equal immunoprecipitated target protein (MpkB or GFP). For non-radioactive kinase experiments, the same KRB buffer containing 5 mM ATP was used. Supernatants were treated with 1000 units lambda protein phosphatase (NEW ENGLAND BIOLABS) in the presence of 1 mM MnCl2 at 30°C for 1 hour. Samples were mixed with 3× loading dye and boiled at 95°C for 10 min. 3 µl of the samples were used for immunoblotting. Tandem Affinity Purification (TAP) protocol and LC-MS/MS protein identification For the TAP purification of the MkkB, MpkB, SteD, and VeA interacting proteins and further LC-MS/MS identification previously published protocols were applied . Confocal spinning disc and fluorescence microscopy A. 
nidulans strains expressing various fluorescence proteins (EYFP/sGFP/mRFP) were inoculated in the 8-well borosilicate coverglass system (NUNC) containing the liquid minimal medium. Widefield fluorescence photographs were taken with an AXIOVERT OBSERVER. Z1 (ZEISS) microscope equipped with a COOLSNAP ES2 (PHOTOMETRICS) digital camera. CSU-X1 A1 confocal scanner unit (YOKOGAWA) connected with QUANTEM:512SC (PHOTOMETRICS) digital camera was used for laser microscopy. The SLIDEBOOK 5.0 software package (INTELLIGENT IMAGING INNOVATIONS) was used for fluorescence and laser confocal image and movie recording as well as productions. We defined signals as plasma-membrane localized if we found the signals that are at the border of the silhouette of the fungal cell or even surmount the fungal cell; similarly, we defined signals as nucleus-associated when we found multiple signals at the border of the nuclear silhouette. Quantification of the YFP fluorescence The EYFP protein was purified by using GFP-Trap as described for GFP protein. EYFP molecules were allowed to attach to poly-L-lysine coated coverslips for 10 minutes, in PBS buffer. Fungal cultures were grown as described above. The preparations were imaged using a SP5 TCS STED microscope (LEICA MICROSYS-TEMS), under 514 nm excitation (provided by an Argon laser), using a 1006oil-immersion objective (1.4 NA, LEICA). The images were processed by a custom-written routine in Matlab (THE MATHWORKS INC.). Briefly, the spots were identified by the application of an automatic threshold based on the intensity of the background. We then used Gaussian fits to the spots to determine their intensity, and to correct for the background intensity, which provided the baseline of the fits. Analysis of secondary metabolites Extraction of sterigmatocystin (ST) and thin layer chromatography (TLC) was carried out as given in detail . Penicillin levels were determined as published previously . 
expressed under yeast STE7 or FUS3 promoters in a centromeric self-replicating plasmid. These constructs were expressed in the respective fus3, ste7 and fus3/kss1 double mutants. Strains were grown in the presence of 15 mg alpha factor given on the paper discs at 30uC for 3 days. Alpha factor in wild type (empty plasmid) and complementation strains (STE7 in ste7 mutant, FUS3 in fus3 mutant, FUS3 in fus3/kss1 mutant) results in a strong growth inhibition (halo). ste7 and fus3/kss1 mutants do not show any response to the pheromone treatment. fus3 mutant exhibits a reduced response (cloudy halo). AnSte7 and Fus3 do not remediate the halo phenotype of the ste7 and fus3 mutants. mpkB cDNA partially restores the pheromone response of the fus3/kss1 double mutant. (TIF) Figure 7 for examples). The intensities are comparable, although the values tend to be higher for AnSte11-Ste7 (blue) than for AnSte7-Fus3 (red). To obtain an estimate of the number of molecules in the complexes, these values were compared to the intensity of single EYFP molecules attached to coverglasses (green). (B) The bar graphs indicate the average values for the intensities (obtained from the same datasets as in A). The intensity of the complexes is ,9-fold (AnSte7-Fus3) or ,10-fold (AnSte11-Ste7) higher than that of single EYFP molecules, suggesting the presence of 9 to 10 molecules in a complex. The bars show the mean and standard error; 50-300 spots were analyzed for each condition. Video S1 Time-lapse analysis of the subcellular movements of the AnSte7-GFP fusion along the fungal cells. Individual focal planes were captured with a spinning disc confocal microscope at 2 min intervals (total 26 min). AnSte7 protein moves in an internuclear manner. The nuclei were visualized by mRFP::His-tone2A fusion protein. Green spot leaves the first nucleus and shortly touches the plasma membrane (zigzag movement) and sticks to the envelope of the next nucleus. 
Some spots of the AnSte7 are static (immobile dot at the hyphal tip). The video is presented at a rate of 5 frames/second. (MOV) Video S2 Time-lapse capture of the subcellular movements of the AnSte50-GFP fusion within the fungal cell. Single focal planes were captured at 2 min intervals (total 22 min). AnSte50 protein moves to the nucleus (red), hits the nuclear envelope or nucleus and moves in a retrograde direction. Due to the single focal plane, spot disappears at 6 min. After 6 min, spot movement can be tracked again. The video speed is 5 frames/ second. (MOV) Video S3 Time-lapse analysis of the subcellular movements of the AnSte7-AnFus3 complexes (yellow dot) along the fungal hypha. AnSte7-Fus3 complexes move in a retro and anterograde direction in comparison to the hyphal tip (upper left). These complexes move between the nuclei, which were visualized by mRFP::Histone2A fusion. Single focal layer images were captured at 1 min intervals (total 17 min). The video was produced at a setting 5 frames/second. (MOV) Video S4 Time-lapse analysis of the subcellular dynamics of the AnSte50-AnFus3 complexes (yellow dot) in the fungal hypha. Single focal pictures were taken at 2 min intervals (total 58 min, 48 min is shown). This movie shows the movement of the AnSte50-AnFus3 complexes between two nuclei. Protein complexes (yellow dot) leave the first nucleus (visualized by mRFP::Histone2A fusion) and move to the second one. While moving to the second one, the complexes slightly touch the plasma membrane. Video is presented by using the setting 5 frames/ second. (MOV) Video S5 Retrograde translocation of the AnSte11-Ste7 protein complexes along the fungal hypha. AnSte11-Ste7 complexes move backwards from the hyphal tip. They leave the membrane and touch the nucleus. They also accumulate at septa (faint immobile yellow dot). Single focal planes were captured at 2 min interval (total 54 min, 28 min is shown). The video is presented at the speed of 5 frames/second. 
(MOV) Video S6 Time-lapse analysis of the migration of AnSte11-Ste7 complexes from the plasma membrane to the nucleus. Membranetethered complexes (yellow spots) slowly move to the nucleus (red). The movie was captured at 1 min interval (total 39 min). 5 frames/second. (MOV)
<filename>src/contestant/dto/update-contestant.dto.ts<gh_stars>1-10
import { PartialType } from '@nestjs/mapped-types';
import { CreateContestantDto } from './create-contestant.dto';

/**
 * Payload for updating an existing contestant.
 *
 * `PartialType` derives this DTO from `CreateContestantDto` with every field
 * made optional, so callers may send only the properties they want to change
 * while inheriting the creation DTO's validation rules for those fields.
 */
export class UpdateContestantDto extends PartialType(CreateContestantDto) {}
<filename>pkg/ui/v1beta1/hp.go<gh_stars>0 package v1beta1 import ( "context" "encoding/json" "log" "net/http" "strconv" "strings" "time" corev1 "k8s.io/api/core/v1" commonv1beta1 "github.com/kubeflow/katib/pkg/apis/controller/common/v1beta1" trialsv1beta1 "github.com/kubeflow/katib/pkg/apis/controller/trials/v1beta1" api_pb_v1beta1 "github.com/kubeflow/katib/pkg/apis/manager/v1beta1" ) func (k *KatibUIHandler) FetchHPJobInfo(w http.ResponseWriter, r *http.Request) { //enableCors(&w) experimentName := r.URL.Query()["experimentName"][0] namespace := r.URL.Query()["namespace"][0] conn, c := k.connectManager() defer conn.Close() resultText := "trialName,Status" experiment, err := k.katibClient.GetExperiment(experimentName, namespace) if err != nil { log.Printf("GetExperiment from HP job failed: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } log.Printf("Got Experiment") metricsList := map[string]int{} metricsName := experiment.Spec.Objective.ObjectiveMetricName resultText += "," + metricsName metricsList[metricsName] = 0 for i, m := range experiment.Spec.Objective.AdditionalMetricNames { resultText += "," + m metricsList[m] = i + 1 } log.Printf("Got metrics names") paramList := map[string]int{} for i, p := range experiment.Spec.Parameters { resultText += "," + p.Name paramList[p.Name] = i + len(metricsList) } log.Printf("Got Parameters names") trialList, err := k.katibClient.GetTrialList(experimentName, namespace) if err != nil { log.Printf("GetTrialList from HP job failed: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } log.Printf("Got Trial List") for _, t := range trialList.Items { succeeded := false for _, condition := range t.Status.Conditions { if condition.Type == trialsv1beta1.TrialSucceeded && condition.Status == corev1.ConditionTrue { succeeded = true } } var lastTrialCondition string // Take only the latest condition if len(t.Status.Conditions) > 0 { lastTrialCondition = 
string(t.Status.Conditions[len(t.Status.Conditions)-1].Type) } trialResText := make([]string, len(metricsList)+len(paramList)) if succeeded { obsLogResp, err := c.GetObservationLog( context.Background(), &api_pb_v1beta1.GetObservationLogRequest{ TrialName: t.Name, StartTime: "", EndTime: "", }, ) if err != nil { log.Printf("GetObservationLog from HP job failed: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } for _, m := range obsLogResp.ObservationLog.MetricLogs { if trialResText[metricsList[m.Metric.Name]] == "" { trialResText[metricsList[m.Metric.Name]] = m.Metric.Value } else { currentValue, _ := strconv.ParseFloat(m.Metric.Value, 64) bestValue, _ := strconv.ParseFloat(trialResText[metricsList[m.Metric.Name]], 64) if t.Spec.Objective.Type == commonv1beta1.ObjectiveTypeMinimize && currentValue < bestValue { trialResText[metricsList[m.Metric.Name]] = m.Metric.Value } else if t.Spec.Objective.Type == commonv1beta1.ObjectiveTypeMaximize && currentValue > bestValue { trialResText[metricsList[m.Metric.Name]] = m.Metric.Value } } } } for _, trialParam := range t.Spec.ParameterAssignments { trialResText[paramList[trialParam.Name]] = trialParam.Value } resultText += "\n" + t.Name + "," + lastTrialCondition + "," + strings.Join(trialResText, ",") } log.Printf("Logs parsed, results:\n %v", resultText) response, err := json.Marshal(resultText) if err != nil { log.Printf("Marshal result text for HP job failed: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Write(response) } // FetchHPJobTrialInfo returns all metrics for the HP Job Trial func (k *KatibUIHandler) FetchHPJobTrialInfo(w http.ResponseWriter, r *http.Request) { //enableCors(&w) trialName := r.URL.Query()["trialName"][0] namespace := r.URL.Query()["namespace"][0] conn, c := k.connectManager() defer conn.Close() trial, err := k.katibClient.GetTrial(trialName, namespace) if err != nil { log.Printf("GetTrial from HP job failed: %v", err) http.Error(w, 
err.Error(), http.StatusInternalServerError) } objectiveType := trial.Spec.Objective.Type // resultArray - array of arrays, where [i][0] - metricName, [i][1] - metricTime, [i][2] - metricValue var resultArray [][]string resultArray = append(resultArray, strings.Split("metricName,time,value", ",")) obsLogResp, err := c.GetObservationLog( context.Background(), &api_pb_v1beta1.GetObservationLogRequest{ TrialName: trialName, StartTime: "", EndTime: "", }, ) if err != nil { log.Printf("GetObservationLog failed: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } // prevMetricTimeValue is the dict, where key = metric name, // value = array, where [0] - Last metric time, [1] - Best metric value for this time prevMetricTimeValue := make(map[string][]string) for _, m := range obsLogResp.ObservationLog.MetricLogs { parsedCurrentTime, _ := time.Parse(time.RFC3339Nano, m.TimeStamp) formatCurrentTime := parsedCurrentTime.Format("2006-01-02T15:04:05") if _, found := prevMetricTimeValue[m.Metric.Name]; !found { prevMetricTimeValue[m.Metric.Name] = []string{"", ""} } newMetricValue, err := strconv.ParseFloat(m.Metric.Value, 64) if err != nil { log.Printf("ParseFloat for new metric value: %v failed: %v", m.Metric.Value, err) http.Error(w, err.Error(), http.StatusInternalServerError) return } var prevMetricValue float64 if prevMetricTimeValue[m.Metric.Name][1] != "" { prevMetricValue, err = strconv.ParseFloat(prevMetricTimeValue[m.Metric.Name][1], 64) if err != nil { log.Printf("ParseFloat for prev metric value: %v failed: %v", prevMetricTimeValue[m.Metric.Name][1], err) http.Error(w, err.Error(), http.StatusInternalServerError) return } } if formatCurrentTime == prevMetricTimeValue[m.Metric.Name][0] && ((objectiveType == commonv1beta1.ObjectiveTypeMinimize && newMetricValue < prevMetricValue) || (objectiveType == commonv1beta1.ObjectiveTypeMaximize && newMetricValue > prevMetricValue)) { prevMetricTimeValue[m.Metric.Name][1] = m.Metric.Value for i := 
len(resultArray) - 1; i >= 0; i-- { if resultArray[i][0] == m.Metric.Name { resultArray[i][2] = m.Metric.Value break } } } else if formatCurrentTime != prevMetricTimeValue[m.Metric.Name][0] { resultArray = append(resultArray, []string{m.Metric.Name, formatCurrentTime, m.Metric.Value}) prevMetricTimeValue[m.Metric.Name][0] = formatCurrentTime prevMetricTimeValue[m.Metric.Name][1] = m.Metric.Value } } var resultText string for _, metric := range resultArray { resultText += strings.Join(metric, ",") + "\n" } response, err := json.Marshal(resultText) if err != nil { log.Printf("Marshal result text in Trial info failed: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Write(response) }
/**
 * A helper for writing in a {@code PGCopyOutputStream}.
 *
 * <p>Values are written in the PostgreSQL COPY binary format: a fixed file
 * header, then for each row a 16-bit field count followed by length-prefixed
 * field values. Each {@code writeX} method delegates to a small per-type
 * serializer wrapped in a null-aware writer.
 */
public class CopyWriter implements AutoCloseable {

  private static final Charset UTF8 = StandardCharsets.UTF_8;

  // Wire constants for the PostgreSQL inet binary representation:
  // address family code, mask length, and the is_cidr flag.
  private static final byte IPV4 = 2;
  private static final byte IPV4_MASK = 32;
  private static final byte IPV4_IS_CIDR = 0;
  private static final byte IPV6 = 3;
  private static final int IPV6_MASK = 128;
  private static final byte IPV6_IS_CIDR = 0;

  // Buffered to avoid one underlying write per primitive value.
  private final DataOutputStream data;

  /**
   * Creates a new writer with the specified {@code PGCopyOutputStream}.
   *
   * @param data the COPY output stream to write into
   */
  public CopyWriter(PGCopyOutputStream data) {
    this.data = new DataOutputStream(new BufferedOutputStream(data, 65536));
  }

  /**
   * Writes the header of the query. Must be called once, before any row.
   */
  public void writeHeader() throws IOException {
    // 11 bytes required header
    data.writeBytes("PGCOPY\n\377\r\n\0");
    // 32 bit integer indicating no OID
    data.writeInt(0);
    // 32 bit header extension area length
    data.writeInt(0);
  }

  /**
   * Writes the number of columns affected by the query. Starts a new row;
   * call once per row before writing its field values.
   *
   * @param columns the number of fields in the row
   */
  public void startRow(int columns) throws IOException {
    data.writeShort(columns);
  }

  /**
   * Writes a null value (a field length of -1 and no payload).
   */
  public void writeNull() throws IOException {
    data.writeInt(-1);
  }

  /**
   * Writes a string value.
   */
  public void writeString(String value) throws IOException {
    nullableWriter(CopyWriter::stringWriter).write(data, value);
  }

  /**
   * Writes a list of string values.
   */
  public void writeStringList(List<String> value) throws IOException {
    nullableWriter(collectionWriter(ObjectIdentifier.Text, CopyWriter::stringWriter)).write(data, value);
  }

  /**
   * Writes a boolean value.
   */
  public void writeBoolean(Boolean value) throws IOException {
    nullableWriter(CopyWriter::booleanWriter).write(data, value);
  }

  /**
   * Writes a list of boolean values.
   */
  public void writeBooleanList(List<Boolean> value) throws IOException {
    nullableWriter(collectionWriter(ObjectIdentifier.Boolean, CopyWriter::booleanWriter)).write(data, value);
  }

  /**
   * Writes a byte value.
*/ public void writeByte(Byte value) throws IOException { nullableWriter(CopyWriter::byteWriter).write(data, value); } /** * Writes a byte array value. */ public void writeByteArray(byte[] value) throws IOException { nullableWriter(CopyWriter::byteArrayWriter).write(data, value); } /** * Writes a short value. */ public void writeShort(Short value) throws IOException { nullableWriter(CopyWriter::shortWriter).write(data, value); } /** * Writes a list of short values. */ public void writeShortList(List<Short> value) throws IOException { nullableWriter(collectionWriter(ObjectIdentifier.Int4, CopyWriter::shortWriter)).write(data, value); } /** * Writes an integer value. */ public void writeInteger(Integer value) throws IOException { nullableWriter(CopyWriter::integerWriter).write(data, value); } /** * Writes a list of integer values. */ public void writeIntegerList(List<Integer> value) throws IOException { nullableWriter(collectionWriter(ObjectIdentifier.Int4, CopyWriter::integerWriter)).write(data, value); } /** * Writes a long value. */ public void writeLong(Long value) throws IOException { nullableWriter(CopyWriter::longWriter).write(data, value); } /** * Writes a list of long values. */ public void writeLongList(List<Long> value) throws IOException { nullableWriter(collectionWriter(ObjectIdentifier.Int8, CopyWriter::longWriter)).write(data, value); } /** * Writes a float value. */ public void writeFloat(Float value) throws IOException { nullableWriter(CopyWriter::floatWriter).write(data, value); } /** * Writes a list of float values. */ public void writeFloatList(List<Float> value) throws IOException { nullableWriter(collectionWriter(ObjectIdentifier.Int8, CopyWriter::floatWriter)).write(data, value); } /** * Writes a double value. */ public void writeDouble(Double value) throws IOException { nullableWriter(CopyWriter::doubleWriter).write(data, value); } /** * Writes a list of double values. 
*/ public void writeDoubleArray(List<Double> value) throws IOException { nullableWriter(collectionWriter(ObjectIdentifier.Int8, CopyWriter::doubleWriter)).write(data, value); } /** * Writes a date value. */ public void writeLocalDate(LocalDate value) throws IOException { nullableWriter(CopyWriter::localDateWriter).write(data, value); } /** * Writes a list of date values. */ public void writeLocalDateTime(LocalDateTime value) throws IOException { nullableWriter(CopyWriter::localDateTimeWriter).write(data, value); } /** * Writes an inet adress value. */ public void writeInet4Adress(Inet4Address value) throws IOException { nullableWriter(CopyWriter::inet4AdressWriter).write(data, value); } /** * Writes a list of inet adress values. */ public void writeInet6Adress(Inet6Address value) throws IOException { nullableWriter(CopyWriter::inet6AdressWriter).write(data, value); } /** * Writes a map value. */ public void writeHstore(Map<String, String> value) throws IOException { nullableWriter(CopyWriter::hstoreWriter).write(data, value); } /** * Writes a geometry value. */ public void writeGeometry(Geometry value) throws IOException { nullableWriter(CopyWriter::geometryWriter).write(data, value); } /** * Close the writer. 
*/ @Override public void close() throws IOException { data.writeShort(-1); data.flush(); data.close(); } private static <T> ValueWriter<T> nullableWriter(ValueWriter<T> writer) { return (data, value) -> { if (value == null) { data.writeInt(-1); return; } writer.write(data, value); }; } private static void booleanWriter(DataOutputStream data, Boolean value) throws IOException { data.writeInt(1); if (value) { data.writeByte(1); } else { data.writeByte(0); } } private static void byteWriter(DataOutputStream data, Byte value) throws IOException { data.writeInt(1); data.writeShort(value.byteValue()); } private static void byteArrayWriter(DataOutputStream data, byte[] value) throws IOException { data.writeInt(value.length); data.write(value, 0, value.length); } private static void shortWriter(DataOutputStream data, Short value) throws IOException { data.writeInt(2); data.writeShort(value.shortValue()); } private static void integerWriter(DataOutputStream data, Integer value) throws IOException { data.writeInt(4); data.writeInt(value.intValue()); } private static void floatWriter(DataOutputStream data, Float value) throws IOException { data.writeInt(4); data.writeFloat(value.floatValue()); } private static void doubleWriter(DataOutputStream data, Double value) throws IOException { data.writeInt(8); data.writeDouble(value.doubleValue()); } private static void longWriter(DataOutputStream data, Long value) throws IOException { data.writeInt(8); data.writeLong(value.longValue()); } private static void stringWriter(DataOutputStream data, String value) throws IOException { byte[] bytes = value.getBytes(UTF8); data.writeInt(bytes.length); data.write(bytes); } private static void localDateWriter(DataOutputStream data, LocalDate value) throws IOException { data.writeInt(4); data.writeInt(TimestampUtils.toPgDays(value)); } private static void localDateTimeWriter(DataOutputStream data, LocalDateTime value) throws IOException { data.writeInt(8); 
data.writeLong(TimestampUtils.toPgSecs(value)); } private static void inet4AdressWriter(DataOutputStream data, Inet4Address value) throws IOException { data.writeInt(8); data.writeByte(IPV4); data.writeByte(IPV4_MASK); data.writeByte(IPV4_IS_CIDR); byte[] inet4AddressBytes = value.getAddress(); data.writeByte(inet4AddressBytes.length); data.write(inet4AddressBytes); } private static void inet6AdressWriter(DataOutputStream data, Inet6Address value) throws IOException { data.writeInt(20); data.writeByte(IPV6); data.writeByte(IPV6_MASK); data.writeByte(IPV6_IS_CIDR); byte[] inet6AddressBytes = value.getAddress(); data.writeByte(inet6AddressBytes.length); data.write(inet6AddressBytes); } private static void geometryWriter(DataOutputStream data, Geometry value) throws IOException { WKBWriter writer = new WKBWriter(2, wkbNDR, true); byte[] wkb = writer.write(value); data.writeInt(wkb.length); data.write(wkb, 0, wkb.length); } private <T> ValueWriter<List<T>> collectionWriter(int oid, ValueWriter<T> writer) { return (data, values) -> { // Write into a temporary byte array ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); // Use 1 for one-dimensional arrays dataOutputStream.writeInt(1); // The collection can contain null values dataOutputStream.writeInt(1); // Write the values using the OID dataOutputStream.writeInt(oid); // Write the number of elements dataOutputStream.writeInt(values.size()); // Ignore Lower Bound. 
Use PG Default for now dataOutputStream.writeInt(1); // Iterate over the collection and write each values for (T value : values) { writer.write(dataOutputStream, value); } // Write the entire array to the COPY data: data.writeInt(byteArrayOutputStream.size()); data.write(byteArrayOutputStream.toByteArray()); }; } private static void hstoreWriter(DataOutputStream data, Map<String, String> value) throws IOException { // Write into a temporary byte array ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); // Write the number of values to write dataOutputStream.writeInt(value.size()); // Iterate over the map and write each key value pairs for (Map.Entry<String, String> entry : value.entrySet()) { stringWriter(dataOutputStream, entry.getKey()); stringWriter(dataOutputStream, entry.getValue()); } // Write the entire array to the COPY data data.writeInt(byteArrayOutputStream.size()); data.write(byteArrayOutputStream.toByteArray()); } @FunctionalInterface private interface ValueWriter<T> { void write(DataOutputStream data, T value) throws IOException; } }
Muscle-Strengthening Exercise Questionnaire (MSEQ): an assessment of concurrent validity and test–retest reliability Objectives Muscle-strengthening exercise (MSE) has multiple independent health benefits and is a component of global physical activity guidelines. However, the assessment of MSE in health surveillance is often limited to the constructs of frequency (days/week), with little focus on constructs such as MSE type, muscle groups targeted and intensity. This study describes the test–retest reliability and concurrent validity of the Muscle-Strengthening Exercise Questionnaire (MSEQ), which was developed to assess multiple MSE participation constructs. Methods The MSEQ was developed to assess the weekly frequency, session duration and intensity, types of MSE (eg, weight machines, bodyweight exercise) and muscle groups targeted. Two convenience samples of adult participants were recruited. Test–retest reliability was completed online by 85 participants. Concurrent validity was assessed for 54 participants using an online 7-day MSE log. Results The MSEQ shows high test–retest reliability for frequency, duration and level of intensity for each of the four MSE types (using weight machines, bodyweight exercises, resistance exercises and holistic exercises), and for the four types combined (ρ range 0.76–0.91). For muscle groups targeted, the reliability ranged mostly from moderate-to-substantial for each of the four MSE types (κ range 0.44–0.78) and fair-to-moderate for the four types combined (κ range 0.35–0.51). Concurrent validity for frequency, duration and level of intensity for each of the four MSE types, and the four types combined, was moderate-to-high (ρ range 0.30–0.77). Conclusion The MSEQ shows acceptable reliability and validity for four key MSE constructs. This new MSEQ survey instrument could be used to assess adults’ MSE. 
INTRODUCTION Strong scientific evidence demonstrates that regular muscle-strengthening exercise (MSE: including using weight training equipment and machines, resistance bands and doing bodyweight exercises) is linked to optimal health and well-being in adults. 1 2 In brief, meta-analyses and systematic reviews of controlled clinical exercise studies show that MSE leads to enhanced cardiometabolic, 3 musculoskeletal 4 and mental health, 5 and reductions in visceral fat. 6 Recent data from prospective cohort studies suggest that MSE is independently associated with a reduced risk of all-cause and disease-specific mortality, 7 8 cardiovascular disease, 9 type 2 diabetes, 10 11 obesity 12 13 and some cancers. 8 Despite its multiple independent health benefits, and the fact that MSE was first included as part of the US physical activity guidelines in 2008, 14 and global guidelines since 2010, 15 in comparison to moderate-tovigorous aerobic physical activity (MVPA: eg, walking, running or cycling) and sedentary behaviour (low energy sitting, reclining or lying posture in waking hours), this exercise modality has received little attention in physical activity epidemiology. 16 17 In particular, research has shown that the assessment of MSE is rare in physical activity surveillance. 18 Key messages What is already known ► Muscle-strengthening exercise has multiple independent health benefits. ► Assessment of muscle-strengthening exercise is rare in physical activity surveillance. ► Surveillance instruments assessing musclestrengthening exercise are mostly limited to frequency (days/week) and duration (minutes/session). What are the new findings ► We developed a new online muscle-strengthening exercise assessment instrument, the Muscle-Strengthening Exercise Questionnaire (MSEQ). ► The MSEQ has shown acceptable 7-day test-retest reliability. ► The MSEQ has shown adequate validity when using a 7-day muscle-strengthening exercise log as the standard. 
► Future population-level health surveillance of muscle-strengthening exercise may include the MSEQ. Open access Moreover, our recent systematic review of the assessment of MSE within health surveillance highlighted two key limitations in the current assessment of MSE at the population level. 19 First, few surveillance instruments assess MSE participation constructs beyond weekly frequency. Second, unlike aerobic MVPA and sedentary behaviour, there is no standardised instrument for assessing MSE in health surveillance. 19 Developing an understanding of the surveillance of multiple MSE participation constructs is important because clinical exercise studies demonstrate that factors such as type (single vs multijoint; bodyweight vs use of weight machines, etc), duration and intensity of MSE, affect some key outcomes such as skeletal muscle size/ endurance/strength. 23 24 The assessment of MSE participation constructs at the population level, that goes beyond simply frequency, is critical for establishing the optimal dose of this exercise modality for health in future studies. 17 Furthermore, a standardised MSE assessment instrument will be essential for the population-level tracking and monitoring of this important and currently understudied health behaviour. In addition, accurate and consistent assessments of physical activity-related behaviours are key for identifying at-risk population subgroups most in need of future large-scale public health interventions. 25 This study aimed to describe the development of the Muscle-Strengthening Exercise Questionnaire (MSEQ)-a newly designed MSE assessment instrument for adults, with a specific focus on the assessment of its test-retest reliability and concurrent validity. Study population From January to March 2021, a subsample of participants was recruited from a larger online study on MSE participation, barriers/facilitators and attitudes towards this exercise mode ('Main Study'). 
In this main study, we recruited a convenience sample of 461 adults (aged ≥18 years) via the use of social media (eg, Facebook, Twitter, Instagram) and professional networks. At the end of the Main Study, respondents were invited to participate in further follow-up research for assessing the reliability and validity of survey items concerning their MSE participation during a usual week (MSEQ, described below). If they agreed, they were allocated, on an alternating (one for one) basis, to either: (1) reliability sample or (2) validity sample. Informed consent was obtained from all participants. Muscle-Strengthening Exercise Questionnaire (MSEQ) The MSEQ was designed to be a brief assessment instrument for delivery in an online format. Specifically, we created a 9-item instrument that assesses key MSE constructs for use in future physical activity surveillance. The final version of the MSEQ is shown in online supplemental digital content 1. The initial development of the MSEQ was broadly guided by several key MSE resources, including the 2009 'American College of Sports Medicine Position Stand on Progression Models in Resistance Training for Healthy Adults', 26 Garber et al's 'Guidance for Prescribing Exercise' 2 and the 2018 Physical Activity Guidelines for Americans. 27 After a review of these sources and consideration of what is practical to include in a surveillance instrument, five MSE participation constructs were assessed: (1) type; (2) frequency; (3) duration; (4) intensity and (5) the muscle groups targeted. The preamble of the MSEQ, the key justifications for choosing each MSE construct and their response items are now described. Preamble When developing self-report instruments of physical activity-related behaviours, it is important to provide respondents with some examples of the behaviours of interest to enhance comprehension. 
28 Given that at the population level, ~60% of adults do no MSE, 29 an understanding of what constitutes MSE may be limited among the general population. 19 Therefore, to assist respondents, we provided the following statement at the beginning of the survey: 'The next set of questions are about your participation in muscle-strengthening exercise, sometimes called weight or resistance training. When thinking about muscle-strengthening exercise, we are only interested in exercises that you do during your leisure or free time, and not done as part of your work/job, or as a part of household activities (chores). The types of muscle-strengthening exercise we are interested in include: ► Using weight machines-typically in a gym or fitness centre. ► Bodyweight exercises-including push-ups or sit-ups. ► Resistance exercises-using free weights like dumbbells or resistance bands. ► Holistic exercises-including Yoga, Tai-Chi or Pilates'. This phrasing was initially tested with a subset of participants (n=10) to assess readability and comprehension. After this consultation, minor changes were made to the final preamble. Type At present, MSE surveillance instruments typically include a wide variety of MSE-related activities grouped into one category. 19 For example, the Behavioral Risk Factor Surveillance System (BRFSS), the most commonly used MSE surveillance instrument, 19 combines diverse activities such as 'using weight machines', 'free weights', 'elastic bands', 'yoga' or 'sit-ups or push-ups'. 30 The limitation of combining all MSE types into a single group is that it is not possible to examine patterns and prevalence of different MSE-related behaviours and, most importantly, determine the relationship between separate MSE types and health. To address this limitation, in the MSEQ, we selected four MSE types. 
These were: (1) 'Use of weight machines' (eg, leg press, chest press, lat pulldown); (2) 'Bodyweight exercises' (including push-ups, sit-ups); (3) 'Resistance exercises' (using resistance bands or free weights like dumbbells) and (4) 'Holistic exercises' (including Yoga, Tai-Chi and Pilates)'. The terminology for, and examples of, the MSE types are largely consistent with those within the key texts in this field. 2 27 31 For each of these four MSE types, participants were asked to complete the following items (see online supplemental digital content 1). Frequency (items 2 and 3) In MSE surveillance, weekly frequency is the most commonly assessed MSE construct. 19 Accordingly, for comparisons to previous instruments, we included a similar question to that used in the BRFSS survey, 30 where respondents were asked for all MSE: 'How many days, in a usual week, do you do muscle-strengthening exercise?' (item 2). Response selections were: (i) 'none'; (ii) '1'; (iii) '2'; (iv) '3'; (v) '4'; (vi) '5', (vii) '6' and (viii) '7 days'. This question was asked separately for each type to understand the frequency of the four specific MSE types given above (item 3). The response options for this question were the same as all MSE (eg, (i) 'none' to (viii) '7 days'). Duration (item 4) Despite clinical studies showing a positive dose-response relationship between time spent doing MSE and muscle size and strength, 32 duration is rarely assessed in MSE surveillance. 17 Therefore, to gain a better understanding of this construct, respondents were asked: 'In a usual week please indicate how long you spend doing each of the following types of muscle-strengthening exercise? … in a usual session'. Response options in minutes were: (i) '0'; (ii) 'less than 10'; (iii) '10-20', (iv) '21-30'; (v) '31-40'; (vi) '41-50', (vii) '51-60', (viii) '≥60 min spent in a usual session'. This question was asked separately for each of the four MSE types. 
Muscle groups targeted (items 5-8) The 2008 Physical Activity Guidelines for Americans first introduced the recommendation that when doing MSE, an adult should engage all major muscle groups. 14 This recommendation is based on the clinical evidence that suggests that using several large muscle groups is more effective in maintaining and increasing muscle strength and bone mineral density, 33 compared with using the smaller muscle groups. 34 However, few existing MSE instruments assess muscle groups targeted. 35 To guide which muscle groups to include in the MSEQ, we used the American College of Sports Medicine (ACSM) definitions, which define all major muscle groups as seven separate groups: legs, hips, back, abdomen, chest, shoulders, and arms. 36 Accordingly, in the MSEQ, respondents were asked, 'In a usual week please indicate which muscle groups you use when you do each of the following types of muscle-strengthening exercise?' Response options of (i) 'yes' or (ii) 'no' were provided for the following seven different muscle groups, and to assist with respondent comprehension, we provided examples of MSE activities that target each group: (i) 'legs (eg, squats, lunges, bridges)'; (ii) 'hips (eg, side leg raises, bridges)'; (iii) 'back (eg, lat pulldown, bent-over row)'; (iv) 'abdomen (eg, crunches, sit-ups)'; (v) 'chest (eg, bench press, push-ups)'; (vi) 'shoulders (eg, lat raise, overhead press)' and (vii) 'arms (eg, bicep curl, tricep dips)'. This question was asked separately for each of the four MSE types. Intensity (item 9) Current global physical activity guidelines state: 'Adults should also do muscle-strengthening activities at moderate or greater intensity'. 15 However, current MSE surveillance instruments do not generally assess intensity. 19 To assess this key MSE participation construct, the MSEQ used the previously validated visual analogue scale developed by Robertson et al, 37 (See online supplemental digital content 1). 
Specifically, participants were asked: 'For each of the following types of muscle-strengthening exercises, please say how hard (level of intensity) you usually exercise'. The response options were provided on a 12-point scale, ending at (xi) '9' and (xii) '10 extremely hard'. This question was asked for each of the four MSE types. Test–retest reliability assessments To examine test–retest reliability, participants allocated to the reliability sample were sent an individualised survey link approximately 7 days after completing the first online survey. Each participant responded to the same set of questions described above. Concurrent validity assessments (7-day MSE log) To examine concurrent validity, participants allocated to the validity sample were asked to complete a 7-day MSE log. Approximately 7 days after completing the first online survey, participants were sent an individualised link to complete an MSE log for 7 consecutive days. During this week and on each day, participants were asked, 'Did you do any muscle-strengthening exercise today?' (response options: (i) 'yes'; or (ii) 'no'). Those reporting no MSE were automatically taken to the end of the survey, advising them they would receive the invitation to complete the MSE log the next day, or if day 7, they were directed to the end of the survey. Participants who responded 'yes' to doing MSE on a given day were then asked to respond to the same set of questions described above. All response options were the same as the original survey. Based on the 7-day MSE log responses, we created weekly averages for all MSE and each MSE type for comparison to the original survey. To calculate the average data for the frequency, duration, muscle groups used, and intensity, we collated each response to these items from the 7-day MSE log, then divided the respective values by the total number of days of reported MSE in the 7-day log. 
For example, for frequency, the original 'times per usual week' value (from the baseline responses to the MSEQ) was compared with the total times per week value calculated from the 7-day MSE log. Statistical analysis Statistical Package for the Social Sciences V.26 (SPSS, IBM) was used to conduct all data analyses. Descriptive statistics were used to describe the characteristics of the participants included in the two individual samples (see table 1), and significance was set at p<0.05 throughout. To examine the test-retest reliability of the MSEQ items, four statistical tests were used to assess the relative agreement between the participant responses to the initial MSEQ survey (test) against the responses to the follow-up survey (retest). For the continuous variables (frequency, duration and intensity), we used intraclass correlation coefficients (ICC) and Spearman's rank correlation coefficients (Spearman's rho; ρ). For the dichotomous variable (muscle groups used), we used Kappa coefficients (κ) and percentage agreement. We applied a two-way random effects model for ICC analysis, including tests for absolute agreement. We presented ICC and Spearman's r value and its 95% CI for each frequency, duration, and level of intensity question. We present the κ value and 95% CI and the percentage agreement for the muscle groups used. We classified the level of agreement using previously established ICC and κ correlation coefficient thresholds. 38 40 For percentage agreement, we adopted the following classifications:<60% poor, 60%-74% moderate or ≥75% excellent. 41 To examine the concurrent validity of the MSEQ items on frequency, duration and intensity, ICCs and Spearman's r are presented to show the relative agreement between the responses to the initial MSEQ survey against the 7-day MSE log as the standard. For validity assessments, we used the same ICC and Spearman's r thresholds for the level of agreement used for the reliability. 
To investigate the viability of a short version of the MSEQ (see Discussion), we combined the response of all four MSE types (using weight machines, bodyweight exercises, resistance exercises and holistic exercises) to create a fifth type ('all types of muscle-strengthening exercise'). We examined the validity and reliability using the same for each of the individual types described above. Patient and public involvement Patients and/or the public were not involved in the design, or conduct, or reporting, or dissemination plans of this research. RESULTS A summary of the participant characteristics and weekly frequency of MSE for each sub-sample is shown in table 1. Full data were available from 85 participants included in the reliability sample and 54 in the validity sample. Overall, the sociodemographic characteristics were similar for each sample (see online supplemental digital content 3 for a copy of the sociodemographic questions). In brief, over half were female, ~70% aged between 18 and 54 years, ~54% living in Australia and ~60% living in urban areas. Most were university qualified and selfrated their health as good-excellent, just under half were married, and over half were employed and working ≥40 hours in a usual week. While the validity sample met the MSE guideline ≥2 days/week, ~30% of the reliability sample did not. Test-retest reliability The results of the test-retest reliability of the MSEQ are shown in tables 2 and 3. For the frequency of MSE days during a usual week, there was a substantial agreement for all four types of MSE (ICC range: 0.85-0.95) and fair agreement when all types were combined (ICC 0.58; 95% CI 0.40 to 0.73). For MSE duration, items for each type showed substantial agreement (ICC range: 0.88-0.96) and moderate agreement for all types combined (ICC 0.69; 95% CI 0.55 to 0.80). 
For the level of intensity, there was substantial agreement across all four types (ICC range: 0.89-0.93) and moderate agreement for the combined analysis (ICC 0.51; 95% CI 0.31 to 0.68). Spearman's rank correlations were high for all four MSE types, and all types combined for the frequency, duration and level of intensity of MSE (ρ range 0.76-0.91). For the muscle groups used (see table 3), there was substantial to almost perfect agreement for using weight machines (κ range 0.61-0.85) for all groups, except for 'abdomen' (κ=0.33; 95% CI −0.01 to 0.65). Moderate to almost perfect agreement was shown for muscle groups used when doing body weight exercises (κ range 0.51-0.83) except for 'back' (κ=0.40; 95% CI 0.14 to 0.64). The agreement for muscle groups using resistance bands or free weights (κ range 0.44-0.84) was similar to body weight exercises. However, the 'chest' agreement was only fair (κ=0.33; 95% CI 0.02 to 0.60). Agreements for holistic exercises were less diverse for all seven muscle groups (moderate to substantial), with κ ranges from 0.57 to 0.76. When all MSE types were combined, the agreement for each of the muscle groups was moderate (κ range 0.41-0.51) except for 'abdomen' (κ=0.35; 95% CI 0.17 to 0.52). Percentage agreement for four types of MSE and the types combined was excellent (range 75.9%-96.3%), except for 'back' when doing body weight exercises and 'abdomen' when using resistance bands or free weights (range 72.2%-72.7%). Concurrent validity The results for the concurrent validity of the MSEQ are shown in table 4. DISCUSSION This study describes the test-retest reliability and concurrent validity of a newly developed online survey instrument assessing muscle-strengthening exercise. 
The MSEQ was specifically designed to assess multiple MSE participation constructs (eg, frequency, duration, intensity, muscle groups) across different types of MSE (eg, use weight machines, body weight exercises, use resistance bands or free weights, and holistic exercises) in adults. Overall, among our active, young, and well-educated sample, the MSEQ showed substantial test-retest reliability and adequate validity when using a 7-day MSE log as the standard. While these findings need to be replicated in studies with a more representative sample, this Open access study suggests that the MSEQ has potential for use in future physical activity surveillance. Few studies have reported on the reliability and validity of existing MSE survey instruments, but 19 comparing our findings to similar studies is limited. For weekly MSE frequency, the MSEQ shows similar reliability and stronger validity compared with the MSE item from the BRFSS using a physical activity log. 30 However, the MSEQ expands on the BRFSS by assessing the frequency of four different types of MSE and muscle groups targeted, duration and intensity. A recent study examined the reliability and validity of MSE items (using a 7-day diary) from the Cancer Prevention Study-3. 42 That study assessed the MSE frequency and duration of similar MSE types using a single MSE question. In comparison, the MSEQ shows stronger reliability and similar validity to that study. While the study 42 included similar MSE types, expanding on the BRFSS, the MSEQ is more extensive as it allows for the additional assessment of intensity and muscle groups targeted. Compared with commonly used MVPA surveillance instruments, the MSEQ showed stronger reliability and validity. For example, compared with the frequency and duration items in the Global Physical Activity Questionnaire (GPAQ), the MSEQ was superior for both 7-day test-retest reliability and concurrent validity using activity logs. 
43 Validity of the MSEQ is stronger when compared with the leisure-time frequency and duration items contained in the International Physical Activity Questionnaire (IPAQ)-Long. 44 The potential reason for stronger reliability and validity observed in MSE, compared with MVPA, is likely because MSE is easier to recall and a more memorable physical activity. 17 Moreover, that in this study a high percentage of participants also meet the MSE guidelines. When designing the MSEQ, we decided to solely target MSE-related behaviours within the context of leisure time, and consequently not to include any activities accrued during occupational (eg, labouring/lifting) and domestic tasks (eg, carry shopping bags, gardening). This decision was to avoid any potential misclassification of other MSE-related behaviours. Furthermore, occupation-related physical activity is often undertaken at low/moderate intensity for long durations with limited time for recovery. 45 Moreover, it has been argued that the repetitive nature of undertaking MSE outside the context of leisure time may negatively influence health. For example, MSE within the occupational and domestic context may result in an increased risk of musculoskeletal disorders (eg, back, shoulder, neck injuries/pain) and arthritis/rheumatic diseases (eg, osteoarthritis, rheumatoid arthritis). 46 MSEQ: short and long format For potential use in future health surveillance, we adapted the MSEQ to be consistent with existing selfreported physical activity surveillance instruments, such as the widely used IPAQ 20 and GPAQ. 47 Specifically, we Open access developed two versions of the MSEQ, the MSEQ-Short and MSEQ-Long, each designed to be used in either a self-administered or interview-administered format (full versions shown in online supplemental digital content 2). 
The MSEQ-Short is a brief 6-item instrument that assesses any engagement in MSE ('yes' or 'no'), the usual weekly frequency (number of days), duration (minutes spent), intensity (range from 0 to 10), type of musclestrengthening exercise ('yes' or 'no' response to the four types of MSE), and muscle groups targeted ('yes' or 'no' response to seven muscle groups). The MSEQ-Long is a 20-item instrument that assesses the usual weekly frequency, duration, intensity, and the muscle groups targeted (similar responses as in MSEQ-Short), separately for all four types of MSE (weight machines, bodyweight exercises, resistance exercises and holistic exercises). As shown in online supplemental digital content 3, table, the reliability and validity of the MSEQ-Short items displayed mostly fair-to-moderate agreement and was moderateto-high for most of the MSEQ-Long items (see online supplemental digital content 4, table). These preliminary data suggest that the MSEQ-Short and MSEQ-Long have promise as a standardised MSE surveillance instrument. However, we urge caution, as both instruments have not yet been tested for reliability and validity in the format provided in online supplemental digital content 2. We now call for future studies to assess the psychometric properties of the MSEQ-Short and MSEQ-Long, with diverse population subgroups (eg, older adults, those from differing income/education levels) and translated into different languages. Limitations A key limitation of this study was our recruitment of a non-representative sample, which is likely to affect the generalisability of our findings. A further limitation was the self-reported nature of the online responses to the survey. There is a risk of responder recall bias (eg, social desirability or over-reporting/under-reporting of actual behaviour). However, there is no device-based measurement available for the assessment of MSE. This behaviour is routinely assessed by self-report in physical activity surveillance. 
A further limitation is that we were unable to establish the validity of the muscle groups targeted items, as this was not possible when comparing a single 'yes' or 'no' response in the 'main survey' to the daily 'yes' or 'no' response in the 7-day diary. A key strength of this study is that it is one of the first to assess the test-retest Open access reliability and concurrent validity of questions specifically developed to explore the constructs of MSE beyond frequency and duration. Moreover, the inclusion of the assessment of MSE intensity is a unique and important component that is not currently well understood. 48 CONCLUSION The newly developed MSEQ displayed adequate testretest reliability and concurrent validity in assessing multiple MSE participating constructs. Given that the current study included a sample of young, well educated, and active adults, further research is needed to examine whether these findings are generalisable to more representative samples.
import LinksCollection from "../../../src/DataModel/Collections/LinksCollection";
import { hydra } from "../../../src/namespaces";

// NOTE(review): classic function expressions (not arrows) are used for the
// Jasmine callbacks below so that the per-spec user context (`this`) set in
// beforeEach is shared with the it() blocks. With arrow functions inside an
// ES module, `this` is the module context (undefined in strict mode), so the
// fixtures assigned in beforeEach would not be reliably visible to the specs.
describe("Given instance of the LinksCollection", function() {
  beforeEach(function() {
    const target = "some:resource";
    this.link1 = { relation: "some:resource-url", target, type: [hydra.Link] };
    this.link2 = { relation: "some:other-url", target, type: [hydra.TemplatedLink] };
    this.link3 = { relation: "yet:another-url", target, type: [hydra.Link] };
    this.link4 = { relation: "yet:another-other-url", target, type: [hydra.Link] };
    this.allLinks = [this.link1, this.link2, this.link3, this.link4];
    this.links = new LinksCollection(this.allLinks);
  });

  it("should provide all links", function() {
    expect([...this.links]).toEqual(this.allLinks);
  });

  describe("when narrowing filters with relation type", function() {
    beforeEach(function() {
      // Fixed typo in the fixture name: "Norrowed" -> "Narrowed".
      this.relationTypeNarrowedOperations = this.links.withRelationOf("yet:another-url");
    });

    it("should provide only relation matching links", function() {
      expect([...this.relationTypeNarrowedOperations]).toEqual([this.link3]);
    });
  });

  describe("when narrowing filters with template", function() {
    beforeEach(function() {
      this.templateNarrowedOperations = this.links.withTemplate();
    });

    // Description fixed: this spec checks templated links, not relation type.
    it("should provide only templated links", function() {
      expect([...this.templateNarrowedOperations]).toEqual([this.link2]);
    });
  });
});
#!/usr/bin/env python3
# Copyright (c) 2021 oatsu
"""
Split mono/full labels around rests.

Labels are cut immediately before a ``pau`` phoneme. This assumes that all
consecutive rests have already been merged into a single one.
"""
from glob import glob
from os import makedirs
from os.path import basename, splitext
from sys import argv
from typing import List, Union

import utaupy as up
import yaml
from natsort import natsorted
from tqdm import tqdm
from utaupy.hts import HTSFullLabel
from utaupy.label import Label


def all_phonemes_are_rest(label: Union[Label, HTSFullLabel]) -> bool:
    """Return True when the full/mono label contains only rest phonemes (pau/sil)."""
    rests = {'pau', 'sil'}
    return all(phoneme.symbol in rests for phoneme in label)


def split_mono_label_short(label: Label) -> List[Label]:
    """Split a mono label at every ``pau`` and return the list of segments.

    A new segment starts at each ``pau`` phoneme; the ``pau`` itself becomes
    the first phoneme of the new segment.
    """
    new_label = Label()
    result = [new_label]
    new_label.append(label[0])
    for phoneme in label[1:-1]:
        if phoneme.symbol == 'pau':
            new_label = Label()
            result.append(new_label)
        new_label.append(phoneme)
    # Append the final phoneme to the last segment.
    new_label.append(label[-1])
    return result


def split_mono_label_middle(label: Label, frequency) -> List[Label]:
    """Split a mono label at every ``frequency``-th ``pau``.

    Args:
        label: mono label to split.
        frequency: split once per this many ``pau`` occurrences (must be > 0).

    Raises:
        ValueError: if ``frequency`` is not positive.
    """
    if frequency <= 0:
        raise ValueError('Argument "frequency" must be positive integer.')
    new_label = Label()
    result = [new_label]
    new_label.append(label[0])
    # Number of 'pau' phonemes seen since the last split.
    counter = 0
    for phoneme in label[1:-1]:
        if phoneme.symbol == 'pau':
            counter += 1
            # Start a new segment when the requested frequency is reached.
            if counter == frequency:
                new_label = Label()
                result.append(new_label)
                counter = 0
        new_label.append(phoneme)
    # Append the final phoneme to the last segment.
    new_label.append(label[-1])
    return result


def split_mono_label_long(label: Label) -> List[Label]:
    """Split a mono label at every [pau][sil] or [pau][pau] boundary.

    Compared with :func:`split_mono_label_short`, this cuts only where two
    rests are adjacent, so the resulting segments are longer.
    """
    new_label = Label()
    result = [new_label]
    new_label.append(label[0])
    for i, current_phoneme in enumerate(label[1:-1]):
        # label[1:-1][i] is label[i + 1], so the directly preceding phoneme is
        # label[i]. (The previous code used label[i - 1], which is off by one
        # and wraps around to the LAST phoneme when i == 0.)
        previous_phoneme = label[i]
        if (previous_phoneme.symbol, current_phoneme.symbol) in [('pau', 'sil'), ('pau', 'pau')]:
            new_label = Label()
            result.append(new_label)
        new_label.append(current_phoneme)
    # Append the final phoneme to the last segment.
    new_label.append(label[-1])
    return result


def split_full_label_short(full_label: HTSFullLabel) -> list:
    """Split a full label at every ``pau``.

    HTSFullLabel (rather than Song) is used so that as much context as
    possible is preserved in each segment.
    """
    new_label = HTSFullLabel()
    new_label.append(full_label[0])
    result = [new_label]
    for oneline in full_label[1:-1]:
        if oneline.phoneme.identity == 'pau':
            new_label = HTSFullLabel()
            result.append(new_label)
        new_label.append(oneline)
    # Append the final line to the last segment.
    new_label.append(full_label[-1])
    # If the trailing segment consists of rests only (e.g. an outro), merge it
    # into the preceding segment.
    if len(result) >= 2 and all_phonemes_are_rest(result[-1]):
        result[-2] += result[-1]
        del result[-1]
    return result


def split_full_label_middle(full_label: HTSFullLabel, frequency: int) -> List[HTSFullLabel]:
    """Split a full label at every ``frequency``-th ``pau``.

    Args:
        full_label: full label to split.
        frequency: split once per this many ``pau`` occurrences (must be > 0).

    Raises:
        ValueError: if ``frequency`` is not positive.
    """
    if frequency <= 0:
        raise ValueError('Argument "frequency" must be positive integer.')
    new_label = HTSFullLabel()
    result = [new_label]
    new_label.append(full_label[0])
    # Number of 'pau' phonemes seen since the last split.
    counter = 0
    for oneline in full_label[1:-1]:
        if oneline.phoneme.identity == 'pau':
            counter += 1
            if counter == frequency:
                new_label = HTSFullLabel()
                result.append(new_label)
                counter = 0
        new_label.append(oneline)
    # Append the final line to the last segment.
    new_label.append(full_label[-1])
    # Merge a rest-only trailing segment into the preceding one.
    if len(result) >= 2 and all_phonemes_are_rest(result[-1]):
        result[-2] += result[-1]
        del result[-1]
    return result


def split_full_label_long(full_label: HTSFullLabel) -> list:
    """Split a full label at every [pau][sil] or [pau][pau] boundary.

    Cutting at every rest (split_full_label_short) produced segments that were
    too short for training, so this cuts only where two rests are adjacent.
    """
    new_label = HTSFullLabel()
    new_label.append(full_label[0])
    result = [new_label]
    for oneline in full_label[1:-1]:
        if ((oneline.previous_phoneme.identity, oneline.phoneme.identity)
                in [('pau', 'sil'), ('pau', 'pau')]):
            new_label = HTSFullLabel()
            result.append(new_label)
        new_label.append(oneline)
    # Append the final line to the last segment.
    new_label.append(full_label[-1])
    # Merge a rest-only trailing segment into the preceding one.
    if len(result) >= 2 and all_phonemes_are_rest(result[-1]):
        result[-2] += result[-1]
        del result[-1]
    return result


def split_label(label: Union[Label, HTSFullLabel], mode: str, middle_frequency: int
                ) -> List[Union[Label, HTSFullLabel]]:
    """Split a label, automatically dispatching on mono vs full label.

    Args:
        label: mono (Label) or full (HTSFullLabel) label.
        mode: one of 'short', 'middle', 'long'.
        middle_frequency: pau count per split, used only when mode == 'middle'.

    Raises:
        ValueError: if ``mode`` is not one of the supported values.
        TypeError: if ``label`` is neither Label nor HTSFullLabel.
    """
    if mode not in ('short', 'middle', 'long'):
        raise ValueError('Argument "mode" must be "short", "middle" or "long".')
    if isinstance(label, Label):
        if mode == 'short':
            result = split_mono_label_short(label)
        elif mode == 'middle':
            result = split_mono_label_middle(label, middle_frequency)
        elif mode == 'long':
            result = split_mono_label_long(label)
    elif isinstance(label, HTSFullLabel):
        if mode == 'short':
            result = split_full_label_short(label)
        elif mode == 'middle':
            result = split_full_label_middle(label, middle_frequency)
        elif mode == 'long':
            result = split_full_label_long(label)
    else:
        # Previously this fell through and raised NameError on 'result'.
        raise TypeError(
            'Argument "label" must be Label object or HTSFullLabel object.')
    return result


def remove_zensou_and_kousou(path_lab):
    """Drop the first and last phonemes of a label file in place.

    Removes overly long phonemes (intro/interlude/outro rests) that would
    otherwise exhaust GPU memory during training.
    """
    label = up.label.load(path_lab)
    label.data = label.data[1:-1]
    label.write(path_lab)


def main(path_config_yaml):
    """Load the label files listed in the config and split each of them."""
    with open(path_config_yaml, 'r', encoding='utf-8') as fy:
        # safe_load: the config is plain data, no need for full YAML loading.
        config = yaml.safe_load(fy)
    out_dir = config['out_dir']
    mode = config['stage0']['segmentation_mode']
    middle_frequency = config['stage0']['middle_frequency']
    full_score_round_files = natsorted(glob(f'{out_dir}/full_score_round/*.lab'))
    mono_score_round_files = natsorted(glob(f'{out_dir}/mono_score_round/*.lab'))
    full_align_round_files = natsorted(glob(f'{out_dir}/full_align_round/*.lab'))
    mono_align_round_files = natsorted(glob(f'{out_dir}/mono_align_round/*.lab'))
    makedirs(f'{out_dir}/full_score_round_seg', exist_ok=True)
    makedirs(f'{out_dir}/full_align_round_seg', exist_ok=True)
    makedirs(f'{out_dir}/mono_score_round_seg', exist_ok=True)
    makedirs(f'{out_dir}/mono_align_round_seg', exist_ok=True)

    print('Segmenting full_score_round label files')
    for path in tqdm(full_score_round_files):
        songname = splitext(basename(path))[0]
        label = up.hts.load(path)
        for idx, segment in enumerate(split_label(label, mode, middle_frequency)):
            path_out = f'{out_dir}/full_score_round_seg/{songname}_seg{idx}.lab'
            segment.write(path_out, strict_sinsy_style=False)

    print('Segmenting full_align_round label files')
    for path in tqdm(full_align_round_files):
        songname = splitext(basename(path))[0]
        label = up.hts.load(path)
        for idx, segment in enumerate(split_label(label, mode, middle_frequency)):
            path_out = f'{out_dir}/full_align_round_seg/{songname}_seg{idx}.lab'
            segment.write(path_out, strict_sinsy_style=False)

    print('Segmenting mono_score_round label files')
    for path in tqdm(mono_score_round_files):
        songname = splitext(basename(path))[0]
        label = up.label.load(path)
        for idx, segment in enumerate(split_label(label, mode, middle_frequency)):
            path_out = f'{out_dir}/mono_score_round_seg/{songname}_seg{idx}.lab'
            segment.write(path_out)

    print('Segmenting mono_align_round label files')
    # NOTE: original comment warned that only this output folder name differs
    # from the pattern <input_folder>_seg — verify against downstream stages.
    for path in tqdm(mono_align_round_files):
        songname = splitext(basename(path))[0]
        label = up.label.load(path)
        for idx, segment in enumerate(split_label(label, mode, middle_frequency)):
            path_out = f'{out_dir}/mono_align_round_seg/{songname}_seg{idx}.lab'
            segment.write(path_out)


if __name__ == '__main__':
    if len(argv) == 1:
        main('config.yaml')
    else:
        main(argv[1].strip('"'))
A Comparison of Retorting and Supercritical Extraction Techniques on El-Lajjun Oil Shale In this study, the use of nitrogen retorting, carbon dioxide retorting, supercritical CO2 extraction, and supercritical H2O are compared for oil yield, quality, and the types and amounts of compounds eluted from Jordanian El-Lajjun oil shale. Results show that supercritical H2O (SC-H2O) produces 50% higher yields than nitrogen retorting (R-N2) while releasing higher molecular weight materials through solvation and pyrolysis. The use of supercritical CO2 (SC-CO2) provides the greatest production of mid-distillate compounds while producing the lowest overall yield due to the lack of pyrolysis. Retorting using CO2 (R-CO2) provides a narrower molecular-weight distribution than N2 while improving the oil yield slightly. It is also established that shale oil can be extracted by supercritical fluid extraction that is operated at substantially lower temperatures, where solvation dominates pyrolysis as a predominant mechanistic step. The potential of El-Lajjun oil shale as a valuable energy source has also been analyzed.
Interaction of olanzapine and propranolol It is well‐known that atypical antipsychotics increase the risk of several metabolic conditions, such as glucose intolerance and diabetes, weight gain, and hyperlipidemia. As with other psychotropic medications, concurrent use of atypical antipsychotics with some drugs used to treat comorbid medical conditions can also lead to drug‐drug interactions. The following report describes a drug interaction with olanzapine that resulted in the occurrence of sleepwalking.
// UnmarshalText unmarshals decimal128 from a textual representation. Satisfies // `encoding.TextUnmarshaler`. func (d *Decimal128) UnmarshalText(text []byte) error { var err error *d, err = ParseDecimal128(string(text)) return err }
def above_threshold(student_scores, threshold):
    """Return the scores that meet or exceed ``threshold``.

    The original stub was unimplemented (``pass``, returning None).

    :param student_scores: iterable of numeric scores.
    :param threshold: minimum score (inclusive) for inclusion.
    :return: list of scores >= threshold, preserving input order.
    """
    return [score for score in student_scores if score >= threshold]
# Lesson 1 script, challenge 1: read a person's name and greet them.
# (Removed the stray "<gh_stars>" marker, which is not valid Python, and
# fixed the typo "mensagemde" -> "mensagem de" in the printed instructions.)
print('Script Aula 1 - Desafio 1')
print('Crie um script python que leia o nome de uma pessoa e mostra uma mensagem de boas vindas de acordo com o valor digitado')
nome = input('Qual seu nome?')
print('Seja bem vindo gafanhoto', nome)
#include <iostream>
using namespace std;

// Reads w, a, b from stdin. If all three are non-negative, prints how far
// apart a and b are beyond the allowed gap w (or "0" when |a - b| <= w).
// Prints nothing when any input is negative, matching the original program.
int main() {
    int w, a, b;
    cin >> w >> a >> b;
    if (w >= 0 && a >= 0 && b >= 0) {
        // |a - b| > w  <=>  b > a + w or a > b + w in the original branches.
        int gap = (a > b) ? (a - b) : (b - a);
        if (gap > w) {
            cout << gap - w;
        } else {
            cout << "0";
        }
    }
}