content
stringlengths
10
4.9M
def gis_location_onaccept(form):
    # onaccept callback for gis_location records.
    #
    # During bulk imports, any imported hierarchy path is wiped (row IDs
    # differ between source and target systems, so an imported path cannot
    # be trusted) and the location-tree rebuild is deferred to an
    # asynchronous task instead of being recalculated inline per record.
    #
    # @param form: the FORM whose .vars carry the accepted record values
    #              (expects at least "id"; optionally "path" and "level")
    auth = current.auth
    vars = form.vars  # NOTE: shadows the builtin vars()
    id = vars.id      # NOTE: shadows the builtin id()
    if vars.path and current.response.s3.bulk:
        # Clear the stored path; it will be rebuilt by the update task
        db = current.db
        db(db.gis_location.id == id).update(path=None)
        # Skip the rebuild when running with auth override (e.g. prepop)
        # or inside a rolled-back transaction.
        # NOTE(review): nesting reconstructed from a flattened source —
        # confirm upstream whether this guard sits inside the bulk branch.
        if not auth.override and \
           not auth.rollback:
            # Serialize the minimal feature info the task needs
            feature = json.dumps(dict(id=id,
                                      level=vars.get("level", False),
                                      ))
            # NOTE(review): "async" is a reserved word from Python 3.7 on;
            # this is the legacy s3task API method name (later "run_async")
            current.s3task.async("gis_update_location_tree",
                                 args=[feature])
    return
//! Capture the screen with DXGI Desktop Duplication
//! This is a modified version of: https://github.com/bryal/dxgcap-rs
#![cfg(windows)]
extern crate winapi;
extern crate wio;
use std::mem::zeroed;
use std::{mem, ptr, slice};

use log::warn;
use winapi::shared::dxgi::{
    CreateDXGIFactory1, IDXGIAdapter, IDXGIAdapter1, IDXGIDevice, IDXGIDevice1, IDXGIFactory1,
    IDXGIOutput, IDXGISurface1, IID_IDXGIFactory1, DXGI_MAP_READ, DXGI_OUTPUT_DESC,
    DXGI_RESOURCE_PRIORITY_MAXIMUM,
};
use winapi::shared::dxgi1_2::{IDXGIOutput1, IDXGIOutputDuplication};
use winapi::shared::dxgitype::*;
// use winapi::shared::ntdef::*;
use std::io::Error;
use winapi::shared::minwindef::{BOOL, LPARAM, TRUE};
use winapi::shared::windef::*;
use winapi::shared::winerror::*;
use winapi::um::d3d11::*;
use winapi::um::d3dcommon::*;
use winapi::um::unknwnbase::*;
use winapi::um::winnt::GENERIC_READ;
use winapi::um::winuser::*;
use wio::com::ComPtr;

/// Color represented by additive channels: Blue (b), Green (g), Red (r), and Alpha (a).
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Eq, Ord)]
pub struct BGRA8 {
    pub b: u8,
    pub g: u8,
    pub r: u8,
    pub a: u8,
}

/// Possible errors when capturing
#[derive(Debug)]
pub enum CaptureError {
    /// Could not duplicate output, access denied. Might be in protected fullscreen.
    AccessDenied,
    /// Access to the duplicated output was lost. Likely, mode was changed e.g. window => full
    AccessLost,
    /// Error when trying to refresh outputs after some failure.
    RefreshFailure,
    /// AcquireNextFrame timed out.
    Timeout,
    /// General/Unexpected failure
    Fail(&'static str),
}

/// Check whether the HRESULT represents a failure
pub fn hr_failed(hr: HRESULT) -> bool {
    hr < 0
}

/// Create a DXGI 1.1 factory.
///
/// Panics on failure — a machine where this fails cannot capture at all.
fn create_dxgi_factory_1() -> ComPtr<IDXGIFactory1> {
    unsafe {
        let mut factory = ptr::null_mut();
        let hr = CreateDXGIFactory1(&IID_IDXGIFactory1, &mut factory);
        if hr_failed(hr) {
            panic!("Failed to create DXGIFactory1, {:x}", hr)
        } else {
            ComPtr::from_raw(factory as *mut IDXGIFactory1)
        }
    }
}

/// Create a D3D11 device + immediate context on the given adapter.
///
/// Panics on failure (same rationale as `create_dxgi_factory_1`).
fn d3d11_create_device(
    adapter: *mut IDXGIAdapter,
) -> (ComPtr<ID3D11Device>, ComPtr<ID3D11DeviceContext>) {
    unsafe {
        let (mut d3d11_device, mut device_context) = (ptr::null_mut(), ptr::null_mut());
        let mut feature_level = D3D_FEATURE_LEVEL_9_1;
        let hr = D3D11CreateDevice(
            adapter,
            D3D_DRIVER_TYPE_UNKNOWN,
            ptr::null_mut(),
            0,
            ptr::null_mut(),
            0,
            D3D11_SDK_VERSION,
            &mut d3d11_device,
            &mut feature_level,
            &mut device_context,
        );
        if hr_failed(hr) {
            panic!("Failed to create d3d11 device and device context, {:x}", hr)
        } else {
            (
                ComPtr::from_raw(d3d11_device as *mut ID3D11Device),
                ComPtr::from_raw(device_context),
            )
        }
    }
}

/// Enumerate the outputs (monitors) attached to `adapter`, keeping only
/// those attached to the desktop. Stops at the first enumeration failure
/// or the first detached output.
fn get_adapter_outputs(adapter: &IDXGIAdapter1) -> Vec<ComPtr<IDXGIOutput>> {
    let mut outputs = Vec::new();
    for i in 0.. {
        unsafe {
            let mut output = ptr::null_mut();
            if hr_failed(adapter.EnumOutputs(i, &mut output)) {
                break;
            } else {
                let mut out_desc = zeroed();
                (*output).GetDesc(&mut out_desc);
                if out_desc.AttachedToDesktop != 0 {
                    outputs.push(ComPtr::from_raw(output))
                } else {
                    break;
                }
            }
        }
    }
    outputs
}

/// Whether `output` is the primary monitor.
///
/// Bit 0 of MONITORINFO.dwFlags is MONITORINFOF_PRIMARY.
fn output_is_primary(output: &ComPtr<IDXGIOutput1>) -> bool {
    unsafe {
        let mut output_desc = zeroed();
        output.GetDesc(&mut output_desc);
        let mut monitor_info: MONITORINFO = zeroed();
        monitor_info.cbSize = mem::size_of::<MONITORINFO>() as u32;
        GetMonitorInfoW(output_desc.Monitor, &mut monitor_info);
        (monitor_info.dwFlags & 1) != 0
    }
}

/// Pick the capture source: index 0 is the primary monitor; index N (N > 0)
/// is the N-th non-primary monitor, in enumeration order.
fn get_capture_source(
    output_dups: Vec<(ComPtr<IDXGIOutputDuplication>, ComPtr<IDXGIOutput1>)>,
    cs_index: usize,
) -> Option<(ComPtr<IDXGIOutputDuplication>, ComPtr<IDXGIOutput1>)> {
    if cs_index == 0 {
        output_dups
            .into_iter()
            .find(|&(_, ref out)| output_is_primary(out))
    } else {
        output_dups
            .into_iter()
            .filter(|&(_, ref out)| !output_is_primary(out))
            .nth(cs_index - 1)
    }
}

/// Create an IDXGIOutputDuplication for every output, returning the device
/// together with the (duplication, output) pairs. Fails with the raw HRESULT
/// of the first DuplicateOutput that errors (e.g. E_ACCESSDENIED in
/// protected fullscreen).
#[allow(clippy::type_complexity)]
fn duplicate_outputs(
    mut device: ComPtr<ID3D11Device>,
    outputs: Vec<ComPtr<IDXGIOutput>>,
) -> Result<
    (
        ComPtr<ID3D11Device>,
        Vec<(ComPtr<IDXGIOutputDuplication>, ComPtr<IDXGIOutput1>)>,
    ),
    HRESULT,
> {
    let mut out_dups = Vec::new();
    for output in outputs
        .into_iter()
        .map(|out| out.cast::<IDXGIOutput1>().unwrap())
    {
        // `up`/`cast` round-trip keeps the device alive across the loop body
        let dxgi_device = device.up::<IUnknown>();
        let output_duplication = unsafe {
            let mut output_duplication = ptr::null_mut();
            let hr = output.DuplicateOutput(dxgi_device.as_raw(), &mut output_duplication);
            if hr_failed(hr) {
                return Err(hr);
            }
            ComPtr::from_raw(output_duplication)
        };
        device = dxgi_device.cast().unwrap();
        out_dups.push((output_duplication, output));
    }
    Ok((device, out_dups))
}

/// One duplicated output: the device/context that owns the duplication plus
/// the output it came from.
struct DuplicatedOutput {
    device: ComPtr<ID3D11Device>,
    device_context: ComPtr<ID3D11DeviceContext>,
    output: ComPtr<IDXGIOutput1>,
    output_duplication: ComPtr<IDXGIOutputDuplication>,
}

impl DuplicatedOutput {
    // Fetch the output's descriptor (desktop coordinates, rotation, monitor handle).
    fn get_desc(&self) -> DXGI_OUTPUT_DESC {
        unsafe {
            let mut desc = zeroed();
            self.output.GetDesc(&mut desc);
            desc
        }
    }

    // Acquire the next frame and copy it into a CPU-readable staging surface.
    // Returns the raw HRESULT on failure so the caller can map it to a
    // CaptureError (timeout, access lost, ...).
    fn capture_frame_to_surface(
        &mut self,
        timeout_ms: u32,
    ) -> Result<ComPtr<IDXGISurface1>, HRESULT> {
        let frame_resource = unsafe {
            let mut frame_resource = ptr::null_mut();
            let mut frame_info = zeroed();
            let hr = self.output_duplication.AcquireNextFrame(
                timeout_ms,
                &mut frame_info,
                &mut frame_resource,
            );
            if hr_failed(hr) {
                return Err(hr);
            }
            ComPtr::from_raw(frame_resource)
        };
        let frame_texture = frame_resource.cast::<ID3D11Texture2D>().unwrap();
        let mut texture_desc = unsafe {
            let mut texture_desc = zeroed();
            frame_texture.GetDesc(&mut texture_desc);
            texture_desc
        };
        // Configure the description to make the texture readable
        texture_desc.Usage = D3D11_USAGE_STAGING;
        texture_desc.BindFlags = 0;
        texture_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
        texture_desc.MiscFlags = 0;
        let readable_texture = unsafe {
            let mut readable_texture = ptr::null_mut();
            let hr = self
                .device
                .CreateTexture2D(&texture_desc, ptr::null(), &mut readable_texture);
            if hr_failed(hr) {
                return Err(hr);
            }
            ComPtr::from_raw(readable_texture)
        };
        // Lower priorities causes stuff to be needlessly copied from gpu to ram,
        // causing huge ram usage on some systems.
        unsafe { readable_texture.SetEvictionPriority(DXGI_RESOURCE_PRIORITY_MAXIMUM) };
        let readable_surface = readable_texture.up::<ID3D11Resource>();
        unsafe {
            self.device_context.CopyResource(
                readable_surface.as_raw(),
                frame_texture.up::<ID3D11Resource>().as_raw(),
            );
            // Return value deliberately ignored: the copied staging surface is
            // already ours even if releasing the source frame fails.
            self.output_duplication.ReleaseFrame();
        }
        readable_surface.cast()
    }
}

/// Manager of DXGI duplicated outputs
pub struct DXGIManager {
    desktop: HDESK,
    duplicated_output: Option<DuplicatedOutput>,
    capture_source_index: usize,
    timeout_ms: u32,
    screen_count: usize,
}

// Wrapper that lets a raw pointer cross the Send/Sync bound inside
// capture_frame_t's per-scanline closures.
// SAFETY relies on the single-threaded use there — the closures never run
// concurrently from multiple threads in this code.
struct SharedPtr<T>(*const T);
unsafe impl<T> Send for SharedPtr<T> {}
unsafe impl<T> Sync for SharedPtr<T> {}

impl DXGIManager {
    /// Construct a new manager with capture timeout
    pub fn new(timeout_ms: u32) -> Result<DXGIManager, &'static str> {
        // try to capture uac
        let desktop = unsafe { OpenInputDesktop(0, 0, GENERIC_READ) };
        if !desktop.is_null() && unsafe { SetThreadDesktop(desktop) } == 0 {
            warn!(
                "Failed to set thread desktop. This will prevent UAC dialogs from being visible."
            );
        }
        // let screen_count = unsafe { }
        let screens = enumerate_monitors();
        let screen_count: usize;
        if screens.is_empty() {
            screen_count = 0;
        } else {
            screen_count = screens.len();
        }
        let mut manager = DXGIManager {
            desktop,
            duplicated_output: None,
            capture_source_index: 0,
            timeout_ms,
            screen_count,
        };
        match manager.acquire_output_duplication() {
            Ok(_) => Ok(manager),
            Err(_) => Err("Failed to acquire output duplication"),
        }
    }

    /// (width, height) of the currently duplicated output in pixels.
    /// Panics if no output has been acquired yet.
    pub fn geometry(&self) -> (usize, usize) {
        let output_desc = self.duplicated_output.as_ref().unwrap().get_desc();
        let RECT {
            left,
            top,
            right,
            bottom,
        } = output_desc.DesktopCoordinates;
        ((right - left) as usize, (bottom - top) as usize)
    }

    /// Set index of capture source to capture from
    pub fn set_capture_source_index(&mut self, cs: usize) -> Result<(), ()> {
        self.capture_source_index = cs;
        self.acquire_output_duplication()
    }

    #[allow(unused)]
    pub fn get_capture_source_index(&self) -> usize {
        self.capture_source_index
    }

    /// Set timeout to use when capturing
    #[allow(unused)]
    pub fn set_timeout_ms(&mut self, timeout_ms: u32) {
        self.timeout_ms = timeout_ms
    }

    /// Number of monitors found at construction time.
    pub fn get_screen_count(&self) -> usize {
        self.screen_count
    }

    /// Duplicate and acquire output selected by `capture_source_index`
    pub fn acquire_output_duplication(&mut self) -> Result<(), ()> {
        self.duplicated_output = None;
        let factory = create_dxgi_factory_1();
        // Walk all adapters that have at least one attached output
        for (outputs, adapter) in (0..)
            .map(|i| {
                let mut adapter = ptr::null_mut();
                unsafe {
                    if factory.EnumAdapters1(i, &mut adapter) != DXGI_ERROR_NOT_FOUND {
                        Some(ComPtr::from_raw(adapter))
                    } else {
                        None
                    }
                }
            })
            .take_while(Option::is_some)
            .map(Option::unwrap)
            .map(|adapter| (get_adapter_outputs(&adapter), adapter))
            .filter(|&(ref outs, _)| !outs.is_empty())
        {
            // Creating device for each adapter that has the output
            let (d3d11_device, device_context) = d3d11_create_device(adapter.up().as_raw());
            // raise capture priority
            unsafe {
                d3d11_device
                    .cast::<IDXGIDevice>()
                    .unwrap()
                    .SetGPUThreadPriority(7)
            };
            unsafe {
                d3d11_device
                    .cast::<IDXGIDevice1>()
                    .unwrap()
                    .SetMaximumFrameLatency(1)
            };
            let (d3d11_device, output_duplications) =
                duplicate_outputs(d3d11_device, outputs).map_err(|_| ())?;
            if let Some((output_duplication, output)) =
                get_capture_source(output_duplications, self.capture_source_index)
            {
                self.duplicated_output = Some(DuplicatedOutput {
                    device: d3d11_device,
                    device_context,
                    output,
                    output_duplication,
                });
                return Ok(());
            }
        }
        Err(())
    }

    // Acquire a frame as a mapped staging surface, translating HRESULTs into
    // CaptureErrors and re-acquiring the duplication where that can help.
    fn capture_frame_to_surface(&mut self) -> Result<ComPtr<IDXGISurface1>, CaptureError> {
        if self.duplicated_output.is_none() {
            // Even a successful re-acquire returns an error here: the caller
            // is expected to retry on the next frame.
            if self.acquire_output_duplication().is_ok() {
                return Err(CaptureError::Fail("No valid duplicated output"));
            } else {
                return Err(CaptureError::RefreshFailure);
            }
        }
        let timeout_ms = self.timeout_ms;
        match self
            .duplicated_output
            .as_mut()
            .unwrap()
            .capture_frame_to_surface(timeout_ms)
        {
            Ok(surface) => Ok(surface),
            Err(DXGI_ERROR_ACCESS_LOST) => {
                // Mode change (e.g. windowed => fullscreen); try to re-acquire
                if self.acquire_output_duplication().is_ok() {
                    Err(CaptureError::AccessLost)
                } else {
                    Err(CaptureError::RefreshFailure)
                }
            }
            Err(E_ACCESSDENIED) => Err(CaptureError::AccessDenied),
            Err(DXGI_ERROR_WAIT_TIMEOUT) => Err(CaptureError::Timeout),
            Err(_) => {
                if self.acquire_output_duplication().is_ok() {
                    Err(CaptureError::Fail("Failure when acquiring frame"))
                } else {
                    Err(CaptureError::RefreshFailure)
                }
            }
        }
    }

    // Capture one frame into a Vec<T>, undoing any display rotation so the
    // result is always in landscape desktop orientation.
    // T is either BGRA8 (one element per pixel) or u8 (4 elements per pixel);
    // `byte_size` converts pixel counts into element counts for T.
    fn capture_frame_t<T: Copy + Send + Sync + Sized>(
        &mut self,
    ) -> Result<(Vec<T>, (usize, usize)), CaptureError> {
        let frame_surface = match self.capture_frame_to_surface() {
            Ok(surface) => surface,
            Err(e) => return Err(e),
        };
        let mapped_surface = unsafe {
            let mut mapped_surface = zeroed();
            if hr_failed(frame_surface.Map(&mut mapped_surface, DXGI_MAP_READ)) {
                frame_surface.Release();
                return Err(CaptureError::Fail("Failed to map surface"));
            }
            mapped_surface
        };
        let byte_size = |x| x * mem::size_of::<BGRA8>() / mem::size_of::<T>();
        let output_desc = self.duplicated_output.as_mut().unwrap().get_desc();
        // Pitch may include padding, so stride can exceed the visible width
        let stride = mapped_surface.Pitch as usize / mem::size_of::<BGRA8>();
        let byte_stride = byte_size(stride);
        let (output_width, output_height) = {
            let RECT {
                left,
                top,
                right,
                bottom,
            } = output_desc.DesktopCoordinates;
            ((right - left) as usize, (bottom - top) as usize)
        };
        let mut pixel_buf = Vec::with_capacity(byte_size(output_width * output_height));
        // For 90/270 degree rotations the surface is stored sideways, so the
        // number of mapped scan lines equals the output *width*
        let scan_lines = match output_desc.Rotation {
            DXGI_MODE_ROTATION_ROTATE90 | DXGI_MODE_ROTATION_ROTATE270 => output_width,
            _ => output_height,
        };
        let mapped_pixels = unsafe {
            slice::from_raw_parts(mapped_surface.pBits as *const T, byte_stride * scan_lines)
        };
        match output_desc.Rotation {
            DXGI_MODE_ROTATION_IDENTITY | DXGI_MODE_ROTATION_UNSPECIFIED => {
                pixel_buf.extend_from_slice(mapped_pixels)
            }
            // Each rotation branch writes every output pixel exactly once via
            // raw pointers into pixel_buf's spare capacity, then marks the
            // buffer fully initialized with set_len.
            DXGI_MODE_ROTATION_ROTATE90 => unsafe {
                let ptr = SharedPtr(pixel_buf.as_ptr() as *const BGRA8);
                mapped_pixels
                    .chunks(byte_stride)
                    .rev()
                    .enumerate()
                    .for_each(|(column, chunk)| {
                        let mut src = chunk.as_ptr() as *const BGRA8;
                        let mut dst = ptr.0 as *mut BGRA8;
                        dst = dst.add(column);
                        let stop = src.add(output_height);
                        while src != stop {
                            dst.write(*src);
                            src = src.add(1);
                            dst = dst.add(output_width);
                        }
                    });
                pixel_buf.set_len(pixel_buf.capacity());
            },
            DXGI_MODE_ROTATION_ROTATE180 => unsafe {
                let ptr = SharedPtr(pixel_buf.as_ptr() as *const BGRA8);
                mapped_pixels
                    .chunks(byte_stride)
                    .rev()
                    .enumerate()
                    .for_each(|(scan_line, chunk)| {
                        let mut src = chunk.as_ptr() as *const BGRA8;
                        let mut dst = ptr.0 as *mut BGRA8;
                        dst = dst.add(scan_line * output_width);
                        let stop = src;
                        src = src.add(output_width);
                        while src != stop {
                            src = src.sub(1);
                            dst.write(*src);
                            dst = dst.add(1);
                        }
                    });
                pixel_buf.set_len(pixel_buf.capacity());
            },
            DXGI_MODE_ROTATION_ROTATE270 => unsafe {
                let ptr = SharedPtr(pixel_buf.as_ptr() as *const BGRA8);
                mapped_pixels
                    .chunks(byte_stride)
                    .enumerate()
                    .for_each(|(column, chunk)| {
                        let mut src = chunk.as_ptr() as *const BGRA8;
                        let mut dst = ptr.0 as *mut BGRA8;
                        dst = dst.add(column);
                        let stop = src;
                        src = src.add(output_height);
                        while src != stop {
                            src = src.sub(1);
                            dst.write(*src);
                            dst = dst.add(output_width);
                        }
                    });
                pixel_buf.set_len(pixel_buf.capacity());
            },
            n => unreachable!("Undefined DXGI_MODE_ROTATION: {}", n),
        }
        unsafe { frame_surface.Unmap() };
        Ok((pixel_buf, (output_width, output_height)))
    }

    /// Capture a frame
    ///
    /// On success, return Vec with pixels and width and height of frame.
    /// On failure, return CaptureError.
    pub fn capture_frame(&mut self) -> Result<(Vec<BGRA8>, (usize, usize)), CaptureError> {
        self.capture_frame_t()
    }

    /// Capture a frame
    ///
    /// On success, return Vec with pixel components and width and height of frame.
    /// On failure, return CaptureError.
    #[allow(unused)]
    pub fn capture_frame_components(&mut self) -> Result<(Vec<u8>, (usize, usize)), CaptureError> {
        self.capture_frame_t()
    }
}

impl Drop for DXGIManager {
    // Close the input-desktop handle opened in `new` (if any).
    fn drop(&mut self) {
        if !self.desktop.is_null() {
            unsafe { CloseDesktop(self.desktop) };
        }
    }
}

// Enumerate all display monitors via the Win32 EnumDisplayMonitors API.
// Panics if enumeration itself fails.
fn enumerate_monitors() -> Vec<MONITORINFOEXW> {
    // Define the vector where we will store the result
    let mut monitors = Vec::<MONITORINFOEXW>::new();
    let userdata = &mut monitors as *mut _;
    let result = unsafe {
        EnumDisplayMonitors(
            ptr::null_mut(),
            ptr::null(),
            Some(enumerate_monitors_callback),
            userdata as LPARAM,
        )
    };
    if result != TRUE {
        // Get the last error for the current thread.
        // This is analogous to calling the Win32 API GetLastError.
        panic!("Could not enumerate monitors: {}", Error::last_os_error());
    }
    monitors
}

// Per-monitor callback for EnumDisplayMonitors; appends each monitor's info
// to the Vec passed through `userdata`. Always returns TRUE to continue
// enumeration.
unsafe extern "system" fn enumerate_monitors_callback(
    monitor: HMONITOR,
    _: HDC,
    _: LPRECT,
    userdata: LPARAM,
) -> BOOL {
    // Get the userdata where we will store the result
    let monitors: &mut Vec<MONITORINFOEXW> = mem::transmute(userdata);
    // Initialize the MONITORINFOEXW structure and get a pointer to it
    let mut monitor_info: MONITORINFOEXW = mem::zeroed();
    monitor_info.cbSize = mem::size_of::<MONITORINFOEXW>() as u32;
    let monitor_info_ptr = <*mut _>::cast(&mut monitor_info);
    // Call the GetMonitorInfoW win32 API
    let result = GetMonitorInfoW(monitor, monitor_info_ptr);
    if result == TRUE {
        // Push the information we received to userdata
        monitors.push(monitor_info);
    }
    TRUE
}

// Smoke test: capture repeatedly and print average channel values.
// Requires a real Windows desktop session to pass.
#[test]
fn test() {
    let mut manager = DXGIManager::new(300).unwrap();
    for _ in 0..100 {
        match manager.capture_frame() {
            Ok((pixels, (_, _))) => {
                let len = pixels.len() as u64;
                let (r, g, b) = pixels.into_iter().fold((0u64, 0u64, 0u64), |(r, g, b), p| {
                    (r + p.r as u64, g + p.g as u64, b + p.b as u64)
                });
                println!("avg: {} {} {}", r / len, g / len, b / len)
            }
            Err(e) => println!("error: {:?}", e),
        }
    }
}

// The BGRA8 and u8 capture paths must agree on geometry and total byte count.
#[test]
fn compare_frame_dims() {
    let mut manager = DXGIManager::new(300).unwrap();
    let (frame, (fw, fh)) = manager.capture_frame().unwrap();
    let (frame_u8, (fu8w, fu8h)) = manager.capture_frame_components().unwrap();
    assert_eq!(fw, fu8w);
    assert_eq!(fh, fu8h);
    assert_eq!(4 * frame.len(), frame_u8.len());
}
package css_parser

import (
	"fmt"
	"strings"

	"github.com/evanw/esbuild/internal/ast"
	"github.com/evanw/esbuild/internal/compat"
	"github.com/evanw/esbuild/internal/css_ast"
	"github.com/evanw/esbuild/internal/css_lexer"
	"github.com/evanw/esbuild/internal/logger"
)

// This is mostly a normal CSS parser with one exception: the addition of
// support for parsing https://drafts.csswg.org/css-nesting-1/.
type parser struct {
	log           logger.Log
	source        logger.Source
	tracker       logger.LineColumnTracker
	options       Options
	tokens        []css_lexer.Token // full token stream for the source
	stack         []css_lexer.T
	index         int // current position in "tokens"
	end           int // parse limit (may be < len(tokens))
	prevError     logger.Loc // last reported location, used to dedupe warnings
	importRecords []ast.ImportRecord
}

// Options configures parsing/minification behavior.
type Options struct {
	UnsupportedCSSFeatures compat.CSSFeature
	MangleSyntax           bool
	RemoveWhitespace       bool
}

// Parse tokenizes the source and parses it into a CSS AST, collecting
// import records for "@import" and "url(...)" references along the way.
func Parse(log logger.Log, source logger.Source, options Options) css_ast.AST {
	p := parser{
		log:       log,
		source:    source,
		tracker:   logger.MakeLineColumnTracker(&source),
		options:   options,
		tokens:    css_lexer.Tokenize(log, source),
		prevError: logger.Loc{Start: -1},
	}
	p.end = len(p.tokens)
	tree := css_ast.AST{}
	tree.Rules = p.parseListOfRules(ruleContext{
		isTopLevel:     true,
		parseSelectors: true,
	})
	tree.ImportRecords = p.importRecords
	p.expect(css_lexer.TEndOfFile)
	return tree
}

// advance moves to the next token, clamped at the parse limit.
func (p *parser) advance() {
	if p.index < p.end {
		p.index++
	}
}

// at returns the token at "index", synthesizing an end-of-file token when
// the index is at or past the parse limit.
func (p *parser) at(index int) css_lexer.Token {
	if index < p.end {
		return p.tokens[index]
	}
	if p.end < len(p.tokens) {
		return css_lexer.Token{
			Kind:  css_lexer.TEndOfFile,
			Range: logger.Range{Loc: p.tokens[p.end].Range.Loc},
		}
	}
	return css_lexer.Token{
		Kind:  css_lexer.TEndOfFile,
		Range: logger.Range{Loc: logger.Loc{Start: int32(len(p.source.Contents))}},
	}
}

func (p *parser) current() css_lexer.Token {
	return p.at(p.index)
}

func (p *parser) next() css_lexer.Token {
	return p.at(p.index + 1)
}

// raw returns the current token's verbatim source text.
func (p *parser) raw() string {
	t := p.current()
	return p.source.Contents[t.Range.Loc.Start:t.Range.End()]
}

// decoded returns the current token's text with escapes decoded.
func (p *parser) decoded() string {
	return p.current().DecodedText(p.source.Contents)
}

func (p *parser) peek(kind css_lexer.T) bool {
	return kind == p.current().Kind
}

// eat consumes the current token if it matches "kind".
func (p *parser) eat(kind css_lexer.T) bool {
	if p.peek(kind) {
		p.advance()
		return true
	}
	return false
}

// expect is like eat but reports a warning (deduped by location) when the
// expected token is missing.
func (p *parser) expect(kind css_lexer.T) bool {
	if p.eat(kind) {
		return true
	}
	t := p.current()
	var text string
	if kind == css_lexer.TSemicolon && p.index > 0 && p.at(p.index-1).Kind == css_lexer.TWhitespace {
		// Have a nice error message for forgetting a trailing semicolon
		text = "Expected \";\""
		t = p.at(p.index - 1)
	} else {
		switch t.Kind {
		case css_lexer.TEndOfFile, css_lexer.TWhitespace:
			text = fmt.Sprintf("Expected %s but found %s", kind.String(), t.Kind.String())
			t.Range.Len = 0
		case css_lexer.TBadURL, css_lexer.TBadString:
			text = fmt.Sprintf("Expected %s but found %s", kind.String(), t.Kind.String())
		default:
			text = fmt.Sprintf("Expected %s but found %q", kind.String(), p.raw())
		}
	}
	if t.Range.Loc.Start > p.prevError.Start {
		p.log.AddRangeWarning(&p.tracker, t.Range, text)
		p.prevError = t.Range.Loc
	}
	return false
}

// unexpected reports a warning for an unexpected current token (deduped).
func (p *parser) unexpected() {
	if t := p.current(); t.Range.Loc.Start > p.prevError.Start {
		var text string
		switch t.Kind {
		case css_lexer.TEndOfFile, css_lexer.TWhitespace:
			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
			t.Range.Len = 0
		case css_lexer.TBadURL, css_lexer.TBadString:
			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
		default:
			text = fmt.Sprintf("Unexpected %q", p.raw())
		}
		p.log.AddRangeWarning(&p.tracker, t.Range, text)
		p.prevError = t.Range.Loc
	}
}

type ruleContext struct {
	isTopLevel     bool
	parseSelectors bool
}

// parseListOfRules parses rules until EOF or "}". At the top level it also
// validates "@charset"/"@import" ordering and skips CDO/CDC tokens.
func (p *parser) parseListOfRules(context ruleContext) []css_ast.R {
	didWarnAboutCharset := false
	didWarnAboutImport := false
	rules := []css_ast.R{}
	locs := []logger.Loc{} // rule start locations, used in ordering warnings
loop:
	for {
		switch p.current().Kind {
		case css_lexer.TEndOfFile, css_lexer.TCloseBrace:
			break loop
		case css_lexer.TWhitespace:
			p.advance()
			continue
		case css_lexer.TAtKeyword:
			first := p.current().Range
			rule := p.parseAtRule(atRuleContext{})
			// Validate structure
			if context.isTopLevel {
				switch rule.(type) {
				case *css_ast.RAtCharset:
					if !didWarnAboutCharset && len(rules) > 0 {
						p.log.AddRangeWarningWithNotes(&p.tracker, first, "\"@charset\" must be the first rule in the file",
							[]logger.MsgData{logger.RangeData(&p.tracker, logger.Range{Loc: locs[len(locs)-1]},
								"This rule cannot come before a \"@charset\" rule")})
						didWarnAboutCharset = true
					}
				case *css_ast.RAtImport:
					if !didWarnAboutImport {
					importLoop:
						for i, before := range rules {
							switch before.(type) {
							case *css_ast.RAtCharset, *css_ast.RAtImport:
							default:
								p.log.AddRangeWarningWithNotes(&p.tracker, first, "All \"@import\" rules must come first",
									[]logger.MsgData{logger.RangeData(&p.tracker, logger.Range{Loc: locs[i]},
										"This rule cannot come before an \"@import\" rule")})
								didWarnAboutImport = true
								break importLoop
							}
						}
					}
				}
			}
			rules = append(rules, rule)
			if context.isTopLevel {
				locs = append(locs, first.Loc)
			}
			continue
		case css_lexer.TCDO, css_lexer.TCDC:
			// HTML comment delimiters are only valid at the top level
			if context.isTopLevel {
				p.advance()
				continue
			}
		}
		if context.isTopLevel {
			locs = append(locs, p.current().Range.Loc)
		}
		if context.parseSelectors {
			rules = append(rules, p.parseSelectorRule())
		} else {
			rules = append(rules, p.parseQualifiedRuleFrom(p.index, false /* isAlreadyInvalid */))
		}
	}
	if p.options.MangleSyntax {
		rules = removeEmptyAndDuplicateRules(rules)
	}
	return rules
}

// parseListOfDeclarations parses the interior of a declaration block,
// including nested at-rules and "&"-prefixed nested selectors.
func (p *parser) parseListOfDeclarations() (list []css_ast.R) {
	for {
		switch p.current().Kind {
		case css_lexer.TWhitespace, css_lexer.TSemicolon:
			p.advance()
		case css_lexer.TEndOfFile, css_lexer.TCloseBrace:
			list = p.processDeclarations(list)
			if p.options.MangleSyntax {
				list = removeEmptyAndDuplicateRules(list)
			}
			return
		case css_lexer.TAtKeyword:
			list = append(list, p.parseAtRule(atRuleContext{
				isDeclarationList: true,
			}))
		case css_lexer.TDelimAmpersand:
			// Reference: https://drafts.csswg.org/css-nesting-1/
			list = append(list, p.parseSelectorRule())
		default:
			list = append(list, p.parseDeclaration())
		}
	}
}

// removeEmptyAndDuplicateRules drops empty rules and, for rules that hash
// equal and compare equal, keeps only the last occurrence. Order of the
// surviving rules is preserved (the slice is compacted in place from the
// back).
func removeEmptyAndDuplicateRules(rules []css_ast.R) []css_ast.R {
	type hashEntry struct {
		indices []uint32
	}
	n := len(rules)
	start := n
	entries := make(map[uint32]hashEntry)
	// Scan from the back so we keep the last rule
skipRule:
	for i := n - 1; i >= 0; i-- {
		rule := rules[i]
		switch r := rule.(type) {
		case *css_ast.RAtKeyframes:
			if len(r.Blocks) == 0 {
				continue
			}
		case *css_ast.RKnownAt:
			if len(r.Rules) == 0 {
				continue
			}
		case *css_ast.RSelector:
			if len(r.Rules) == 0 {
				continue
			}
		}
		if hash, ok := rule.Hash(); ok {
			entry := entries[hash]
			// For duplicate rules, omit all but the last copy
			for _, index := range entry.indices {
				if rule.Equal(rules[index]) {
					continue skipRule
				}
			}
			entry.indices = append(entry.indices, uint32(i))
			entries[hash] = entry
		}
		start--
		rules[start] = rule
	}
	return rules[start:]
}

// parseURLOrString accepts a string token, a url token, or a "url(...)"
// function wrapping a string, returning the decoded text and its range.
func (p *parser) parseURLOrString() (string, logger.Range, bool) {
	t := p.current()
	switch t.Kind {
	case css_lexer.TString:
		text := p.decoded()
		p.advance()
		return text, t.Range, true
	case css_lexer.TURL:
		text := p.decoded()
		p.advance()
		return text, t.Range, true
	case css_lexer.TFunction:
		if p.decoded() == "url" {
			p.advance()
			t = p.current()
			text := p.decoded()
			if p.expect(css_lexer.TString) && p.expect(css_lexer.TCloseParen) {
				return text, t.Range, true
			}
		}
	}
	return "", logger.Range{}, false
}

// expectURLOrString is parseURLOrString with a warning on failure.
func (p *parser) expectURLOrString() (url string, r logger.Range, ok bool) {
	url, r, ok = p.parseURLOrString()
	if !ok {
		p.expect(css_lexer.TURL)
	}
	return
}

// atRuleKind classifies how a known at-rule's block should be parsed.
type atRuleKind uint8

const (
	atRuleUnknown        atRuleKind = iota
	atRuleDeclarations              // block is a list of declarations
	atRuleInheritContext            // block inherits the surrounding context
	atRuleEmpty                     // no block expected (prelude + ";")
)

var specialAtRules = map[string]atRuleKind{
	"font-face": atRuleDeclarations,
	"page":      atRuleDeclarations,

	// These go inside "@page": https://www.w3.org/TR/css-page-3/#syntax-page-selector
	"bottom-center":       atRuleDeclarations,
	"bottom-left-corner":  atRuleDeclarations,
	"bottom-left":         atRuleDeclarations,
	"bottom-right-corner": atRuleDeclarations,
	"bottom-right":        atRuleDeclarations,
	"left-bottom":         atRuleDeclarations,
	"left-middle":         atRuleDeclarations,
	"left-top":            atRuleDeclarations,
	"right-bottom":        atRuleDeclarations,
	"right-middle":        atRuleDeclarations,
	"right-top":           atRuleDeclarations,
	"top-center":          atRuleDeclarations,
	"top-left-corner":     atRuleDeclarations,
	"top-left":            atRuleDeclarations,
	"top-right-corner":    atRuleDeclarations,
	"top-right":           atRuleDeclarations,

	// These properties are very deprecated and appear to only be useful for
	// mobile versions of internet explorer (which may no longer exist?), but
	// they are used by the https://ant.design/ design system so we recognize
	// them to avoid the warning.
	//
	// Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@viewport
	// Discussion: https://github.com/w3c/csswg-drafts/issues/4766
	//
	"viewport":     atRuleDeclarations,
	"-ms-viewport": atRuleDeclarations,

	// This feature has been removed from the web because it's actively harmful.
	// However, there is one exception where "@-moz-document url-prefix() {" is
	// accepted by Firefox to basically be an "if Firefox" conditional rule.
	//
	// Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@document
	// Discussion: https://bugzilla.mozilla.org/show_bug.cgi?id=1035091
	//
	"document":      atRuleInheritContext,
	"-moz-document": atRuleInheritContext,

	"media":    atRuleInheritContext,
	"scope":    atRuleInheritContext,
	"supports": atRuleInheritContext,
}

type atRuleContext struct {
	isDeclarationList bool
}

// parseAtRule parses one at-rule. "@charset", "@import", and "@keyframes"
// get dedicated handling; everything else is classified via specialAtRules
// and parsed generically (prelude + optional block).
func (p *parser) parseAtRule(context atRuleContext) css_ast.R {
	// Parse the name
	atToken := p.decoded()
	atRange := p.current().Range
	kind := specialAtRules[atToken]
	p.advance()

	// Parse the prelude
	preludeStart := p.index
	switch atToken {
	case "charset":
		kind = atRuleEmpty
		p.expect(css_lexer.TWhitespace)
		if p.peek(css_lexer.TString) {
			encoding := p.decoded()
			if encoding != "UTF-8" {
				p.log.AddRangeWarning(&p.tracker, p.current().Range,
					fmt.Sprintf("\"UTF-8\" will be used instead of unsupported charset %q", encoding))
			}
			p.advance()
			p.expect(css_lexer.TSemicolon)
			return &css_ast.RAtCharset{Encoding: encoding}
		}
		p.expect(css_lexer.TString)

	case "import":
		kind = atRuleEmpty
		p.eat(css_lexer.TWhitespace)
		if path, r, ok := p.expectURLOrString(); ok {
			// Everything up to the ";" is the import's media/supports condition
			importConditionsStart := p.index
			for p.current().Kind != css_lexer.TSemicolon && p.current().Kind != css_lexer.TEndOfFile {
				p.parseComponentValue()
			}
			importConditions := p.convertTokens(p.tokens[importConditionsStart:p.index])
			kind := ast.ImportAt
			// Insert or remove whitespace before the first token
			if len(importConditions) > 0 {
				kind = ast.ImportAtConditional
				if p.options.RemoveWhitespace {
					importConditions[0].Whitespace &= ^css_ast.WhitespaceBefore
				} else {
					importConditions[0].Whitespace |= css_ast.WhitespaceBefore
				}
			}
			p.expect(css_lexer.TSemicolon)
			importRecordIndex := uint32(len(p.importRecords))
			p.importRecords = append(p.importRecords, ast.ImportRecord{
				Kind:  kind,
				Path:  logger.Path{Text: path},
				Range: r,
			})
			return &css_ast.RAtImport{
				ImportRecordIndex: importRecordIndex,
				ImportConditions:  importConditions,
			}
		}

	case "keyframes", "-webkit-keyframes", "-moz-keyframes", "-ms-keyframes", "-o-keyframes":
		p.eat(css_lexer.TWhitespace)
		var name string
		if p.peek(css_lexer.TIdent) {
			name = p.decoded()
			p.advance()
		} else if !p.expect(css_lexer.TIdent) && !p.eat(css_lexer.TString) && !p.peek(css_lexer.TOpenBrace) {
			// Consider string names a syntax error even though they are allowed by
			// the specification and they work in Firefox because they do not work in
			// Chrome or Safari.
			break
		}
		p.eat(css_lexer.TWhitespace)
		if p.expect(css_lexer.TOpenBrace) {
			var blocks []css_ast.KeyframeBlock
		blocks:
			for {
				switch p.current().Kind {
				case css_lexer.TWhitespace:
					p.advance()
					continue
				case css_lexer.TCloseBrace, css_lexer.TEndOfFile:
					break blocks
				case css_lexer.TOpenBrace:
					p.expect(css_lexer.TPercentage)
					p.parseComponentValue()
				default:
					var selectors []string
				selectors:
					for {
						t := p.current()
						switch t.Kind {
						case css_lexer.TWhitespace:
							p.advance()
							continue
						case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
							break selectors
						case css_lexer.TIdent, css_lexer.TPercentage:
							text := p.decoded()
							if t.Kind == css_lexer.TIdent {
								if text == "from" {
									if p.options.MangleSyntax {
										text = "0%" // "0%" is equivalent to but shorter than "from"
									}
								} else if text != "to" {
									p.expect(css_lexer.TPercentage)
								}
							} else if p.options.MangleSyntax && text == "100%" {
								text = "to" // "to" is equivalent to but shorter than "100%"
							}
							selectors = append(selectors, text)
							p.advance()
						default:
							p.expect(css_lexer.TPercentage)
							p.parseComponentValue()
						}
						p.eat(css_lexer.TWhitespace)
						if t.Kind != css_lexer.TComma && !p.peek(css_lexer.TOpenBrace) {
							p.expect(css_lexer.TComma)
						}
					}
					if p.expect(css_lexer.TOpenBrace) {
						rules := p.parseListOfDeclarations()
						p.expect(css_lexer.TCloseBrace)
						// "@keyframes { from {} to { color: red } }" => "@keyframes { to { color: red } }"
						if !p.options.MangleSyntax || len(rules) > 0 {
							blocks = append(blocks, css_ast.KeyframeBlock{
								Selectors: selectors,
								Rules:     rules,
							})
						}
					}
				}
			}
			p.expect(css_lexer.TCloseBrace)
			return &css_ast.RAtKeyframes{
				AtToken: atToken,
				Name:    name,
				Blocks:  blocks,
			}
		}

	default:
		// Warn about unsupported at-rules since they will be passed through
		// unmodified and may be part of a CSS preprocessor syntax that should
		// have been compiled away but wasn't.
		//
		// The list of supported at-rules that esbuild draws from is here:
		// https://developer.mozilla.org/en-US/docs/Web/CSS/At-rule. Deprecated
		// and Firefox-only at-rules have been removed.
		if kind == atRuleUnknown {
			if atToken == "namespace" {
				// CSS namespaces are a weird feature that appears to only really be
				// useful for styling XML. And the world has moved on from XHTML to
				// HTML5 so pretty much no one uses CSS namespaces anymore. They are
				// also complicated to support in a bundler because CSS namespaces are
				// file-scoped, which means:
				//
				// * Default namespaces can be different in different files, in which
				//   case some default namespaces would have to be converted to prefixed
				//   namespaces to avoid collisions.
				//
				// * Prefixed namespaces from different files can use the same name, in
				//   which case some prefixed namespaces would need to be renamed to
				//   avoid collisions.
				//
				// Instead of implementing all of that for an extremely obscure feature,
				// CSS namespaces are just explicitly not supported.
				p.log.AddRangeWarning(&p.tracker, atRange, "\"@namespace\" rules are not supported")
			} else {
				p.log.AddRangeWarning(&p.tracker, atRange, fmt.Sprintf("%q is not a known rule name", "@"+atToken))
			}
		}
	}

	// Parse an unknown prelude
prelude:
	for {
		switch p.current().Kind {
		case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
			break prelude

		case css_lexer.TSemicolon, css_lexer.TCloseBrace:
			prelude := p.convertTokens(p.tokens[preludeStart:p.index])

			// Report an error for rules that should have blocks
			if kind != atRuleEmpty && kind != atRuleUnknown {
				p.expect(css_lexer.TOpenBrace)
				p.eat(css_lexer.TSemicolon)
				return &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude}
			}

			// Otherwise, parse an unknown at rule
			p.expect(css_lexer.TSemicolon)
			return &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude}

		default:
			p.parseComponentValue()
		}
	}
	prelude := p.convertTokens(p.tokens[preludeStart:p.index])
	blockStart := p.index

	switch kind {
	case atRuleEmpty:
		// Report an error for rules that shouldn't have blocks
		p.expect(css_lexer.TSemicolon)
		p.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
		block := p.convertTokens(p.tokens[blockStart:p.index])
		return &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude, Block: block}

	case atRuleDeclarations:
		// Parse known rules whose blocks consist of whatever the current context is
		p.advance()
		rules := p.parseListOfDeclarations()
		p.expect(css_lexer.TCloseBrace)
		return &css_ast.RKnownAt{AtToken: atToken, Prelude: prelude, Rules: rules}

	case atRuleInheritContext:
		// Parse known rules whose blocks consist of whatever the current context is
		p.advance()
		var rules []css_ast.R
		if context.isDeclarationList {
			rules = p.parseListOfDeclarations()
		} else {
			rules = p.parseListOfRules(ruleContext{
				parseSelectors: true,
			})
		}
		p.expect(css_lexer.TCloseBrace)
		return &css_ast.RKnownAt{AtToken: atToken, Prelude: prelude, Rules: rules}

	default:
		// Otherwise, parse an unknown rule
		p.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
		block, _ := p.convertTokensHelper(p.tokens[blockStart:p.index], css_lexer.TEndOfFile, convertTokensOpts{allowImports: true})
		return &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude, Block: block}
	}
}

// convertTokens converts lexer tokens into AST tokens with default options.
func (p *parser) convertTokens(tokens []css_lexer.Token) []css_ast.Token {
	result, _ := p.convertTokensHelper(tokens, css_lexer.TEndOfFile, convertTokensOpts{})
	return result
}

type convertTokensOpts struct {
	allowImports       bool
	verbatimWhitespace bool
}

// convertTokensHelper converts lexer tokens into AST tokens up to (and
// consuming) the "close" token, recursing into (), {}, [] and function
// calls. Whitespace is normally folded into before/after flags on adjacent
// tokens; "verbatimWhitespace" (used inside var()) preserves it exactly.
// Returns the converted tokens and the remaining unconsumed input.
func (p *parser) convertTokensHelper(tokens []css_lexer.Token, close css_lexer.T, opts convertTokensOpts) ([]css_ast.Token, []css_lexer.Token) {
	var result []css_ast.Token
	var nextWhitespace css_ast.WhitespaceFlags
loop:
	for len(tokens) > 0 {
		t := tokens[0]
		tokens = tokens[1:]
		if t.Kind == close {
			break loop
		}
		token := css_ast.Token{
			Kind:       t.Kind,
			Text:       t.DecodedText(p.source.Contents),
			Whitespace: nextWhitespace,
		}
		nextWhitespace = 0
		switch t.Kind {
		case css_lexer.TWhitespace:
			if last := len(result) - 1; last >= 0 {
				result[last].Whitespace |= css_ast.WhitespaceAfter
			}
			nextWhitespace = css_ast.WhitespaceBefore
			continue

		case css_lexer.TNumber:
			if p.options.MangleSyntax {
				if text, ok := mangleNumber(token.Text); ok {
					token.Text = text
				}
			}

		case css_lexer.TPercentage:
			if p.options.MangleSyntax {
				if text, ok := mangleNumber(token.PercentageValue()); ok {
					token.Text = text + "%"
				}
			}

		case css_lexer.TDimension:
			token.UnitOffset = t.UnitOffset
			if p.options.MangleSyntax {
				if text, ok := mangleNumber(token.DimensionValue()); ok {
					token.Text = text + token.DimensionUnit()
					token.UnitOffset = uint16(len(text))
				}
			}

		case css_lexer.TURL:
			token.ImportRecordIndex = uint32(len(p.importRecords))
			p.importRecords = append(p.importRecords, ast.ImportRecord{
				Kind:     ast.ImportURL,
				Path:     logger.Path{Text: token.Text},
				Range:    t.Range,
				IsUnused: !opts.allowImports,
			})
			token.Text = ""

		case css_lexer.TFunction:
			var nested []css_ast.Token
			original := tokens
			nestedOpts := opts
			if token.Text == "var" {
				// CSS variables require verbatim whitespace for correctness
				nestedOpts.verbatimWhitespace = true
			}
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseParen, nestedOpts)
			token.Children = &nested

			// Treat a URL function call with a string just like a URL token
			if token.Text == "url" && len(nested) == 1 && nested[0].Kind == css_lexer.TString {
				token.Kind = css_lexer.TURL
				token.Text = ""
				token.Children = nil
				token.ImportRecordIndex = uint32(len(p.importRecords))
				p.importRecords = append(p.importRecords, ast.ImportRecord{
					Kind:     ast.ImportURL,
					Path:     logger.Path{Text: nested[0].Text},
					Range:    original[0].Range,
					IsUnused: !opts.allowImports,
				})
			}

		case css_lexer.TOpenParen:
			var nested []css_ast.Token
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseParen, opts)
			token.Children = &nested

		case css_lexer.TOpenBrace:
			var nested []css_ast.Token
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseBrace, opts)

			// Pretty-printing: insert leading and trailing whitespace when not minifying
			if !opts.verbatimWhitespace && !p.options.RemoveWhitespace && len(nested) > 0 {
				nested[0].Whitespace |= css_ast.WhitespaceBefore
				nested[len(nested)-1].Whitespace |= css_ast.WhitespaceAfter
			}
			token.Children = &nested

		case css_lexer.TOpenBracket:
			var nested []css_ast.Token
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseBracket, opts)
			token.Children = &nested
		}
		result = append(result, token)
	}
	if !opts.verbatimWhitespace {
		for i := range result {
			token := &result[i]

			// Always remove leading and trailing whitespace
			if i == 0 {
				token.Whitespace &= ^css_ast.WhitespaceBefore
			}
			if i+1 == len(result) {
				token.Whitespace &= ^css_ast.WhitespaceAfter
			}

			switch token.Kind {
			case css_lexer.TComma:
				// Assume that whitespace can always be removed before a comma
				token.Whitespace &= ^css_ast.WhitespaceBefore
				if i > 0 {
					result[i-1].Whitespace &= ^css_ast.WhitespaceAfter
				}

				// Assume whitespace can always be added after a comma
				if p.options.RemoveWhitespace {
					token.Whitespace &= ^css_ast.WhitespaceAfter
					if i+1 < len(result) {
						result[i+1].Whitespace &= ^css_ast.WhitespaceBefore
					}
				} else {
					token.Whitespace |= css_ast.WhitespaceAfter
					if i+1 < len(result) {
						result[i+1].Whitespace |= css_ast.WhitespaceBefore
					}
				}
			}
		}
	}

	// Insert an explicit whitespace token if we're in verbatim mode and all
	// tokens were whitespace. In this case there is no token to attach the
	// whitespace before/after flags so this is the only way to represent this.
	// This is the only case where this function generates an explicit whitespace
	// token. It represents whitespace as flags in all other cases.
	if opts.verbatimWhitespace && len(result) == 0 && nextWhitespace == css_ast.WhitespaceBefore {
		result = append(result, css_ast.Token{
			Kind: css_lexer.TWhitespace,
		})
	}
	return result, tokens
}

// mangleNumber shortens a numeric literal when possible ("0.5" => ".5",
// "1.0" => "1"). NOTE(review): this definition is truncated in the visible
// source and continues beyond this fragment.
func mangleNumber(t string) (string, bool) {
	original := t
	if dot := strings.IndexByte(t, '.'); dot != -1 {
		// Remove trailing zeros
		for len(t) > 0 && t[len(t)-1] == '0' {
			t = t[:len(t)-1]
		}
		// Remove the decimal point if it's unnecessary
		if dot+1 == len(t) {
			t = t[:dot]
			if t == "" || t == "+" || t == "-" {
				t += "0"
			}
		} else {
			// Remove a leading zero
			if len(t) >= 3 && t[0] == '0' && t[1] == '.'
&& t[2] >= '0' && t[2] <= '9' { t = t[1:] } else if len(t) >= 4 && (t[0] == '+' || t[0] == '-') && t[1] == '0' && t[2] == '.' && t[3] >= '0' && t[3] <= '9' { t = t[0:1] + t[2:] } } } return t, t != original } func (p *parser) parseSelectorRule() css_ast.R { preludeStart := p.index // Try parsing the prelude as a selector list if list, ok := p.parseSelectorList(); ok { rule := css_ast.RSelector{Selectors: list} if p.expect(css_lexer.TOpenBrace) { rule.Rules = p.parseListOfDeclarations() p.expect(css_lexer.TCloseBrace) return &rule } } // Otherwise, parse a generic qualified rule return p.parseQualifiedRuleFrom(preludeStart, true /* isAlreadyInvalid */) } func (p *parser) parseQualifiedRuleFrom(preludeStart int, isAlreadyInvalid bool) *css_ast.RQualified { loop: for { switch p.current().Kind { case css_lexer.TOpenBrace, css_lexer.TEndOfFile: break loop case css_lexer.TSemicolon: // Error recovery if the block is omitted (likely some CSS meta-syntax) if !isAlreadyInvalid { p.expect(css_lexer.TOpenBrace) } prelude := p.convertTokens(p.tokens[preludeStart:p.index]) p.advance() return &css_ast.RQualified{Prelude: prelude} default: p.parseComponentValue() } } rule := css_ast.RQualified{ Prelude: p.convertTokens(p.tokens[preludeStart:p.index]), } if p.eat(css_lexer.TOpenBrace) { rule.Rules = p.parseListOfDeclarations() p.expect(css_lexer.TCloseBrace) } else if !isAlreadyInvalid { p.expect(css_lexer.TOpenBrace) } return &rule } func (p *parser) parseDeclaration() css_ast.R { // Parse the key keyStart := p.index ok := false if p.expect(css_lexer.TIdent) { p.eat(css_lexer.TWhitespace) if p.expect(css_lexer.TColon) { ok = true } } // Parse the value valueStart := p.index stop: for { switch p.current().Kind { case css_lexer.TEndOfFile, css_lexer.TSemicolon, css_lexer.TCloseBrace: break stop case css_lexer.TOpenBrace: // Error recovery if there is an unexpected block (likely some CSS meta-syntax) p.parseComponentValue() p.eat(css_lexer.TWhitespace) if ok && 
!p.peek(css_lexer.TSemicolon) { p.expect(css_lexer.TSemicolon) } break stop default: p.parseComponentValue() } } // Stop now if this is not a valid declaration if !ok { return &css_ast.RBadDeclaration{ Tokens: p.convertTokens(p.tokens[keyStart:p.index]), } } keyToken := p.tokens[keyStart] keyText := keyToken.DecodedText(p.source.Contents) value := p.tokens[valueStart:p.index] verbatimWhitespace := strings.HasPrefix(keyText, "--") // Remove trailing "!important" important := false i := len(value) - 1 if i >= 0 && value[i].Kind == css_lexer.TWhitespace { i-- } if i >= 0 && value[i].Kind == css_lexer.TIdent && strings.EqualFold(value[i].DecodedText(p.source.Contents), "important") { i-- if i >= 0 && value[i].Kind == css_lexer.TWhitespace { i-- } if i >= 0 && value[i].Kind == css_lexer.TDelimExclamation { value = value[:i] important = true } } result, _ := p.convertTokensHelper(value, css_lexer.TEndOfFile, convertTokensOpts{ allowImports: true, // CSS variables require verbatim whitespace for correctness verbatimWhitespace: verbatimWhitespace, }) // Insert or remove whitespace before the first token if !verbatimWhitespace && len(result) > 0 { if p.options.RemoveWhitespace { result[0].Whitespace &= ^css_ast.WhitespaceBefore } else { result[0].Whitespace |= css_ast.WhitespaceBefore } } return &css_ast.RDeclaration{ Key: css_ast.KnownDeclarations[keyText], KeyText: keyText, KeyRange: keyToken.Range, Value: result, Important: important, } } func (p *parser) parseComponentValue() { switch p.current().Kind { case css_lexer.TFunction: p.parseBlock(css_lexer.TFunction, css_lexer.TCloseParen) case css_lexer.TOpenParen: p.parseBlock(css_lexer.TOpenParen, css_lexer.TCloseParen) case css_lexer.TOpenBrace: p.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace) case css_lexer.TOpenBracket: p.parseBlock(css_lexer.TOpenBracket, css_lexer.TCloseBracket) case css_lexer.TEndOfFile: p.unexpected() default: p.advance() } } func (p *parser) parseBlock(open css_lexer.T, close css_lexer.T) 
{ if p.expect(open) { for !p.eat(close) { if p.peek(css_lexer.TEndOfFile) { p.expect(close) return } p.parseComponentValue() } } }
package xyz.redtorch.desktop.layout.base;

import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.collections.transformation.SortedList;
import javafx.scene.Node;
import javafx.scene.control.SelectionMode;
import javafx.scene.control.TableColumn;
import javafx.scene.control.TableRow;
import javafx.scene.control.TableView;
import javafx.scene.control.cell.PropertyValueFactory;
import javafx.scene.input.MouseButton;
import javafx.scene.layout.Pane;
import javafx.scene.layout.Priority;
import javafx.scene.layout.VBox;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import xyz.redtorch.desktop.layout.base.bean.TickFXBean;
import xyz.redtorch.desktop.service.DesktopTradeCachesService;
import xyz.redtorch.desktop.service.GuiMainService;
import xyz.redtorch.pb.CoreField.ContractField;
import xyz.redtorch.pb.CoreField.TickField;

import java.util.*;

/**
 * JavaFX layout that displays live market ticks in a sortable, multi-select
 * table. {@link #updateData(List)} feeds new tick snapshots in; {@link #render()}
 * reconciles the table rows against the latest snapshot while preserving the
 * user's row selection and sort order.
 */
@Component
public class TickLayout {

    private static final Logger logger = LoggerFactory.getLogger(TickLayout.class);

    // Root container returned from getNode()
    private final VBox vBox = new VBox();
    // Lazily build the UI on first getNode() call
    private boolean layoutCreated = false;
    // Backing list for the table; mutated in place by render()
    private final ObservableList<TickFXBean> tickObservableList = FXCollections.observableArrayList();
    // Most recent tick snapshot supplied via updateData()
    private List<TickField> tickList = new ArrayList<>();
    private final TableView<TickFXBean> tickTableView = new TableView<>();
    // uniformSymbol -> row bean, used to update existing rows instead of recreating them
    private Map<String, TickFXBean> tickFXBeanMap = new HashMap<>();
    // uniformSymbols of rows the user currently has selected (restored after re-render)
    private Set<String> selectedTickUniformSymbolSet = new HashSet<>();

    @Autowired
    private GuiMainService guiMainService;

    @Autowired
    private DesktopTradeCachesService desktopTradeCachesService;

    /**
     * Returns the root node of this layout, creating the UI on first access.
     */
    public Node getNode() {
        if (!layoutCreated) {
            createLayout();
            layoutCreated = true;
        }
        return this.vBox;
    }

    /**
     * Replaces the current tick snapshot and re-renders the table.
     *
     * @param tickList latest tick data (one entry per instrument)
     */
    public void updateData(List<TickField> tickList) {
        this.tickList = tickList;
        render();
    }

    /**
     * Reconciles the table contents with the latest tick snapshot:
     * updates rows for known symbols, adds rows for new symbols, drops rows
     * whose symbols disappeared, re-sorts, and then restores the previous
     * row selection by uniformSymbol.
     */
    public void render() {
        // Selection is rebuilt below, so clear it before mutating the rows
        tickTableView.getSelectionModel().clearSelection();

        Set<String> uniformSymbolSet = new HashSet<>();

        List<TickFXBean> newTickFXBeanList = new ArrayList<>();
        for (TickField tick : tickList) {
            String uniformSymbol = tick.getUniformSymbol();
            uniformSymbolSet.add(uniformSymbol);
            ContractField contractField = desktopTradeCachesService.queryContractByUniformSymbol(uniformSymbol);
            if (tickFXBeanMap.containsKey(uniformSymbol)) {
                // Known symbol: update the existing row bean in place
                tickFXBeanMap.get(uniformSymbol).update(tick, guiMainService.isSelectedContract(contractField), contractField);
            } else {
                // New symbol: create a row bean and queue it for insertion
                TickFXBean tickFXBean = new TickFXBean(tick, guiMainService.isSelectedContract(contractField), contractField);
                tickFXBeanMap.put(uniformSymbol, tickFXBean);
                newTickFXBeanList.add(tickFXBean);
            }
        }
        tickObservableList.addAll(newTickFXBeanList);

        // Drop map entries for symbols no longer present in the snapshot
        Map<String, TickFXBean> newTickFXBeanMap = new HashMap<>();
        for (String uniformSymbol : tickFXBeanMap.keySet()) {
            if (uniformSymbolSet.contains(uniformSymbol)) {
                newTickFXBeanMap.put(uniformSymbol, tickFXBeanMap.get(uniformSymbol));
            }
        }
        tickFXBeanMap = newTickFXBeanMap;

        // Drop table rows for symbols no longer present in the snapshot
        tickObservableList.removeIf(tickFXBean -> !uniformSymbolSet.contains(tickFXBean.getTickField().getUniformSymbol()));

        tickTableView.sort();

        // Restore the user's selection for symbols that are still in the table
        Set<String> newSelectedTickIdSet = new HashSet<>();
        for (TickFXBean tick : tickObservableList) {
            if (selectedTickUniformSymbolSet.contains(tick.getTickField().getUniformSymbol())) {
                tickTableView.getSelectionModel().select(tick);
                newSelectedTickIdSet.add(tick.getTickField().getUniformSymbol());
            }
        }
        selectedTickUniformSymbolSet = newSelectedTickIdSet;
    }

    /**
     * Builds the table columns, sorting, selection handling, and click
     * behavior. Called once from {@link #getNode()}.
     *
     * Columns whose cells are Panes carry the originating TickField in the
     * Pane's userData; the comparators below rely on that to sort by the
     * underlying tick value rather than by the Pane itself.
     */
    private void createLayout() {
        tickTableView.setTableMenuButtonVisible(true);

        TableColumn<TickFXBean, Pane> contractCol = new TableColumn<>("合约");
        contractCol.setPrefWidth(160);
        contractCol.setCellValueFactory(new PropertyValueFactory<>("contract"));
        contractCol.setComparator((Pane p1, Pane p2) -> {
            try {
                TickField tick1 = (TickField) p1.getUserData();
                TickField tick2 = (TickField) p2.getUserData();
                return StringUtils.compare(tick1.getUniformSymbol(), tick2.getUniformSymbol());
            } catch (Exception e) {
                logger.error("排序错误", e);
            }
            return 0;
        });
        tickTableView.getColumns().add(contractCol);

        TableColumn<TickFXBean, Pane> lastPriceCol = new TableColumn<>("最新价格");
        lastPriceCol.setPrefWidth(120);
        lastPriceCol.setCellValueFactory(new PropertyValueFactory<>("lastPrice"));
        lastPriceCol.setComparator((Pane p1, Pane p2) -> {
            try {
                TickField tick1 = (TickField) p1.getUserData();
                TickField tick2 = (TickField) p2.getUserData();
                return Double.compare(tick1.getLastPrice(), tick2.getLastPrice());
            } catch (Exception e) {
                logger.error("排序错误", e);
            }
            return 0;
        });
        tickTableView.getColumns().add(lastPriceCol);

        TableColumn<TickFXBean, Pane> abPriceCol = new TableColumn<>("买卖一价");
        abPriceCol.setPrefWidth(70);
        abPriceCol.setCellValueFactory(new PropertyValueFactory<>("abPrice"));
        abPriceCol.setSortable(false);
        tickTableView.getColumns().add(abPriceCol);

        TableColumn<TickFXBean, Pane> abVolumeCol = new TableColumn<>("买卖一量");
        abVolumeCol.setPrefWidth(70);
        abVolumeCol.setCellValueFactory(new PropertyValueFactory<>("abVolume"));
        abVolumeCol.setSortable(false);
        tickTableView.getColumns().add(abVolumeCol);

        TableColumn<TickFXBean, Pane> volumeCol = new TableColumn<>("成交量");
        volumeCol.setPrefWidth(70);
        volumeCol.setCellValueFactory(new PropertyValueFactory<>("volume"));
        volumeCol.setSortable(false);
        tickTableView.getColumns().add(volumeCol);

        TableColumn<TickFXBean, Pane> openInterestCol = new TableColumn<>("持仓量");
        openInterestCol.setPrefWidth(70);
        openInterestCol.setCellValueFactory(new PropertyValueFactory<>("openInterest"));
        openInterestCol.setSortable(false);
        tickTableView.getColumns().add(openInterestCol);

        TableColumn<TickFXBean, Pane> limitPriceCol = new TableColumn<>("涨跌停");
        limitPriceCol.setPrefWidth(70);
        limitPriceCol.setCellValueFactory(new PropertyValueFactory<>("limit"));
        limitPriceCol.setSortable(false);
        tickTableView.getColumns().add(limitPriceCol);

        TableColumn<TickFXBean, Pane> actionTimeCol = new TableColumn<>("时间");
        actionTimeCol.setPrefWidth(90);
        actionTimeCol.setCellValueFactory(new PropertyValueFactory<>("actionTime"));
        actionTimeCol.setSortable(false);
        tickTableView.getColumns().add(actionTimeCol);

        // Keep the table sorted by binding a SortedList's comparator to the
        // table's comparator; default sort is by contract column
        SortedList<TickFXBean> sortedItems = new SortedList<>(tickObservableList);
        tickTableView.setItems(sortedItems);
        sortedItems.comparatorProperty().bind(tickTableView.comparatorProperty());
        tickTableView.getSortOrder().add(contractCol);

        tickTableView.getSelectionModel().setSelectionMode(SelectionMode.MULTIPLE);

        // Track the selected symbols so render() can restore the selection
        tickTableView.setOnMousePressed(event -> {
            ObservableList<TickFXBean> selectedItems = tickTableView.getSelectionModel().getSelectedItems();
            selectedTickUniformSymbolSet.clear();
            for (TickFXBean row : selectedItems) {
                selectedTickUniformSymbolSet.add(row.getTickField().getUniformSymbol());
            }
        });

        // Single primary click on a row also makes its contract the globally
        // selected contract
        tickTableView.setRowFactory(tv -> {
            TableRow<TickFXBean> row = new TableRow<>();
            row.setOnMousePressed(event -> {
                if (!row.isEmpty() && event.getButton() == MouseButton.PRIMARY && event.getClickCount() == 1) {
                    ObservableList<TickFXBean> selectedItems = tickTableView.getSelectionModel().getSelectedItems();
                    selectedTickUniformSymbolSet.clear();
                    for (TickFXBean tick : selectedItems) {
                        selectedTickUniformSymbolSet.add(tick.getTickField().getUniformSymbol());
                    }
                    TickFXBean clickedItem = row.getItem();
                    ContractField contract = desktopTradeCachesService.queryContractByUniformSymbol(clickedItem.getTickField().getUniformSymbol());
                    guiMainService.updateSelectedContract(contract);
                }
            });
            return row;
        });

        tickTableView.setFocusTraversable(false);

        vBox.getChildren().add(tickTableView);
        VBox.setVgrow(tickTableView, Priority.ALWAYS);
        vBox.setMinWidth(1);
    }
}
// Emits a release based on the value's type category (address or object). static void emitCleanup(SILBuilder &builder, SILLocation loc, SILValue v) { if (v->getType().isAddress()) builder.createDestroyAddr(loc, v); else builder.createReleaseValue(loc, v, builder.getDefaultAtomicity()); }
export { default } from './ListOverflow';
<filename>packages/lit-analyzer/src/analyze/util/type/remove-undefined-from-type.ts import { isAssignableToSimpleTypeKind, SimpleType, SimpleTypeKind } from "ts-simple-type"; export function removeUndefinedFromType(type: SimpleType): SimpleType { switch (type.kind) { case SimpleTypeKind.ALIAS: return { ...type, target: removeUndefinedFromType(type.target) }; case SimpleTypeKind.UNION: return { ...type, types: type.types.filter(t => !isAssignableToSimpleTypeKind(t, SimpleTypeKind.UNDEFINED)) }; } return type; }
def run_id(self, desc=""):
    """Insert a new row into ``test_run`` and return its id.

    Args:
        desc: Optional human-readable description of the test run.

    Returns:
        int: The primary key assigned to the newly inserted row.
    """
    cur = self.conn.cursor()
    cur.execute('insert into test_run (run_desc) values (?)', (desc,))
    # Use the cursor's lastrowid instead of `select max(id)`: it is the id of
    # the row inserted by *this* cursor, so it stays correct if another
    # connection inserts concurrently, and it avoids an extra query.
    new_id = cur.lastrowid
    self.conn.commit()
    return new_id
Loader power-split transmission system based on a planetary gear set

In hydraulic mechanical transmission loaders, a hydraulic torque converter can prevent an engine from stalling due to overloading of the loader during the spading process; however, the hydraulic torque converter also reduces the loader’s fuel economy because of its low transmission efficiency. To address this issue, the study designs an output-power-split transmission system that is applied to a hybrid loader. The designed transmission system removes the hydraulic torque converter in the power transmission system of a traditional loader and adopts a planetary gear set with a compact structure as the dynamic coupling element, thus allowing the output power of the loader to be split and transmitted. During shoveling, the loader power-split transmission system based on a planetary gear set can prevent the motor from stalling and overheating under conditions that ensure that the traction does not decrease. In addition, the transmission efficiency and loader fuel economy are higher in the proposed transmission system than in the power transmission system of a traditional loader. The test results show that the transmission efficiency of the designed system was 13.2% higher than that of the traditional hydraulic mechanical transmission loader.
def musicThumbnailing(x, fs, short_term_size=1.0, short_term_step=0.5,
                      thumb_size=10.0, limit_1=0, limit_2=1):
    """Find the two most similar segments ("thumbnails") of a music signal.

    Computes a self-similarity matrix of short-term audio features, smooths
    it along the diagonal with a window of the requested thumbnail length,
    masks out trivial self-matches, and returns the pair of segments with
    the highest similarity.

    Args:
        x: Input audio signal (mono or stereo; converted to mono).
        fs: Sampling frequency in Hz.
        short_term_size: Short-term window size in seconds.
        short_term_step: Short-term window step in seconds.
        thumb_size: Desired thumbnail length in seconds.
        limit_1: Fraction of the track (from the start) excluded from search.
        limit_2: Fraction of the track (from the start) up to which to search.

    Returns:
        Tuple ``(A1, A2, B1, B2, S)`` where ``[A1, A2]`` and ``[B1, B2]``
        are the start/end times (seconds) of the two matching segments and
        ``S`` is the masked, smoothed self-similarity matrix.
    """
    x = audioBasicIO.stereo2mono(x)
    # Short-term feature extraction (frame sizes in samples)
    st_feats, _ = aF.stFeatureExtraction(x, fs, fs * short_term_size,
                                         fs * short_term_step)

    # Self-similarity matrix of the feature sequence
    S = selfSimilarityMatrix(st_feats)

    # Moving-average smoothing along the diagonal over the thumbnail length
    M = int(round(thumb_size / short_term_step))
    B = numpy.eye(M, M)
    S = scipy.signal.convolve2d(S, B, 'valid')

    min_sm = numpy.min(S)

    # Mask out near-diagonal entries and the lower triangle (trivial
    # self-matches) so argmax cannot select them. Vectorized equivalent of
    # the per-element double loop: mask where |i - j| < 5 / step or i > j.
    rows = numpy.arange(S.shape[0])[:, numpy.newaxis]
    cols = numpy.arange(S.shape[1])[numpy.newaxis, :]
    S[(numpy.abs(rows - cols) < 5.0 / short_term_step) | (rows > cols)] = min_sm

    # Restrict the search to the [limit_1, limit_2] fraction of the track
    S[0:int(limit_1 * S.shape[0]), :] = min_sm
    S[:, 0:int(limit_1 * S.shape[0])] = min_sm
    S[int(limit_2 * S.shape[0])::, :] = min_sm
    S[:, int(limit_2 * S.shape[0])::] = min_sm

    # Position of the maximum similarity
    [I, J] = numpy.unravel_index(S.argmax(), S.shape)

    # Expand the match around (I, J) until it spans M frames, growing toward
    # whichever neighboring diagonal cell has the higher similarity
    i1 = I
    i2 = I
    j1 = J
    j2 = J
    while i2 - i1 < M:
        if i1 <= 0 or j1 <= 0 or i2 >= S.shape[0] - 2 or j2 >= S.shape[1] - 2:
            break
        if S[i1 - 1, j1 - 1] > S[i2 + 1, j2 + 1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1

    return short_term_step * i1, short_term_step * i2, \
        short_term_step * j1, short_term_step * j2, S
_Who will invent America's next great century?_ The big ideas that will revolutionize the way we live will not emerge from our nation's capital. They will be dreamt up, as they always have been, by enterprising Americans who hope to create positive value for others. Encounter Intelligence is dedicated to promoting advances in innovation, education, and technology that will improve the lives of all Americans and unlock real opportunity for those who need it most. # **Table of Contents** Cover The Change Too Fast Too Incomplete Too Emotional Too Untrustworthy The Argument for Regulation Types of Social Media Regulation Other Approaches Building Immunity Mental Nutrition Defensive Memes Acquired Resistance The Power of Doing Nothing Conclusion Copyright The information world is undergoing a transformation, and it's huge. How huge? Huge enough that its impact is comparable to the introduction of agriculture, with similarly far-reaching effects on human civilization. And like the introduction of agriculture, though its impact may be positive overall, there will be significant costs along with the benefits, and neither the costs nor the benefits will be borne evenly. In this short book, I will look at the change in communications technology that has taken place over the past couple of centuries, and particularly over the past couple of decades. I will explore both the benefits and the downsides of these changes, and look at what's likely to come next. I will also look at efforts to ameliorate the downsides through regulation of online speech and other approaches. I will conclude with some suggestions of my own. ## The Change Society seems to be growing steadily crazier. And maybe it doesn't just seem to be. Maybe it actually _is_ growing crazier. Science-fiction writer Robert Heinlein's 1930s future history dubbed the early 21st century "the Crazy Years," a time when rapid technological and social change would leave people psychologically unmoored and, well, crazy. 
Today's society seems to be living up to that prediction. But why? I recently read James C. Scott's _Against the Grain: A Deep History of the Earliest States,_ and one of the interesting aspects of the earliest agricultural civilizations is how fragile they were. A bunch of people and their animals would crowd together in a newly formed city, and diseases that weren't much of a threat when everybody was out hunting and gathering over large areas would suddenly spread like wildfire and depopulate the town almost overnight. As Scott writes, an early city was more like a (badly run) refugee resettlement camp than a modern urban area, with people thrown together higgledy-piggledy with no real efforts at sanitation or amenities. He observes that "the pioneers who created this historically novel ecology could not possibly have known the disease vectors they were inadvertently unleashing." Then I ran across this observation on Twitter: "The Internet is rewiring brains and social relations. Could it be producing a civilizational nervous breakdown?" And I saw another article noting that depression in teens skyrocketed between 2010 and 2015, as smartphones took over. It made me wonder if we're in the same boat as the Neolithic cities, only for what you might call viruses of the mind: toxic ideas and emotions that spread like wildfire. # **Maybe we don't know the mental-disease vectors that we're inadvertently unleashing, just as those early civilizations didn't understand the physical-disease vectors they were promoting. Looking around at today's society, that certainly seems plausible.** Hunters and gatherers were at far less risk for infectious disease because they didn't encounter very many new people very often. Their exposure was low, and contact among such bands was sporadic enough that diseases couldn't spread very fast. Their environment and lifestyle were such that both diseases and ideas spread slowly. 
It wasn't until you crowded thousands or tens of thousands of them, along with their animals, into small dense areas with poor sanitation that disease outbreaks took off. Instead of meeting dozens of new people per year, an urban dweller probably encountered hundreds per day. Diseases that would have affected only a few people at a time as they spread slowly across a continent (or just burned out for lack of new carriers) would now, in these congested urban centers, leap from person to person in a flash. It's no surprise that the earliest cities often depopulated themselves via epidemics. Likewise, in recent years we've gone from an era when ideas spread comparatively slowly to one in which social media, in particular, allow them to spread like wildfire. A few hundred years ago, ideas spread mainly by word of mouth, or by books, which had to travel physically. Later they spread via newspapers. Now they spread at the speed of light, and are shared almost as quickly, at the click of a mouse. Sometimes that's good, when they're good ideas. But most ideas are probably bad. Maybe we don't know the mental-disease vectors that we're inadvertently unleashing, just as those early civilizations didn't understand the physical-disease vectors they were promoting. Looking around at today's society, that certainly seems plausible. It took three things to help control the spread of disease in cities: sanitation, acclimation, and better nutrition. In early cities, after all, people had no idea how diseases spread, something we didn't fully understand until the late 19th century. But rule-of-thumb sanitation made things a lot better over time. Also, populations eventually adapted: Diseases became endemic, not epidemic, and usually less severe as people developed immunity. And finally, as Scott notes, surviving disease was always a function of nutrition, with better-nourished populations doing much better than malnourished ones. 
Right now, it almost seems as if the social media world was designed to spread viruses of the mind. And that's probably because it was. While in the earlier days of the Internet ideas spread faster than before, today in the walled gardens of social media outlets like Facebook, Instagram, or especially Twitter, ideas spread much, much faster, and with less time for rumination or consideration, than ever before. And that's by design, as social media companies use algorithms that promote posts based on "engagement" – which typically means users' emotional reactions – and "share" buttons allow each user to pass them on to hundreds or thousands of friends, who can then do the same. This repeated sharing and resharing can produce a chain reaction reminiscent of a nuclear reactor with the control rods removed. As Jaron Lanier writes, "Engagement is not meant to serve any particular purpose other than its own enhancement, and yet the result is an unnatural global amplification of the 'easy' emotions, which happen to be the negative ones.... Remember, with old-fashioned advertising you could measure whether a product did better after an ad was run, but now companies are measuring whether individuals changed their behaviors, and the feeds for each person are constantly tweaked to get individual behavior to change.... The scheme I am describing amplifies negative emotions more than positive ones, so it's more efficient at harming society than at improving it." In the newspaper age, a full day passed from one publication to the next, and newspapers had to physically travel from the printing plant to the readers. It was possible to share newspaper articles, but only to a relatively small number of friends. Even with the advent of television and radio, there was usually a similar time, days or at least hours, between programs, and sharing was basically impossible. Ideas spread quickly, by historical standards, but not with anything like the speed of today's social media. 
This increase in speed, along with some other characteristics of social media, has led to calls for stricter speech regulation. In the pages that follow, I will outline some arguments for such regulation, suggest some counterarguments, and then offer some suggested responses of my own. In short, criticisms of social media as speech fall into several categories: it is too fast, it is too incomplete, it is too emotional, and it is too untrustworthy. All of these claims have some merit. ## Too Fast For most of human history, ideas spread slowly. They could travel no faster than the human being sharing them, and the only way they could be transferred was word of mouth. Some ideas still did well: Homer's work, for example, spread in exactly this fashion. But it was slow at best, and Homer is perhaps the exception that proves the rule, as comparatively little thinking from the preliterate era survives. The invention of writing, and later of printing, meant that ideas could spread much more widely. Their speed was still limited by human speeds of travel (though such speed was improving a bit) but circulation no longer required word-of-mouth contact. A book, handbill, or newspaper could reach multiple readers, and once movable type arrived post-Gutenberg it was easy to produce them in comparatively large numbers. # **Right now, it almost seems as if the social media world was designed to spread viruses of the mind. And that's probably because it was.** Thomas Paine's pamphlet _Common Sense_ was the best-selling work of the Colonial era; it sold 100,000 copies in 1776, the year it was published. Given that the population of the United States was 2.5 million at the time, that's quite impressive. 
If we assume, plausibly, given that copies were distributed and read aloud at taverns and other public places, that each copy was perused by five people, that would mean that 20 percent of the American population – and a much higher percentage of the adult population, of course – was exposed to Paine's work in its first year. This is huge, but, of course, it was also an exceptional case. The spread of information got faster throughout the 19th century, as railroads increased the speed of travel dramatically, and the telegraph meant that, for the first time, information could travel long distances much, much faster than a human could. Multiple editions of newspapers in big cities meant that an idea published in a morning edition could, at least in principle, be responded to or repeated in an afternoon paper. And with the invention of radio and television, ideas could be broadcast to many people – not simply, as with the telegraph, from point to point – at the speed of light. This represented a major increase in speed of transmission, but the speed of interaction was still comparatively slow. Television or radio news broadcasts were scheduled things, not really interactive with one another, and, most importantly, they were _broadcasts,_ information spread outward from a single point. They were important new technologies, for good or for evil (Hitler's mastery of radio was a major part of his success) but they still gave people time to ruminate and think about what they broadcast. And these broadcasts weren't prone to near-instantaneous chain reactions. Even the Internet made less of a difference, at first, than it might have. Originally, Internet news was just newspaper content ported over to a website. "Newsgroups" on Usenet were probably the first truly interactive medium, in which any individual could post items and everyone else could respond instantaneously. 
Usenet quickly gained a reputation for "flame wars" and the promulgation of conspiracy theories, which in retrospect probably should have been a warning. Even the "blogosphere" of the early 21st century, in which independently run blog sites posted items on news and responded both to Big Media stories and to each other, was more like traditional media in some respects than like Usenet or social media. To read content on blogs, readers had to go there. To interact, bloggers had to read each other's sites and decide to post a response, generally with a link back to the post they were replying to. If you didn't like a blog you could just ignore it. A story that spread like wildfire through the blogosphere still did so over the better part of a day, not over minutes, and it was typically pretty easy to find the original item and get context, something the culture of blogging encouraged. (As James Lileks wrote, "The link changes everything. When someone derides or exalts a piece, the link lets you examine the thing itself, without interference." And bloggers often encouraged their readers to follow the link and – to use a popular blogospheric phrase – "read the whole thing.") In addition, a story's spreading required at least a modicum of actual thought and consideration on the part of bloggers, who were also constrained, to a greater or lesser degree, by considerations of reputation. Some blogs served as trusted nodes on the blogosphere, and many other bloggers would be reluctant to run with a story that the trusted nodes didn't believe. In engineering parlance, the early blogosphere was a "loosely coupled" system, one where changes in one part were not immediately or directly transmitted to others. Loosely coupled systems tend to be resilient, and not very subject to systemic failures, because what happens in one part of the system affects other parts only weakly and slowly. 
Tightly coupled systems, on the other hand, where changes affecting one node swiftly affect others, are prone to cascading failures. Usenet was one such system, where an entire newsgroup could be ruined by a spreading "flame war." If a blogger flamed, people could just ignore the blog; when a Usenet user flamed, others got sucked in until the channel was filled with people yelling at each other. (As Nick Denton wrote, the blogosphere "routes around idiots" in a way that Usenet didn't, because the blogosphere doesn't depend on the common channel that a Usenet group did.) Social media – especially Twitter – is more like Usenet than blogs, but in many ways is worse. Like Usenet, it is tightly coupled. The "retweet," "comment," and "like" buttons are immediate. A retweet sends a posting, no matter how angry or misinformed, to all the retweeter's followers, who can then do the same to their followers, and so on, in a runaway chain reaction. Unlike blogs, little to no thought is required, and in practice very few people even follow the link (if there is one) to "read the whole thing." According to a study by computer scientists at Columbia University and the French National Institute, 59 percent of people who share a link on social media don't read the underlying story. (I'm honestly surprised the number isn't higher.) As Caitlin Dewey reported in _The Washington Post_: _Worse, the study finds that these sort of blind peer-to-peer shares are really important in determining what news gets circulated and what just fades off the public radar. So your thoughtless retweets, and those of your friends, are actually shaping our shared political and cultural agendas._ _"People are more willing to share an article than read it," study co-author Arnaud Legout said in a statement. "This is typical of modern information consumption. 
People form an opinion based on a summary, or a summary of summaries, without making the effort to go deeper."_ To verify that depressing piece of conventional Internet wisdom, Legout and his co-authors collected two data sets: the first, on all tweets containing Bit.ly-shortened links to five major news sources during a one-month period last summer; the second, on all of the clicks attached to that set of shortened links, as logged by Bit.ly, during the same period. After cleaning and collating that data, the researchers basically found themselves with a map to how news goes viral on Twitter. And that map showed, pretty clearly, that "viral" news is widely shared — but not necessarily, you know, read. Commenting on this study in _Forbes,_ Jayson DeMers writes: _The circulation of headlines in this way leads to an echo chamber effect. Users are more likely to share headlines that adhere to their pre-existing conceptions, rather than challenging them, and as a result, publishers try to post more headlines along those lines. Social groups regurgitate the same types of posts and content over and over again, leading to a kind of information stagnation. This is one of the most powerful negative repercussions of the blind sharing effect._ _This trend also makes it easier for journalists and content publishers to manipulate their audiences—whether they intend to or not. In a headline, one small word change can make a big difference, and even if you report all the real facts in the body of your article, the way you shape a headline can completely transform how users interpret your presentation of information. This is a dangerous and powerful tool._ That's exactly right. Social media makes people less informed but more partisan. 
The "block" and "mute" functions on Twitter, and similar tools on Facebook, etc., are intended to protect against Usenet-style flame wars, but to the extent that they work, they also put people in bubbles of similar thinkers, which tends to encourage the spread of misinformation so long as it matches the thinkers' prejudices. This problem is particularly severe as research indicates that a majority of people get their news from social media. According to a 2017 Pew study, 67 percent of Americans get at least some of their news from social media, and even among older Americans (those over 50) more than half rely on social media for a significant part of their news diet. # **Social media makes people less informed but more partisan.** Twitter in particular seems prone to waves of viral misinformation or hatred, which spread much faster, and with far less critical thinking, than anything we saw in the blogosphere. The result has been a series of pile-ons involving people whose alleged misbehavior (which often turns out to be fictional, or misrepresented) winds up costing them their jobs. The archetypical example involved PR executive Justine Sacco, who tweeted to her 170 followers as she got ready to board a flight to South Africa: "Going to Africa. Hope I don't get AIDS. Just kidding. I'm white!" She then got on the plane, turned off her phone, and while airborne and out of touch became the number one topic on Twitter, under the hashtag #HasJustineLandedYet. Although she meant it as a joke about Westerners' paranoia, it produced a shame mob – with help from Gawker Media's Sam Biddle, who admitted he promoted the shaming in search of traffic – that cost her her job. Such shame mobs have become more common, typically involving nonpublic figures caught out by a thoughtless or out-of-context statement, as the characteristics of Twitter seem ideally designed for bringing together an angry mob on short notice. 
Describing the phenomenon in _The New York Times,_ once-enthusiastic shamer Jon Ronson had second thoughts: _[At first,] it felt as if hierarchies were being dismantled, as if justice were being democratized. As time passed, though, I watched these shame campaigns multiply, to the point that they targeted not just powerful institutions and public figures but really anyone perceived to have done something offensive. I also began to marvel at the disconnect between the severity of the crime and the gleeful savagery of the punishment. It almost felt as if shamings were now happening for their own sake, as if they were following a script._ _Eventually I started to wonder about the recipients of our shamings, the real humans who were the virtual targets of these campaigns. So for the past two years, I've been interviewing individuals like Justine Sacco: everyday people pilloried brutally, most often for posting some poorly considered joke on social media. Whenever possible, I have met them in person, to truly grasp the emotional toll at the other end of our screens. The people I met were mostly unemployed, fired for their transgressions, and they seemed broken somehow — deeply confused and traumatized._ And it gets worse: _I met a man who, in early 2013, had been sitting at a conference for tech developers in Santa Clara, Calif., when a stupid joke popped into his head. It was about the attachments for computers and mobile devices that are commonly called dongles. He murmured the joke to his friend sitting next to him, he told me. "It was so bad, I don't remember the exact words," he said. "Something about a fictitious piece of hardware that has a really big dongle, a ridiculous dongle.... It wasn't even conversation-level volume."_ _Moments later, he half-noticed when a woman one row in front of them stood up, turned around and took a photograph. He thought she was taking a crowd shot, so he looked straight ahead, trying to avoid ruining her picture. 
It's a little painful to look at the photograph now, knowing what was coming._ _The woman had, in fact, overheard the joke. She considered it to be emblematic of the gender imbalance that plagues the tech industry and the toxic, male-dominated corporate culture that arises from it. She tweeted the picture to her 9,209 followers with the caption: "Not cool. Jokes about... 'big' dongles right behind me." Ten minutes later, he and his friend were taken into a quiet room at the conference and asked to explain themselves. A day later, his boss called him into his office, and he was fired._ _"I packed up all my stuff in a box," he told me. (Like Stone and Sacco, he had never before talked on the record about what happened to him. He spoke on the condition of anonymity to avoid further damaging his career.) "I went outside to call my wife. I'm not one to shed tears, but" — he paused — "when I got in the car with my wife I just.... I've got three kids. Getting fired was terrifying."_ _The woman who took the photograph, Adria Richards, soon felt the wrath of the crowd herself. The man responsible for the dongle joke had posted about losing his job on Hacker News, an online forum popular with developers. This led to a backlash from the other end of the political spectrum. So-called men's rights activists and anonymous trolls bombarded Richards with death threats on Twitter and Facebook. Someone tweeted Richards's home address along with a photograph of a beheaded woman with duct tape over her mouth. Fearing for her life, she left her home, sleeping on friends' couches for the remainder of the year._ When people organize spontaneously, it can be a good thing – look at the GoFundMe campaigns for people who are sick or injured, or disaster responses like the Cajun Navy. But this sort of spontaneous organization seems more like casual cruelty. 
It's almost like a high-tech version of Shirley Jackson's horror story _The Lottery,_ in which a village randomly chooses one of its members to be stoned to death. The rapid-response character of social media lends itself to this sort of thing, which is why it is becoming more common. Meanwhile, in other parts of the world, social media is used not only to persecute individuals, but whole groups, by promoting religious and racial persecution. Twitter CEO Jack Dorsey recently came under fire for ignoring the role of social media (something for which Facebook apologized) in the persecution of Burma's Rohingya people. Sometimes too-swift reactions are a reason to regulate speech. Traditionally, "incitement" is a category of speech that gets less (or no) First Amendment protection. The classic example of incitement is when a mob, already riled up and emotional, is urged by a speaker to do something violent or illegal right away. This "imminence" is an essential part of incitement, because it means that there won't be time for measured reflection, or even time for someone to try to talk people out of acting. The too-fast characteristics of speech under social media like Twitter would seem to be similar. I'm not suggesting that tweets and other social media speech should be taken out of First Amendment protection because they are incitement – the full test for incitement, from _Brandenburg v. Ohio,_ requires that the speech be intended to produce, and be likely to produce, imminent lawless action. In practice, it's pretty hard for a tweet to promote imminent lawless action since the speaker and the listener are in different physical locations, and listeners aren't likely to do much of anything concrete and immediate in response to a tweet, since they'd have to look up from their phones first. But the notion that speech of the sort that by its nature encourages people to respond without thinking is more problematic than speech that does not seems to me to carry over. 
## Too Incomplete Traditional news stories leave a lot out. To boil down events into 800 or 1,000 words – or, increasingly, 500 words – requires omitting a lot of context, history, and background. But when a news story is presented on Facebook or (especially) Twitter, often only the headline is read, along with, perhaps, a one-sentence summary, as we've seen. Relatively few people click through to read the actual story, which often contains far more information and nuance than the headline, which is usually deliberately provocative. # **In fact, the corruption of the political/intellectual class by social media is particularly serious, since their descent into thoughtless polarization can then spread to the rest of the population, even that large part that doesn't use social media itself, through traditional channels.** In his insightful essay _In The Beginning was the Command Line,_ Neal Stephenson wrote that, "The ability to think rationally is pretty rare, even in prestigious universities. We're in the TV age now, and people think by linking pictures in their brains." That was 1999. To some degree it's still true, but the pictures have been replaced by catchphrases and hashtags. The short-form character of social media tends to reinforce this. Even before the rise of social media, critics were worrying that the Internet was shortening people's attention spans. In his work _The Shallows: What the Internet Is Doing to Our Brains,_ Nicholas Carr wrote that prior centuries' technological change had worked to encourage people to think longer and harder than in the past. The rise of printed books, which could present arguments and marshal facts on a scale previously unknown, actually changed how people thought and argued. "The arguments in books became longer and clearer, as well as more complex and more challenging, as writers strived self-consciously to refine their ideas and their logic.... 
The advances in book technology changed the personal experience of reading and writing. They also had social consequences." The widespread process of deep, attentive reading, Carr writes, changed people and changed society. "The literary mind," he writes, "once confined to the cloisters of the monastery and the towers of the university had become the general mind.... The words in books didn't just strengthen people's ability to think abstractly, they enriched people's experience of the physical world, the world outside the books." All that has changed, he writes, in the Internet age, a claim that seems surely to be stronger today than a decade ago. "Dozens of studies by psychologists, neurobiologists, educators and Web designers point to the same conclusion: When we go online, we enter an environment that promotes cursory reading, hurried and distracted thinking, and superficial learning. It's possible to think deeply while surfing the Net, just as it's possible to think shallowly while reading a book, but that's not the type of thinking the technology encourages and rewards." Neuroscientist Maryanne Wolf of UCLA wondered just how social media was affecting us, so she experimented on herself, and she didn't like what she found. As Robert Fulford reports: _She set aside enough time to re-read a book she had loved as a young woman, Hermann Hesse's_ Magister Ludi. _Alas, she soon discovered that Hesse no longer pleased her. "I hated the book. I hated the whole so-called experiment." The narrative proved painfully slow._ _She had changed in ways she would never have predicted. "I now read on the surface and very quickly; in fact, I read too fast to comprehend deeper levels, which forced me constantly to go back and re-read the same sentence over and over with increasing frustration." She had lost the cognitive patience that once sustained her in reading such narratives._ _She still buys books, "but more and more I read in them rather than being whisked away by them." 
At some point she had "begun to read more to be informed than to be immersed, much less to be transported."_ _She's not alone. As a writer in the_ Guardian _recently pointed out, "Skim reading is the new normal." Just about everyone does it, perhaps not aware that in a few years it becomes habitual. The reader who skims can lose the ability to grasp another person's feelings or perceive beauty._ I've had similar experiences myself. A few years ago, I noticed that I really enjoyed reading on airplanes and wondered why. After a bit of reflection, I realized that it was because I wasn't distracted by the temptation to check a device every now and then, allowing reading to be the kind of immersive experience I once took for granted. Now I make a point of semi-disconnecting every night, sitting down with a novel and a glass of wine, with my computer and phone out of reach. I try to do the same thing when I'm reading for work instead of pleasure, setting my devices aside so that I can read deeply and really think about things, but it's always a struggle. And I don't think that I'm alone. I'm not suggesting something as simplistic as books good, Internet bad. There's nothing inherently good about books as such – _Das Kapital_ and _Mein Kampf_ are both books with murderous consequences, and books that obviously did nothing to improve their readers' critical-thinking abilities. But the capacity for deep reading and deep thinking is a valuable one, and one that is being tossed aside for no particular reason. As Fulford notes, "Universities report that students now avoid signing on for classes in 19th-century literature. They realize they can no longer work through Dickens or George Eliot." In his classic _The System of Freedom of Expression,_ Yale First Amendment scholar Thomas Emerson wrote: _Freedom of expression is an essential process for examining knowledge and discovering truth. 
An individual who seeks knowledge and truth must hear all sides of the question, consider all alternatives, test his judgment by exposing it to opposition, and make full use of different minds. Discussion must be kept open no matter how certainly true an accepted opinion may seem to be; many of the most widely acknowledged truths have turned out to be erroneous...._ _Freedom of expression is essential to provide for participation in decision making by all members of society. This is particularly significant for political decisions. Once one accepts the premise of the Declaration of Independence—that governments "derive their just powers from the consent of the governed"—it follows that the governed must, in order to exercise their right of consent, have full freedom of expression both in forming individual judgments and in forming the common judgment._ # **Social media is addictive by design. The companies involved put enormous amounts of thought and effort into making it that way, so that people will be glued to their screens.** The kind of deep, wide-ranging, multipolar community debate that Emerson envisioned as key to our system of freedom of expression is at odds with the surface-skimming, tribal, catchphrase-based nature of social media. In a recent piece, Daniel Hannan suggests that our politics are becoming more vicious because we're becoming stupider – as a result of social media shrinking our attention spans: _The fall in IQ scores in the West is perhaps the most under-reported story of our era. For most of the twentieth century, IQ rose by around three points per decade globally, probably because of better nutrition. But that trend has recently gone into reverse in developed countries._ _You hadn't heard? I'm not surprised. Journalists and politicians won't go near the subject and you can see why. Consider the theories offered by neuroscientists for the decline. 
Some argued it had to do with the rising age of motherhood, because the children of older mothers tend to have lower IQs, other things being equal. No one likes to say this, because it can come across as "older moms have dumb kids," which is not true. (My wife and I were 44 when our youngest child was born, and my own parents were also elderly, but that didn't make me too thick to grasp the concept of statistical distributions.)_ _Other theories were even more explosive. For example, that unintelligent people were having more kids, or that the fall in average scores reflected immigration from places with lower IQs._ _But a new study from Norway, which examines IQ scores from 730,000 men (standardized tests are part of military service there) disproves all these ideas, because it shows IQ dropping within the same families. Men born in 1991 score, on average, five points lower than men born in 1975. There must, in other words, be an environmental explanation, and the chronology throws up a clear suspect: the rise in screen-time._ _Kids brought up with Facebook and Instagram are more politically bigoted, not because they don't hear alternative opinions, but because they don't learn the concentration necessary to listen to opponents — a difficult and unnatural skill._ It's unfortunate that social media not only makes such debate more difficult on its platforms, but also, it seems, rewires people's brains in such a fashion as to make such debate more difficult everywhere else. It is made worse by the fact that Twitter in particular seems to be most heavily used by the very people – pundits, political journalists, the intelligentsia – most vital to the sort of debate that Emerson saw as essential. 
In fact, the corruption of the political/intellectual class by social media is particularly serious, since their descent into thoughtless polarization can then spread to the rest of the population, even that large part that doesn't use social media itself, through traditional channels. Writing on why Twitter is worse than it seems, David French observes that even though its user base is smaller than most other social media, those users are particularly influential: _But in public influence it punches far above its weight. Why? Because it's where cultural kingmakers congregate, and thus where conventional wisdom is formed and shaped — often instantly and thoughtlessly._ _In other words, Twitter is where the people who care the most spend their time. The disproportionate influence of microbursts of instant public comments from a curated set of people these influencers follow shapes their writing and thinking and conduct way beyond the platform._ _It's tempting, when reading a news feed full of rage and hysteria, to console yourself in the knowledge that it's "just Twitter." But behind those angry, hyperbolic tweets (well, the blue-check-marked ones, anyway) are people, and those people are disproportionately the most engaged and most influential men and women in American public life. It's "just" the American political class putting its rage and intemperance on display, hoping to remake the world in its own irate image. And the surprising success of that attempted makeover should scare you, whatever your own political views are._ Twitter is also the most stripped-down of the social media platforms, and thus the most illustrative of social media's basic flaws. Just as sad people repetitively pulling the levers on gas-station slot machines illustrate the essence of gambling without the distracting glamour of casinos and racetracks, so Twitter, without a focus on "friends" or photos, or other sidelines, displays raw online human political nature at its worst. 
This makes it easy for people to get worse. You can reject Twitter's toxicity by leaving the platform, as I did in the fall of 2018. But French is right that this doesn't really solve the problem: "Absent large-scale collective action by the political/media class to reject the platform, simply logging off Twitter is merely a _personal_ defensive mechanism — a sometimes necessary mental-health break that all too often correlates with diminished influence in the national political debate." With Twitter, you can participate and be driven crazy – or you can stay sane, and lose influence. That's a bad trade-off. ## Too Emotional In _The Shallows,_ Nicholas Carr writes: _One thing is very clear: If, knowing what we know today about the brain's plasticity, you were to set out to invent a medium that would rewire our mental circuits as quickly and thoroughly as possible, you would probably end up designing something that looks and works a lot like the Internet. It's not just that we tend to use the Net regularly, even obsessively. It's that the Net delivers precisely the kind of sensory and cognitive stimuli – repetitive, intensive, interactive, addictive – that have been shown to result in strong and rapid alterations in brain circuits and functions. With the exception of alphabets and number systems, the Net may be the single most powerful mind-altering technology that has ever come into general use._ _The Net also provides a high-speed system for delivering responses and rewards—"positive reinforcements" in psychological terms—which encourage the repetition of both physical and mental actions. When we click a link, we get something new to look at and evaluate. When we Google a keyword, we receive, in the blink of an eye, a list of interesting information to appraise. When we send a text or an instant message or an e-mail, we often get a reply in a matter of seconds or minutes. When we use Facebook, we attract new friends or form closer bonds with old ones. 
When we send a tweet through Twitter, we gain new followers.... The Net's interactivity gives us powerful new tools for finding information, expressing ourselves, and conversing with others. It also turns us into lab rats constantly pressing levers to get tiny pellets of social or intellectual nourishment. The Net commands our attention with far greater insistency than our television or radio or morning newspapers ever did._ Social media is addictive by design. The companies involved put enormous amounts of thought and effort into making it that way, so that people will be glued to their screens. As much as they're selling anything, they're selling the "dopamine hit" that people experience when they get a "like" or a "share" or some other response to their action. We've reached the point where there are not merely articles in places like _Psychology Today_ and _The Washington Post_ on dealing with "social media addiction," but even scholarly papers in medical journals with titles like "The relationship between addictive use of social media and video games and symptoms of psychiatric disorders: A large scale cross-sectional study." As mentioned earlier, one of the consulting companies in the business of making applications addictive is even named Dopamine Labs, making no bones about what's going on. As Adam Alter writes in his book _Irresistible: The Rise of Addictive Technology and the Business of Keeping Us Hooked,_ addictive technology is big money. And the consequences are severe: _Kids aren't born craving tech, but they come to see it as indispensable. By the time they enter middle school, their social lives migrate from the real world to the digital world. All day, every day, they share hundreds of millions of photos on Instagram and billions of text messages. 
They don't have the option of taking a break, because this is where they come for validation and friendship._ _Online interactions aren't just different from real-world interactions; they're measurably worse. Humans learn empathy and understanding by watching how their actions affect other people. Empathy can't flourish without immediate feedback, and it's a very slow-developing skill. One analysis of seventy-two studies found that empathy has declined among college students between 1979 and 2009. They're less likely to take the perspective of other people, and show less concern for others. The problem is bad among boys, but it's worse among girls...._ # **People are more likely to believe misinformation on social media because they tend to only read headlines that mesh with their preconceived ideas, and they tend to get and share those headlines from friends, family, or people they see as ideological allies.** _Many teens refuse to communicate on the phone or face-to-face, and they conduct their fights by text. "It's too awkward in person," one girl told Steiner-Adair. "I was just in a fight with someone and I was texting them and I asked, 'Can I call you, or can we video chat?' and they were like 'No.'"... That's obviously a terrible way to learn to communicate, because it discourages directness._ It's also a terrible way to learn empathy, as the emotional response to one's behavior, normally displayed in things like facial expressions, body language, and tone of voice, is reduced to text and emojis. Perhaps this is one of the reasons for the shame mobs: To the mobs, their targets don't really seem human. But while the shame mobs throw their stones in a sort of play, their victims' lives and careers are ruined in earnest. 
Essentially, the addictive aspects of social media (and other online apps) are intended to bypass the conscious thought process and go directly to the limbic system, after the fashion of many drugs and intoxicants, or, for that matter, of gambling. That's not new, of course – people in biblical times drank wine and gambled, and it wasn't new then – but the methodology is. The fact that this stimulation is being achieved via technology, rather than by old-fashioned methods, makes it more reminiscent of the "direct limbic stimulation" advertisements that inflicted an unquenchable thirst for "Mokie-Koke" in Frederick Pohl's dystopian novel of advertising excess, _The Merchants' War._ It is a case of "behavioral addiction," rather than chemical dependence. (As Sherry Turkle writes, "The machine's prowess was exploiting our vulnerabilities: we wanted to feel recognized, and we didn't want to feel alone.") Nor is this addiction limited to young people. In fact, as a recent article in _Wired_ by Clive Thompson reported, the evidence is that older people – the middle-aged Generation Xers in particular – are the most hooked: _The data suggests that the ones most hooked on their devices are those graying Gen Xers. Research by Nielsen, for example, found that Americans aged 35 to 49 used social media 40 minutes more each week than those aged 18 to 34. Gen Xers were also more likely than millennials to pull their phones out at the dinner table. (Baby boomers were even worse!) The middle-aged spend more time than millennials on every type of device—phone, computer, tablet—and, while they don't peek at their phones while driving more than young people, they do it more than they should._ Historically, of course, we've regulated addictive products, whether drugs, liquor, tobacco, or gambling, more strictly than other products precisely because addiction, by its nature, overrides people's reason and works directly on desire. 
Social media companies may come to be regarded more like tobacco companies than like media companies, with consonant calls for stricter regulation of their products, or at least the addictive aspects thereof. Likewise, one argument for regulating porn or violence in entertainment is that those forms go straight to the limbic system, arousing emotions without any intervening thought or argument. The comparison to Twitter seems pretty obvious. ## Too Untrustworthy "A lie," as an old aphorism has it, "can get halfway around the world before the truth gets its boots on." In the world of social media, this statement may actually turn out to be overoptimistic. It sometimes seems as if the lie gets halfway around the galaxy before the truth gets its boots on. Even before social media, of course, it was a standard joke (stemming from an ancient _New Yorker_ cartoon by Peter Steiner) that "on the Internet, nobody knows you're a dog." Social media only amplifies this tendency. First, setting up an actual website (say, a blog or a news site) takes a modicum of effort. Not a huge amount, but some. Setting up a social media account, on the other hand, takes less than a minute. You're then free to be fake. That's one reason why Twitter and Facebook are plagued by fake accounts to the point that they've instituted "verified" statuses for celebrities, so that people can be sure that the person they're following is the real deal. But what about the rest of us? A fake celebrity is nice, but dozens – or hundreds, or thousands – of fake "bot" accounts purporting to be ordinary people can give the impression that an idea is gaining traction when it's all manufactured. Sometimes it can swing elections. In the very close Alabama Senate special election between Roy Moore and Doug Jones, donors supporting the victor, Jones, used a false-flag "Dry Alabama" campaign to convince people that Moore wanted to ban alcohol. 
The Daily Caller reported: _Operatives with New Knowledge, a group affiliated with Dickerson, created thousands of Twitter accounts posing as Russian bots to boost the election-year chances of Jones — the accounts began following Moore's Twitter account in October 2017. The project created a slew of Facebook accounts as well that were designed to troll conservatives into opposing Moore._ _But the misinformation project attracted attention from local and national media, falsely suggesting Russia was backing Moore's candidacy._ The Montgomery Advertiser, _for one, was the first to cover the story using the Russian-bot angle. National media outlets quickly followed suit._ _"Roy Moore flooded with fake Russian Twitter followers," read the headline on a_ New York Post _story_, _which cited the Advertiser._ WaPo _focused its reporting on the fact that Moore blamed Democrats for the fake accounts. Other major national outlets picked up on the story shortly thereafter, with many pundits mocking Moore for blaming Democratic operatives._ But it doesn't take a six-figure campaign to spread misinformation on social media. Even fairly obvious parody accounts are often taken as real and quoted by real-life news media: The "DPRK News Service" Twitter account, purporting to be a North Korean media outlet, has fooled CNN, Fox News, _Newsweek,_ and other major outlets into reporting its content as if it were a genuine product of the North Korean government. Though to be fair, as Gizmodo reported a while back, that's partly because "many of the tweets are only about 10 percent more ludicrous than the real English-language news feed of North Korea." People are more likely to believe misinformation on social media because they tend to only read headlines that mesh with their preconceived ideas, and they tend to get and share those headlines from friends, family, or people they see as ideological allies.
This makes them less critical and more willing to pass on things that on further thought they would probably recognize as bogus. In addition, of course, social media passes along only tiny niblets of information, allowing and even encouraging people to make assumptions about the background, assumptions that also tend to follow their preconceptions and prejudices. ## The Argument for Regulation Unsurprisingly, the characteristics of social media outlined above have produced calls for regulation. As a form of media that is addictive, mentally damaging, and prone to spread misinformation – all while allowing giant corporations to form detailed dossiers on our likes, dislikes, and connections – social media raises numerous concerns, many of them legitimate. # **As a social media user, you face the paradox that people harassing you can be anonymous, even as you have no privacy from the social media company, or from anyone who can hack, or buy, your personal information.** Traditionally, we've regulated private information like credit info and health records. We've – until recently, at least – regulated pornography and other forms of what the Supreme Court calls "low value" speech. We've regulated substances, like drugs and tobacco, and practices, like gambling, that promote addiction and dependence. And (again until recently) we've regulated false and defamatory speech in terms of libel and slander. As Andrew Arnold writes in _Forbes_: _Some of the strongest proponents of social media argue that it is a matter of public interest to regulate social media. They say that regulating it may be as important as regulating tobacco or alcohol. One of the arguments they make is that social media empowers large corporations to control the flow of information.
As long as they can afford to saturate social media feeds with posts that curate the information they want, anyone standing in opposition to that is essentially steamrolled because they don't have the resources to counter that._ _It isn't only businesses that may benefit at the expense of the consumer. It can also be governments vs. citizens. Oppressive regimes such as the one in the Philippines or Russia appear to be using social media as a tool in their efforts to remain in power._ _There's even a health aspect to consider. With constant notifications and pressure to never miss the next viral post or bit of information, social media platforms are using the same techniques that casinos use to draw in gamblers. Considering that most platforms are open to people from the age of 13 on, that's troubling._ The pressure for regulation is certainly growing. Some of this is fallout from the 2016 election: The fable that "fake news" or "Russian bots" swung the election is mostly a comforting myth for Hillary Clinton supporters still groping to explain their loss. As researchers from Princeton and NYU who explored the phenomenon of "fake news" on social media found, it's "important to be clear about how rare this behavior is on social platforms.... The vast majority of Facebook users in our data did not share any articles from fake news domains in 2016 at all." And, of course, just because fake news attacks didn't swing the last election doesn't mean they won't be a threat in the future. As dependence on social media increases, and as techniques for manipulating opinions grow more refined, the likelihood that foreign actors – or domestic ones, as in the Alabama Senate special election mentioned earlier – will change enough people's minds to flip an election grows higher. There are also serious concerns about privacy, with Facebook's data breaches, and scandals at other providers, demonstrating that loss of privacy is baked into the system as it currently exists.
As a social media user, you face the paradox that people harassing you can be anonymous, even as you have no privacy from the social media company, or from anyone who can hack, or buy, your personal information. In addition, existing companies like Facebook have started to embrace the idea of some sort of regulation. This isn't necessarily to be applauded – established companies tend to favor regulation of their industries as it makes life harder for new entrants who might be potential competitors. And it's a safe bet that whatever regulation Facebook favors won't do much to limit Facebook's freedom of action, or to impose any sort of genuine accountability for misbehavior. Still, with less industry opposition, some sort of regulation becomes more likely. ## Types of Social Media Regulation But what kind? There have been numerous proposals, but here are some of the most significant: _End online anonymity:_ Trolls, bots, and cyberstalkers take advantage of anonymity online. Thus, the argument goes, we should end anonymity, or at least limit it. As one proposal, summarized by Mark Courtney in AccountingWeb, has it: _We believe that anonymity is important, but if something goes wrong, there also must be some way to get back to the source. 
This doesn't mean that people always have to be overtly identifiable, and it would certainly be hard to regulate, but we would recommend a responsible body or trade organization where the authorities (closely controlled) would be able to track illegal activity back to its perpetrator._ _Here's an analogy we like to use: People should be able to travel the Internet highway with the equivalent of a car registration plate that's issued upon proper verification by a secure and certified issuing body or identity provider – just like a car registration._ _While the registration could be used to identify you if needed, the sites you visit on the Internet wouldn't need to know your name or where you live, unless you wanted to tell them your details (give consent). And if you wanted to hide your appearance from them, then that would be possible too._ _However, just like the real highway, if there's an issue, such as speeding or dangerous driving, a reputable body would be able to trace the incident back to the culprit with confidence. Although, hopefully, it wouldn't come to this very often, the fact this could happen would provide assurance to others on that highway (and their parents) and would ensure that drivers act more responsibly. This would allow the beneficial aspects of anonymity, yet take away the perceived sense of being untouchable, which can cause so much damage._ It's been a bad few years for "reputable authorities," alas, and I don't know who I'd be willing to trust with this sort of power. But that's the least of it. The notion of requiring a license to speak on the Internet – because this is effectively what's being proposed – raises the possibility of people's licenses being revoked, a matter with obvious concerns for freedom of speech, and freedom in general. This sort of a regime was imagined in Vernor Vinge's _True Names,_ and that story was a cautionary tale, not a how-to manual. 
_Remove Section 230 immunity:_ Under Section 230 of the Communications Decency Act, Internet publishers are not responsible for the content of information that comes from "another information content provider." Initially, this was meant to protect against, for example, newspapers being held liable for libelous statements by their readers in comment sections. Now, however, it protects services like Facebook or Twitter from liability for effectively everything on their sites, since everything there is content provided by someone else. It's doubtful that these sites could survive in the absence of Section 230 immunity. If you wanted to force a return to something more like the old blogosphere, a widely distributed network of small publishers, then this repeal would likely accomplish that end, though the repeal of Section 230 immunity seems highly unlikely, given its impact and the opposition it would create. Basically every major tech company, every social media company, and every traditional media company would be in opposition. Perhaps lesser tweaks might be possible, say requiring due care to prevent the spread of false or defamatory or threatening information, but it would be an uphill battle. # **Besides, the rather sorry state of "fact-checking" journalistic enterprises suggests that unpopular opinions will be treated as incorrect facts, and popular opinions as the reverse, on a fairly regular basis.** _More scrutiny of users:_ The chief approach that the social media giants have taken so far has been to ban people who say things they don't like. This has resulted in people like Infowars' Alex Jones being banned from Twitter, and an extremely unsuccessful effort by Tumblr to ban pornographic content, as well as the regular "Facebook jailing" of people who post political content that offends Facebook's in-house censors. 
The problem is that this censorship seems to fall much more heavily on the right than on the left, given the – extremely – left-leaning makeup of the social media companies' workforce. In an interview last year, Twitter's CEO Jack Dorsey admitted that his company is "left leaning," but denied that it affected the company's policies. In a separate interview with NYU Professor Jay Rosen, Dorsey admitted that some conservatives who work at Twitter "don't feel safe to express their opinions at the company." Dorsey said he thought that was wrong, and that everyone in the company should feel free to express their opinions, but if Twitter's employees aren't safe expressing non-lefty opinions, what of Twitter users? This sort of one-sided censorship is one of the reasons, together with the other concerns mentioned here, why I deactivated my Twitter account. Censoring people is always going to be ideologically fraught, and social media companies are apparently incapable of addressing this in an unbiased manner. At any rate, seriously throttling destructive content would probably cost them a lot of money. As Jaron Lanier writes, "A social media company is in a better position if it doesn't know what's going on, because then it makes just as much money but with less culpability." _More scrutiny of content:_ Writing in _The Hill,_ Anders Aslund argues that "each social media company should be obligated to establish a system for checking incoming information and exclude what is obviously false or even slanderous. Fact checking is not enough. Sheer lies must be expelled. Wikipedia has done so voluntarily, and it should be in the interest of the social media companies to do the same." Well, the history of my own Wikipedia entry – which at one point, among other things, featured a photoshopped picture of me in an "I had an abortion" T-shirt as if it were real – leaves me skeptical about how good a job Wikipedia has actually done. 
And it, too, has shown rather extreme political bias. But there are only two ways this kind of fact-checking can be accomplished on a social media platform. Either everything has to be checked when it's posted, an impossible task, or things have to be checked when there are sufficient complaints, which is as likely to encourage mobbing and targeting of political opponents as anything else. Besides, the rather sorry state of "fact-checking" journalistic enterprises suggests that unpopular opinions will be treated as incorrect facts, and popular opinions as the reverse, on a fairly regular basis. _Algorithmic transparency:_ Social media all offers us "curated" content, which means, basically, content chosen to manipulate us and hold our attention. This is accomplished via algorithms, which are deep secrets. Similar algorithms are, and would be, employed to control "undesirable" speech. But how do they work? Do they work? As York University Professor Natasha Kusikov writes in _The Conversation_ : "Is 'trust us' a good enough response, given the problem? With so much at stake, it may be time for a fundamental rethink of how these indispensable 21st century companies are regulated and what they're allowed to do. At the very minimum, governments and citizens should reconsider whether the lack of oversight into how these companies shape our speech rights is really in the public interest." Facebook already experimented with manipulating voter turnout. As Jaron Lanier writes: "In the published research, Facebook used the cheerful example of boosting voter turnout. But since Facebook is all about targeting and can calculate your political affiliation, among many other things, and since it has also proven it can make people sad, it is likely that social networks can also be used to suppress voters who have been targeted because of how they are likely to vote." Algorithmic transparency would limit that to some degree. So would a mandatory "vanilla" algorithm. 
When Facebook was new, it just showed you what your friends posted, in the order they posted it, with no algorithmic jiggery-pokery. I strongly preferred that, and one fairly nonintrusive form of regulation would be to require something like that as an easy-to-activate option. Facebook would hate this, because jiggery-pokery is their business model, but it wouldn't be hard to implement. Some people might choose to let Facebook manipulate their feed, but even those people would, if this were implemented transparently, always be aware that Facebook was engaging in that manipulation. As part of algorithmic transparency, or in addition to it, regulators might target aspects of social media specifically designed to be addictive. Not only do we traditionally regulate addictive substances ranging from alcohol to heroin, but we also regulate addictive behaviors like gambling. And, as with those other addictions, there's arguably a public-safety angle: Addiction to social media can lead to distracted driving and other unsafe behavior, as well as inattention to work, relationships, etc. A study of social media users aged 19–32 by researchers at the University of Pittsburgh School of Medicine found much higher rates of depression among the heaviest users. (We've even seen "neurobiological programming" to respond to smartphone notices used as a courtroom defense to charges of distracted driving, though so far unsuccessfully.) Given that companies invest substantial effort and money into making their apps addictive, it seems as if there's something to regulate there. And, in fact, Anne Longfield, the Children's Commissioner of England, has called for such regulation already in the UK, with support from Health Secretary Jeremy Hunt. Most of these forms of regulation at least potentially raise First Amendment issues, even to the question of "curating" feeds. 
How those issues might ultimately be resolved is beyond the scope of my discussion here, and may turn out to be quite sticky: Can Facebook or Twitter persuasively claim First Amendment rights as publishers while simultaneously seeking Section 230 immunity by claiming that the things they publish come from someone else? # **Knowing nothing makes you easy to manipulate. Lack of relevant life experience makes you easy to manipulate. So maybe people should know more?** And leaving aside the First Amendment discussion, which tends to devolve into a technical argument about what courts will do, there is the broader free speech argument. Free speech in America is, or at least has been, a cultural value, not merely a narrow legal one. And the calls for regulation are a problem that way. As Peter Suderman writes in _The New York Times,_ "Given the unanticipated reach and influence of these companies, this view is perhaps understandable. But it is mistaken and even dangerous, because at its core it is a view that speech — the primary use for these platforms — is not an individual right, but a collective good that should be subject to political control." He's absolutely right about that, which is why I favor other approaches to regulating social media – though I note that social media companies themselves seem to regard the speech of their users as a collective good subject to their own control. ## Other Approaches Leaving aside various forms of content regulation, is there anything else that can or should be done? Well, we might better achieve the goals of regulation by regulating something other than speech. Although antitrust is out of fashion, the huge tech companies constitute interlocking monopolies in various fields, and often support one another against competitors – as Paypal, for example, cut off money transfers to YouTube competitor BitChute, and Twitter competitor Gab. 
And, more significantly for our purposes, it's the walled-garden character of these services, coupled with their monopoly status, that brings many of the dangers people complain about. When destructive content goes on Twitter or Facebook, or when cascading waves of hysteria hit these services, they can spread to the limit of their user base. But if Twitter or Facebook were competing with five or ten other similar services, or maybe even two or three, this sort of thing would be more likely to damp out, after the fashion of the old, loosely coupled blogosphere. In addition, competition would promote greater attention to matters of privacy, algorithmic integrity, and so on because users could more easily leave for another service. Right now, if you don't like Facebook or Twitter, there's no substantial alternative, and if you decide to leave Facebook for Instagram, you're just leaving for another Facebook property with a slightly different user interface. Antitrust regulation would also dilute the political power of these big companies, and that's a real issue. Old-time monopolies like those broken up by Teddy Roosevelt concentrated economic power (in industries like railroads, steel, or oil) and gained political power as a result. But the very nature of social media companies' monopolies amplifies their political power even before they start hiring lobbyists. As Columbia Law Professor Tim Wu notes in his new book, _The Curse of Bigness: Antitrust in the New Gilded Age,_ "industry concentration leads to political corruption: Big monopolies aren't just an economic threat: They're a political threat. Because they're largely free of market constraints, they don't have to put all their energy into making a better product for less money. Instead, they put a lot of their energy into political manipulation to protect their monopoly." An industry made up of 500 companies might want government protection, but it's harder to get them to agree on a lobbying campaign. 
One made up of three companies, or one, can do so, and be sure that it will reap all the rewards of its effort. Thus, as Wu notes, "The more concentrated the industry, the more corrupt we can expect the political process to be." And, as he points out, these fears (and the realities) of huge companies wielding unchecked political power motivated the antitrust crusaders of a century ago every bit as much as concern about prices. (As the first Justice John Marshall Harlan wrote to his former law partner Augustus Willson in 1905, "Indeed, the greatest injury to the integrity of our social organization comes from the enormous power of corporations. We must have corporations. We could not get along without them, but we must see that they do not corrupt our government and its institutions.") In total, Wu reports, "Facebook managed to string together 67 unchallenged acquisitions, which seems impressive unless you consider that Amazon undertook 91 and Google got away with 214 (a few of which were conditioned). In this way, the tech industry became essentially composed of just a few giant trusts: Google for search and related industries, Facebook for social media, Amazon for online commerce." And these new tech monsters have a one-two punch that Standard Oil lacked: not only do they control immense wealth and important industries, but their fields of operation – which give them enormous control over communications, including communications about politics – also give them direct political power that in many ways exceeds that of previous monopolies. As Wu writes: "Big tech is ubiquitous, seems to know too much about us, and seems to have too much power over what we see, hear, do, and even feel. It has reignited debates over who really rules, when the decisions of just a few people have great influence over everyone." 
Rather than focusing on the content of what individuals post on social media, regulators might better focus on breaking up these behemoths, policing anticompetitive collusion among them, and in general ensuring that their powers are not abused. This approach, rooted in antitrust law, would raise no First Amendment or free speech problems, and would address many of the most significant complaints about social media. ## Building Immunity As I mentioned early on in talking about early cities and disease, the easy spread of pathogens was one factor, but another was malnutrition. Poorly nourished populations tended to do much worse in the face of epidemics than those that were well fed. There is probably an analogy in the social media world too. # **Almost always, the organization's reaction is channeled through social media specialists, and social media specialists are the _very_ last people who are going to tell their bosses that it's safe to ignore social media. But, in fact, it usually is.** People succumb to disinformation or mass hysteria in part because they're predisposed to. But what predisposes them? Well, some of it's just being human: We're all emotional beings to one degree or another, and all of us are capable of getting irrational in the right circumstances. But just as everyone can get sick, some people are more resistant to disease than others, so there may be some things we can do to boost people's resistance to the downsides of social media. So maybe we should nourish people's minds and make them more resistant. ## Mental Nutrition In bragging about how he manipulated the political news media, Obama foreign policy advisor Ben Rhodes described them this way: "Most of the outlets are reporting on world events from Washington. The average reporter we talk to is 27 years old, and their only reporting experience consists of being around political campaigns. That's a sea change. They literally know nothing." Knowing nothing makes you easy to manipulate. 
Lack of relevant life experience makes you easy to manipulate. So maybe people should know more? I've written elsewhere about the failures of our educational system, but in a time when two-thirds of millennials don't know what Auschwitz is, it's not crazy to think that our populace could be toughened up when it comes to mental nutrition. Basic knowledge of civics is poor, knowledge of history is limited and politicized, and the once-traditional canon of western literature, myth, and philosophy no longer holds sway. People who know more – note, not necessarily people who are "more educated" in the contemporary sense – are harder to fool. If we wanted a populace that was more resistant to propaganda and hysteria, we'd be educating people better. So why aren't we? ## Defensive Memes Short of reforming the entire educational system – a noble goal, but a big one – is there anything else we can do? Well, short-term education in critical thinking would be helpful. When I was in an enrichment class in elementary school, they taught us using The Propaganda Game, a component of Layman Allen's educational games. The Propaganda Game explained different types of propaganda ploys, then presented various efforts in which we were supposed to identify the techniques being used. I remember recognizing those techniques all over major media after that. Updating this approach to the current day probably wouldn't be hard – there's an online version of The Propaganda Game now – and exposing as many people as possible to it would be useful. That sort of education would be very helpful today, I think. News stories often suggest that today's youth are "savvy consumers" of Internet news, but there's more evidence for the "consumers" part than for the "savvy." Training people in critical reading and critical thinking would go a long way toward minimizing the danger of social media misinformation and hysteria. 
It seems to me that the educational system in the mid-20th century, when social media was entirely unforeseen, trained people better for skepticism and thoughtfulness than the educational system today, when social media is everywhere. And perhaps, like ancient cities that didn't understand the germ theory of disease but nonetheless acquired decent sanitation by rule of thumb, we will come upon some social arrangements that will reduce, if not eliminate, the dangers of social media. Generally speaking, human societies find a way to deal with new challenges. It is likely that we will do the same with social media, or perhaps that the evolution of communications technology will leave social media as obsolete as the telegraph, leaving its problems behind – in exchange, no doubt, for an entirely new batch. ## Acquired Resistance And no doubt people will learn on their own. As recounted earlier, Jon Ronson started out enthusiastic about Twitter shame mobs – they seemed anti-hierarchical and democratic. But after a while, they seemed more mobby and cruel. As the excitement wears off, we can expect social media to be viewed differently. And in my own observation, younger people seem less enthusiastic, and more fearful, of social media than people did a few years ago when Facebook and Twitter were new. As I write this, Facebook and Twitter seem to be stagnating in terms of new users, and some see them facing potential financial problems in the future. As early cities grew, people gradually adapted to the disease environment – or perhaps more accurately, the people with weaker immune systems died off and failed to reproduce – and the population over time became more resistant to disease. Diseases didn't go away, but they went from epidemic to endemic, with once-lethal plagues reduced to tolerable childhood diseases. ## The Power of Doing Nothing Perhaps we'll achieve a similar state with regard to social media. 
And perhaps the most productive path to such immunity and adaptation is the realization that what happens on social media isn't actually that important. There is a strong sense in which, as Noah Millman wrote in _The Week_ recently, what happens on Twitter isn't real. Millman comments: _When Twitter "blows up" in response to something controversial associated with the organization, does that demand a forthright response to prevent serious harm to the organization's reputation? And how can you predict in advance what will spark such a storm?_ _It's extremely hard to know — because Twitter, like the financial markets, is also a chaotic system, and hence inherently unpredictable. In the face of that uncertainty, the default for many organizations is to react defensively, but it's not clear that defensiveness is effective, in either the short or long term...._ _What if they followed a PR strategy that presumed that, in the Twitter era, the baseline level of negative publicity is always going to be higher than it used to be — and that the presumption should be that the publicity has few real consequences in monetary terms. Firing off an angry tweet is the second-easiest thing in the world to do, the only thing easier being liking someone else's angry tweet. If that's all that's happening, then what's happening really isn't real._ Millman notes that _The New York Times_ followed such a strategy when it faced demands to fire columnist Ross Douthat over a column sympathetic to the old WASP aristocracy. At one level, he notes, it suggests the _Times_ supports Douthat. But, he observes: _In another sense, though, what it suggests is that the_ Times _is confident that a wave of Twitter outrage is not actually a threat to their bottom line. Their readers may write nasty comments, but they aren't actually going to leave. 
In fact, they don't even actually want Douthat to leave, because the opportunity to vent their outrage is part of the experience they came for, whether they actually read his piece or just heard about it on Twitter._ I think there is deep wisdom in this approach. Social media (especially Twitter) is full of sound and fury, but usually it signifies, well, not much. People are angry on social media (especially Twitter) in no small part because so many people go there in order to be angry. Once the anger is discharged online, it's very unusual for people to actually follow it up with concrete actions in the real world. One reason why social media has the impact it has is because the people who run big organizations grant it more power than it really possesses. Lots of journalists are on Twitter and write about it (after all, writing about what happens on Twitter doesn't even require you to leave your office or make a phone call, and the quotes are already online, ready to cut and paste). With that coverage, what happens on social media _seems_ important. And at every big organization, whether it's a corporation, a government agency, or a university, there's now some sort of social media specialist. Almost always, the organization's reaction is channeled through social media specialists, and social media specialists are the _very_ last people who are going to tell their bosses that it's safe to ignore social media. But, in fact, it usually is. If organizations routinely imposed a three-day or seven-day waiting period on responding to social media storms, they'd usually find – and by "usually," I mean "almost always" – that by the time the waiting period had passed the outrage mob would be gone, distracted by the next shiny object. (Or the one after that.) It's also a good idea to have principles. If you have a rule like "we don't fire people over social media posts," then you don't have to engage and explain each case. 
(Or, for that matter, if you have a rule like "employees aren't allowed to use social media.") Without rules, you find yourself trying to make explanations to a mob that doesn't want explanations, only capitulation. And your capitulation, likely as not, will just set off an opposite-but-equal mob angry that you gave in. Widespread adoption of these principles would substantially tame the Twitter outrage mobs. Like two-year-olds, they scream in no small part because they know someone is listening. If you want to continue the disease analogy, social media is like those diseases that kill by causing the victim's own immune system to go into overdrive. A less fevered response might be healthier. # **The presumption is that, overall, truth will win out most of the time. The danger of monopoly organs like Facebook or Twitter is that they will selectively silence some of those voices and amplify others.** As for reducing the spread of misinformation, well, that will take more than inaction. The ignorance of the average voter, rational or otherwise, is of long standing, and a review of newspapers from our nation's first century will not reveal any Golden Age of sober, nonpartisan factuality. And already, the widespread reporting on "fake news" has presumably encouraged people to be at least moderately more skeptical of what they see and read online. At any rate, here, too, breakups seem in order. The "marketplace of ideas" approach to free political speech has always relied on a wide variety of different views from a wide variety of different speakers, many of which will inevitably be wrong or even dishonest. The presumption is that, overall, truth will win out most of the time. The danger of monopoly organs like Facebook or Twitter is that they will selectively silence some of those voices and amplify others. Encouraging these tech behemoths to police "bad" content only makes that more likely. 
With a greater diversity of social media platforms, the risk of systemic bias – the sort that's most likely to steer elections, and society, in a particular direction via falsehoods – is reduced. With more platforms, more news can filter through the cracks, and people can compare coverage from one to another. (This is especially true if, as they should be, antitrust laws are applied to prevent collusion among platform providers.) With people's attention split among more platforms, too, chain-reaction hysteria will be reduced. ## Conclusion And that, in a way, is the bottom line. Policing the content of social media speech, beyond a very basic level of blocking viruses and the like, is a bad idea. The more involved and granular the policing becomes, the worse an idea it is, because it looks more and more like political censorship, which is what it will inevitably become. Policing platforms, and collusion among them, however, is likely to do more good than censorship. Antitrust scrutiny of monopolies and collusion will do more for the integrity of social media, and the protection of society from hysteria and misinformation, than regulation of content. And such antitrust regulation doesn't raise the same First Amendment and free speech problems. An approach based on antitrust and competition will preserve free speech while reducing social media abuses. As social media grows more pervasive, and more obviously destructive, the pressure for regulation is sure to grow. Better to regulate in a way that preserves free speech, and that doesn't empower tech oligarchs. © 2019 by Glenn Harlan Reynolds All rights reserved. No part of this publication may be reproduced, stored in a retrieval system, or transmitted, in any form or by any means, electronic, mechanical, photocopying, recording, or otherwise, without the prior written permission of Encounter Books, 900 Broadway, Suite 601, New York, New York, 10003.
First American edition published in 2019 by Encounter Books, an activity of Encounter for Culture and Education, Inc., a nonprofit, tax exempt corporation. Encounter Books website address: www.encounterbooks.com Manufactured in the United States and printed on acid-free paper. The paper used in this publication meets the minimum requirements of ANSI/NISO Z39.48‒1992 (R 1997) ( _Permanence of Paper_ ). FIRST AMERICAN EDITION LIBRARY OF CONGRESS CATALOGING-IN-PUBLICATION DATA IS AVAILABLE
<gh_stars>1-10
# Command-line client that queries Yahoo! Transit (transit.yahoo.co.jp)
# and pretty-prints the returned route details to stdout.
import re
import requests
from lxml.html import HtmlComment
from pyquery import PyQuery as q

# First run of digits and comma separators in a fare string, e.g. "1,234".
_INT = re.compile(r'(\d|,)+')


def _parse_fare(f):
    # Pull the numeric fare out of free-form text; ' -' when none is found.
    m = _INT.search(f)
    if m is None:
        return ' -'
    return ' =Y= {}'.format(m.group(0))


def _no_comment(i):
    # Wrap each child element in PyQuery, skipping HTML comment nodes.
    for o in i:
        if isinstance(o, HtmlComment):
            continue
        yield q(o)


class RouteDisplay:
    """Renders one parsed Yahoo! Transit result page to stdout."""

    def __init__(self, options):
        # With show_all_stops disabled, silence the per-stop printer by
        # shadowing the method with a no-op on this instance.
        if not options['show_all_stops']:
            self._format_stops = lambda _: None

    def format_root(self, root):
        # Each '.routeDetail' element on the result page is one candidate route.
        for dtl in root('.routeDetail'):
            print('=' * 80)
            for item in _no_comment(dtl):
                self._format_item(item)

    def _format_item(self, item):
        # Dispatch on the element's CSS class; unknown classes are surfaced
        # so users can report unhandled page markup.
        cls = item.attr('class')
        if cls == 'station':
            self._format_station(item)
        elif cls == 'fareSection':
            self._format_fare_sec(item)
        elif cls == 'fareSection express':
            self._format_fare_exp(item)
        elif cls == 'fare':
            self._format_fare(item)
        elif cls == 'access':
            self._format_access(item)
        elif cls == 'access walk':
            self._format_walk(item)
        else:
            print(' X-> ', item.attr('class'), 'Please report this message')

    def _format_station(self, sta):
        # Station name followed by its arrival/departure times.
        print(sta('dl dt').text())
        print(' ', sta('.time li').text())

    def _format_fare_sec(self, fare_sta):
        # A fare section is a container; recurse into its children.
        for item in _no_comment(fare_sta.children()):
            self._format_item(item)

    def _format_fare_exp(self, fare_sta):
        # Like a fare section, but 'fare' children are limited-express fares.
        for item in _no_comment(fare_sta.children()):
            if item.attr('class') == 'fare':
                print(_parse_fare(item.text()), 'for limited express')
                continue
            self._format_item(item)

    def _format_fare(self, fare):
        print(_parse_fare(fare.text()))

    def _format_stops(self, acc):
        # Intermediate stops (time and name). Replaced by a no-op in __init__
        # when options['show_all_stops'] is falsy.
        for stop in _no_comment(acc('.stop dl')):
            print(' ', stop('dt').text(), stop('dd').text())

    def _format_access(self, acc):
        # One transport leg: line name, then optional platform and stop count.
        print(' ', acc('.transport div').text())
        platform_text = acc('.platform').text()
        if platform_text:
            print(' ', platform_text)
        stop_num = acc('.btnStopNum').text()
        if stop_num:
            print(' ', stop_num)
        self._format_stops(acc)

    def _format_walk(self, w):
        print('... Walk ...')


def find_route(src, dst, dt, options):
    """Query Yahoo! Transit for routes from src to dst at datetime dt.

    options is a dict of booleans ('shinkansen', 'limited_express',
    'show_all_stops') controlling the query and the output verbosity.
    Results are printed to stdout, framed by a header line.
    """
    def _opt_to_char(opt):
        # The web form encodes boolean options as '1'/'0'.
        return '1' if options[opt] else '0'
    head_info = '{} ==> {} at {}'.format(src, dst, dt.strftime('%Y-%m-%d %H:%M'))
    print(head_info)
    dt_str = dt.strftime('%Y%m%d%H%M')
    # NOTE(review): parameters mirror Yahoo! Transit's search form; 'm1'/'m2'
    # appear to be the tens and units digit of the minute field — confirm
    # against the live form before changing.
    root = q(str(requests.get(
        'https://transit.yahoo.co.jp/search/result',
        params={
            'flatlon': '',
            'from': src,
            'tlatlon': '',
            'to': dst,
            'viacode': '',
            'via': '',
            'y': dt_str[:4],
            'm': dt_str[4:6],
            'd': dt_str[6:8],
            'hh': dt_str[8:10],
            'm1': dt_str[10:11],
            'm2': dt_str[11:12],
            'ticket': 'ic',
            'expkind': '1',
            'ws': '3',
            's': '0',
            'al': '1',
            'shin': _opt_to_char('shinkansen'),
            'ex': _opt_to_char('limited_express'),
            'hb': '1',
            'lb': '1',
            'sr': '1',
            'kw': '',
        }).content, 'utf-8'))
    RouteDisplay(options).format_root(root)
    print('=' * 80)
    print(head_info)
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 16:28:40 2019

@author: Administrator
"""


class Solution:
    """Missing-ranges / summary-ranges helpers (LeetCode 163 & 228)."""

    def findMissingRanges(self, nums: list, lower: int, upper: int) -> list:
        """Return the ranges of [lower, upper] not covered by the sorted list nums.

        A single missing value renders as "x"; a longer gap as "lo->hi".

        Rewritten as one linear scan. The original materialised
        set(range(lower, upper + 1)) — O(upper - lower) time and memory,
        intractable for 32-bit bounds — relied on unspecified set iteration
        order, and mishandled boundary cases via special-casing (e.g.
        nums == [lower, upper] returned [] even when interior values were
        missing, and a singleton gap at a bound rendered as "x->x").
        """
        missing = []
        prev = lower - 1  # sentinel one below the interval
        # The extra sentinel one above the interval flushes the trailing gap.
        for cur in list(nums) + [upper + 1]:
            if cur - prev >= 2:  # at least one value is missing in between
                lo, hi = prev + 1, cur - 1
                missing.append(str(lo) if lo == hi else '{}->{}'.format(lo, hi))
            prev = cur
        return missing

    def summaryRanges(self, nums: list) -> list:
        """Collapse a sorted list of distinct ints into runs.

        Returns a list whose items are bare ints (singleton runs) or
        two-element lists [start, end] (runs of consecutive values) — the
        same mixed format the original produced, without its O(n^2)
        insert/pop merging pass.
        """
        if not nums:
            return nums
        runs = []
        start = prev = nums[0]
        for cur in nums[1:]:
            if cur - prev == 1:
                prev = cur  # extend the current run
                continue
            runs.append(start if start == prev else [start, prev])
            start = prev = cur
        runs.append(start if start == prev else [start, prev])
        return runs


# Ad-hoc driver: only the last assignment is in effect.
solu = Solution()
nums, lower, upper = [0, 1, 3, 50, 75], 0, 99,
nums, lower, upper = [], 0, 99,
nums, lower, upper = list(range(0, 100)), 0, 99
nums, lower, upper = list(range(0, 99)), 0, 99
nums, lower, upper = [2147483647], 0, 2147483647
nums, lower, upper = [-2147483648, 2147483647], -2147483648, 2147483647
print(solu.findMissingRanges(nums, lower, upper))
/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// AICPU kernel for Orgqr: reconstructs the explicit orthogonal/unitary matrix
// Q from the Householder reflectors (input x) and scaling factors (input tau)
// produced by a QR factorisation (LAPACK orgqr/ungqr semantics).
#include "orgqr.h"

#include "Eigen/Dense"
#include "cpu_kernel_utils.h"
#include "utils/eigen_tensor.h"
#include "utils/kernel_util.h"
#include <numeric>
#include <iostream>

using namespace Eigen;

namespace {
const uint32_t kOutputNum = 1;
const uint32_t kInputNum = 2;
const char *kOrgqr = "Orgqr";
const double ZERO = 0.;
const uint32_t kTWO = 2;
// Below this byte count the batch is processed serially; between the two
// thresholds at most 4 worker cores are used.
constexpr int64_t kParallelDataNums = 18 * 1024;
constexpr int64_t kParallelDataNumsMid = 32 * 1024;

// Switch-case body for real dtypes: run OrgqrCompute<TYPE> and bail on error.
#define ORGQR_COMPUTE(DTYPE, TYPE, CTX)                 \
  case (DTYPE): {                                       \
    uint32_t result = OrgqrCompute<TYPE>(CTX);          \
    if (result != KERNEL_STATUS_OK) {                   \
      KERNEL_LOG_ERROR("Orgqr kernel compute failed."); \
      return result;                                    \
    }                                                   \
    break;                                              \
  }

// Same as above but dispatches to the complex variant (conjugate transpose).
#define ORGQR_COMPUTE_COMPLEX(DTYPE, TYPE, CTX)         \
  case (DTYPE): {                                       \
    uint32_t result = OrgqrComputeComplex<TYPE>(CTX);   \
    if (result != KERNEL_STATUS_OK) {                   \
      KERNEL_LOG_ERROR("Orgqr kernel compute failed."); \
      return result;                                    \
    }                                                   \
    break;                                              \
  }
}  // namespace

namespace aicpu {
// Entry point: validates inputs, then dispatches on the element dtype.
uint32_t OrgqrCpuKernel::Compute(CpuKernelContext &ctx) {
  // check params
  KERNEL_HANDLE_ERROR(NormalCheck(ctx, kInputNum, kOutputNum), "Orgqr check input and output number failed.");
  KERNEL_HANDLE_ERROR(OrgqrCheck(ctx), "[%s] check params failed.", kOrgqr);
  auto data_type = ctx.Input(0)->GetDataType();
  switch (data_type) {
    ORGQR_COMPUTE(DT_FLOAT, float, ctx)
    ORGQR_COMPUTE(DT_DOUBLE, double, ctx)
    ORGQR_COMPUTE_COMPLEX(DT_COMPLEX64, std::complex<float_t>, ctx)
    ORGQR_COMPUTE_COMPLEX(DT_COMPLEX128, std::complex<double_t>, ctx)
    default:
      KERNEL_LOG_ERROR("Orgqr kernel data type [%s] not support.", DTypeStr(data_type).c_str());
      return KERNEL_STATUS_PARAM_INVALID;
  }
  return KERNEL_STATUS_OK;
}

// Shape validation: x must be rank >= 2 with rows >= cols, tau's last
// dimension must not exceed x's column count, and batch dims must agree.
uint32_t OrgqrCpuKernel::OrgqrCheck(CpuKernelContext &ctx) {
  std::vector<int64_t> shape_x = ctx.Input(0)->GetTensorShape()->GetDimSizes();
  size_t shape_size = shape_x.size();
  KERNEL_CHECK_FALSE((shape_size > 1), KERNEL_STATUS_PARAM_INVALID, "Input x must be at least rank 2.")
  KERNEL_CHECK_FALSE((shape_x[shape_size - kTWO] > 0), KERNEL_STATUS_PARAM_INVALID,
                     "Dimension [%zu] of input x must be at least 1, but [%zu].", shape_size - kTWO,
                     shape_x[shape_size - kTWO])
  KERNEL_CHECK_FALSE((shape_x[shape_size - 1] > 0), KERNEL_STATUS_PARAM_INVALID,
                     "Dimension [%zu] of input x must be at least 1, but [%zu].", shape_size - 1,
                     shape_x[shape_size - 1])
  KERNEL_CHECK_FALSE((shape_x[shape_size - kTWO] >= shape_x[shape_size - 1]), KERNEL_STATUS_PARAM_INVALID,
                     "Dimension [%zu] of input x must be bigger than dimension [%zu], when input x has rank [%zu].",
                     shape_size - kTWO, shape_size - 1, shape_size)
  std::vector<int64_t> shape_tau = ctx.Input(1)->GetTensorShape()->GetDimSizes();
  size_t shape_tau_size = shape_tau.size();
  KERNEL_CHECK_FALSE((shape_x[shape_size - 1] >= shape_tau[shape_tau_size - 1]), KERNEL_STATUS_PARAM_INVALID,
                     "Dimension [%zu] of input tau must be less than [%zu], but [%zu].", shape_tau_size - 1,
                     shape_x[shape_size - 1], shape_tau[shape_tau_size - 1])
  if (shape_size > kTWO) {
    // Batched input: leading (batch) dimensions of x and tau must match.
    KERNEL_CHECK_FALSE((shape_x[0] == shape_tau[0]), KERNEL_STATUS_PARAM_INVALID,
                       "Dimension 0 of input tau must equal Dimension 0 of input x when input has batch")
  }
  return KERNEL_STATUS_OK;
}

// Real-valued computation. For each matrix in the batch, accumulate the p
// Householder reflectors H_k = I - tau_k * v_k * v_k^T (v_k taken from
// column k of x, with its leading entry forced to 1) into an m x m identity,
// then emit the first n columns as Q.
template <typename T>
uint32_t OrgqrCpuKernel::OrgqrCompute(CpuKernelContext &ctx) {
  auto *x = reinterpret_cast<T *>(ctx.Input(0)->GetData());
  auto *tau = reinterpret_cast<T *>(ctx.Input(1)->GetData());
  auto *y = reinterpret_cast<T *>(ctx.Output(0)->GetData());
  std::vector<int64_t> shape_x = ctx.Input(0)->GetTensorShape()->GetDimSizes();
  size_t shape_size = shape_x.size();
  size_t m = shape_x[shape_size - kTWO];  // rows per matrix
  size_t n = shape_x[shape_size - 1];     // columns per matrix
  std::vector<int64_t> shape_tau = ctx.Input(1)->GetTensorShape()->GetDimSizes();
  size_t p = *(shape_tau.end() - 1);      // number of reflectors per matrix
  size_t size_mn = m * n;
  size_t matrix_num = ctx.Input(0)->NumElements() / size_mn;
  int64_t data_size = ctx.Input(0)->NumElements() * sizeof(T);
  // NOTE(review): "MartrixXd" keeps the original (misspelled) local alias.
  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MartrixXd;
  typedef Eigen::Matrix<T, Eigen::Dynamic, 1> VectorXd;
  if (data_size <= kParallelDataNums) {
    // Small workload: serial loop over the batch.
    for (size_t i = 0; i < matrix_num; i++) {
      Eigen::Map<MartrixXd> martrix_y(y + i * m * n, m, n);
      Eigen::Map<MartrixXd> martrix_x(x + i * m * n, m, n);
      MartrixXd tmp = MartrixXd::Identity(m, m);
      Eigen::Map<VectorXd> vector_tau(tau + i * p, p, 1);
      for (size_t k = 0; k < p; k++) {
        // Reflector vector: subdiagonal part of column k, implicit leading 1.
        VectorXd vector_v = martrix_x.block(k, k, m - k, 1);
        vector_v[0] = 1;
        // Apply H_k from the right to the trailing columns only.
        tmp.rightCols(m - k) =
          tmp.rightCols(m - k) - vector_tau(k) * (tmp.rightCols(m - k) * vector_v) * vector_v.transpose();
      }
      martrix_y = tmp.leftCols(n);
    }
  } else {
    uint32_t min_core_num = 1;
    uint32_t max_core_num = std::max(min_core_num, aicpu::CpuKernelUtils::GetCPUNum(ctx) - 2);
    if (data_size <= kParallelDataNumsMid) {
      max_core_num = std::min(max_core_num, 4U);  // up to 4 cpu cores
    }
    if (max_core_num > matrix_num) {
      max_core_num = matrix_num;
    }
    // Shard over whole matrices; each shard repeats the serial algorithm.
    auto shard_qr = [&](size_t start, size_t end) {
      for (size_t i = start; i < end; i++) {
        Eigen::Map<MartrixXd> martrix_y(y + i * m * n, m, n);
        Eigen::Map<MartrixXd> martrix_x(x + i * m * n, m, n);
        MartrixXd tmp = MartrixXd::Identity(m, m);
        Eigen::Map<VectorXd> vector_tau(tau + i * p, p, 1);
        for (size_t k = 0; k < p; k++) {
          VectorXd vector_v = martrix_x.block(k, k, m - k, 1);
          vector_v[0] = 1;
          tmp.rightCols(m - k) =
            tmp.rightCols(m - k) - vector_tau(k) * (tmp.rightCols(m - k) * vector_v) * vector_v.transpose();
        }
        martrix_y = tmp.leftCols(n);
      }
    };
    if (max_core_num == 0) {
      KERNEL_LOG_ERROR("max_core_num could not be 0.");
    }
    KERNEL_HANDLE_ERROR(CpuKernelUtils::ParallelFor(ctx, matrix_num, matrix_num / max_core_num, shard_qr),
                        "Orgqr Compute failed.");
  }
  return KERNEL_STATUS_OK;
}

// Complex-valued variant: identical structure to OrgqrCompute, but the
// reflector update uses the conjugate transpose (adjoint) so H_k stays
// unitary over the complex field.
template <typename T>
uint32_t OrgqrCpuKernel::OrgqrComputeComplex(CpuKernelContext &ctx) {
  auto *x = reinterpret_cast<T *>(ctx.Input(0)->GetData());
  auto *tau = reinterpret_cast<T *>(ctx.Input(1)->GetData());
  auto *y = reinterpret_cast<T *>(ctx.Output(0)->GetData());
  std::vector<int64_t> shape_x = ctx.Input(0)->GetTensorShape()->GetDimSizes();
  size_t shape_size = shape_x.size();
  size_t m = shape_x[shape_size - kTWO];
  size_t n = shape_x[shape_size - 1];
  std::vector<int64_t> shape_tau = ctx.Input(1)->GetTensorShape()->GetDimSizes();
  size_t p = *(shape_tau.end() - 1);
  size_t size_mn = m * n;
  size_t matrix_num = ctx.Input(0)->NumElements() / size_mn;
  int64_t data_size = ctx.Input(0)->NumElements() * sizeof(T);
  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> MartrixXd;
  typedef Eigen::Matrix<T, Eigen::Dynamic, 1> VectorXd;
  if (data_size <= kParallelDataNums) {
    for (size_t i = 0; i < matrix_num; i++) {
      Eigen::Map<MartrixXd> martrix_y(y + i * m * n, m, n);
      Eigen::Map<MartrixXd> martrix_x(x + i * m * n, m, n);
      MartrixXd tmp = MartrixXd::Identity(m, m);
      Eigen::Map<VectorXd> vector_tau(tau + i * p, p, 1);
      for (size_t k = 0; k < p; k++) {
        VectorXd vector_v = martrix_x.block(k, k, m - k, 1);
        vector_v[0] = 1;
        // adjoint() (conjugate transpose) instead of transpose() — the only
        // difference from the real-valued path.
        tmp.rightCols(m - k) =
          tmp.rightCols(m - k) - vector_tau(k) * (tmp.rightCols(m - k) * vector_v) * vector_v.adjoint();
      }
      martrix_y = tmp.leftCols(n);
    }
  } else {
    uint32_t min_core_num = 1;
    uint32_t max_core_num = std::max(min_core_num, aicpu::CpuKernelUtils::GetCPUNum(ctx) - 2);
    if (data_size <= kParallelDataNumsMid) {
      max_core_num = std::min(max_core_num, 4U);  // up to 4 cpu cores
    }
    if (max_core_num > matrix_num) {
      max_core_num = matrix_num;
    }
    auto shard_qr = [&](size_t start, size_t end) {
      for (size_t i = start; i < end; i++) {
        Eigen::Map<MartrixXd> martrix_y(y + i * m * n, m, n);
        Eigen::Map<MartrixXd> martrix_x(x + i * m * n, m, n);
        MartrixXd tmp = MartrixXd::Identity(m, m);
        Eigen::Map<VectorXd> vector_tau(tau + i * p, p, 1);
        for (size_t k = 0; k < p; k++) {
          VectorXd vector_v = martrix_x.block(k, k, m - k, 1);
          vector_v[0] = 1;
          tmp.rightCols(m - k) =
            tmp.rightCols(m - k) - vector_tau(k) * (tmp.rightCols(m - k) * vector_v) * vector_v.adjoint();
        }
        martrix_y = tmp.leftCols(n);
      }
    };
    if (max_core_num == 0) {
      KERNEL_LOG_ERROR("max_core_num could not be 0.");
    }
    KERNEL_HANDLE_ERROR(CpuKernelUtils::ParallelFor(ctx, matrix_num, matrix_num / max_core_num, shard_qr),
                        "Orgqr Compute failed.");
  }
  return KERNEL_STATUS_OK;
}

REGISTER_CPU_KERNEL(kOrgqr, OrgqrCpuKernel);
}  // namespace aicpu
def handle_custom_chars(html, escape=True):
    """Two-phase escaping for bracket and quote characters.

    With escape=True, raw characters are replaced by private placeholder
    tokens; with escape=False, those placeholders are expanded into the
    corresponding HTML entities.
    """
    # (raw character, placeholder token, HTML entity)
    mappings = (
        ('[', '##91!', '&#91;'),
        (']', '##93!', '&#93;'),
        ("'", '#apos!', '&apos;'),
        ('"', '#quot!', '&quot;'),
    )
    result = html
    for raw, placeholder, entity in mappings:
        if escape:
            result = result.replace(raw, placeholder)
        else:
            result = result.replace(placeholder, entity)
    return result
def calculate_statistical_difference(selection: pd.DataFrame, selected_player: str) -> None:
    """Render a Streamlit section testing whether the selected player's scores
    differ significantly from the average score of all other players.

    Runs a one-sample Wilcoxon signed-rank test against the mean of all
    non-zero scores from the other players' score columns; requires more
    than 15 matches, otherwise an insufficient-data message is shown.
    """
    st.header("**♟** Statistical Difference **♟**")
    st.markdown("Here, you can see if there is a statistical difference between the scores"
                "of the selected person and the selected game, and the average score of all other "
                "players for the same game.")
    # Keep only per-player score columns, excluding 'has_score' flag columns
    # and the selected player's own score column.
    score_selection = selection.loc[:, [column for column in selection.columns
                                        if (('score' in column) &
                                            ('has_score' not in column) &
                                            (selected_player + '_score' not in column))]]
    score_selection = score_selection.to_numpy()
    # Mean over non-zero entries only — zeros are treated as "did not play".
    # NOTE(review): this also drops legitimate scores of exactly 0 — confirm intended.
    average_score = np.mean(score_selection[np.nonzero(score_selection)])
    player_values = selection[selected_player + "_score"].values
    if len(player_values) > 15:
        # One-sample test: player's scores shifted by the constant group mean.
        p = wilcoxon(player_values - average_score)
        if p[1] < 0.05:  # p-value at the conventional 5% significance level
            st.write("{}🔹 According to a **one-sample Wilcoxon signed-rank test**".format(SPACES))
            st.write("{}🔹 there **is** a significant difference between the scores of **{}** "
                     "(mean score of {}) ".format(SPACES, selected_player,
                                                  round(np.mean(player_values), 2)))
            st.write("{}🔹 and the average (score of {}).".format(SPACES, round(average_score, 2)))
        else:
            st.write("{}🔹 According to an one-sample Wilcoxon signed-rank test there "
                     "is no significant difference between the scores of {} (mean score "
                     "of {}) and the average (score of {}).".format(SPACES, selected_player,
                                                                    round(np.mean(player_values), 2),
                                                                    round(average_score, 2)))
    else:
        st.write("{}🔹 Insufficient data to run statistical test. A minimum of "
                 "**15** matches is necessary.".format(SPACES))
    st.write(" ")
class HeuristicAgent:
    """
    This is our agent. It decides our actions!

    Tabular Q-learning agent for the PLE Pong environment.  Game states are
    discretised by dividing coordinates by `divisor` and rounding; after
    training, the greedy policy is frozen into `self.decisions`.
    """

    def __init__(self, actions, divisor):
        # state -> chosen action, filled in by q_learning() after training.
        self.decisions = {}
        # Coarseness of the state discretisation (pixels per bucket).
        self.divisor = divisor
        self.actions = list(actions)[:len(actions)-1]  # eliminate 'none' action

    def pick_action(self, gameState):
        # Discretise the raw state exactly the way training did.
        simple_gameState = (np.round(gameState['player_y']/self.divisor),
                            np.round(gameState['ball_y']/self.divisor),
                            np.round(gameState['ball_x']/self.divisor))
        if simple_gameState in self.decisions:
            return self.decisions[simple_gameState]
        else:
            # Unseen state: fall back to a uniformly random action.
            return random.choice(self.actions)

    def q_learning(self, learning_rate=1, discount_factor=0.99, epsilon=0.05,
                   episodes=1000, strategy="smart", screen_w=512, screen_h=384,
                   cpu_speed_ratio=0.5, players_speed_ratio = 0.5,
                   ball_speed_ratio=0.75, max_score=3):
        """Train by tabular Q-learning on PLE's Pong, then freeze the greedy
        policy into self.decisions.  With strategy == "random" no training is
        done and decisions stays empty (pick_action then acts randomly)."""
        # Q-learning variables
        Q = {}
        agent_combinations = {}  # (state, action) pairs tried at least once
        alpha, discount = learning_rate, discount_factor
        scores = []
        # Game variables
        game = Pong(screen_w,screen_h,cpu_speed_ratio,players_speed_ratio,MAX_SCORE=max_score) # 384,288
        p = PLE(game, fps=30, display_screen=False)
        p.init()
        if (not (strategy=="random")):
            for episode in range(episodes):
                p.reset_game()
                while not p.game_over():
                    # Discretised current state.
                    agent_state = (np.round(p.getGameState()['player_y']/self.divisor),
                                   np.round(p.getGameState()['ball_y']/self.divisor),
                                   np.round(p.getGameState()['ball_x']/self.divisor))
                    agent_action = get_action(strategy, Q, agent_combinations, agent_state, self.actions, epsilon)
                    agent_combinations[agent_state, agent_action] = True
                    p.act(agent_action)
                    reward = game.getReward()
                    # Discretised successor state after acting.
                    next_state = (np.round(p.getGameState()['player_y']/self.divisor),
                                  np.round(p.getGameState()['ball_y']/self.divisor),
                                  np.round(p.getGameState()['ball_x']/self.divisor))
                    next_action = best_action(strategy, Q, next_state, self.actions)
                    # Lazily initialise unseen Q entries to 0.
                    if (agent_state, agent_action) not in Q:
                        Q[agent_state, agent_action] = 0
                    if (next_state, next_action) not in Q:
                        Q[next_state, next_action] = 0
                    # Standard Q-learning update rule.
                    Q[agent_state, agent_action] = (1-alpha)*Q[agent_state,agent_action] + alpha*(reward+discount*Q[next_state,next_action])
            # Freeze the greedy policy: per state, keep the highest-valued action.
            # NOTE(review): indentation reconstructed from a flattened source —
            # this loop is placed after training, inside the non-random branch.
            for states in Q:
                if states[0] not in self.decisions:
                    self.decisions[states[0]] = states[1]
                else:
                    if (Q[states[0],self.decisions[states[0]]] < Q[states]):
                        self.decisions[states[0]] = states[1]


def get_action(player_strategy, Q, seen_combinations, state, actions, epsilon):
    # "dumb" always picks action 0; every other strategy is epsilon-greedy.
    if player_strategy == "dumb":
        return 0
    else:
        return epsilon_greedy(player_strategy, Q, seen_combinations, state, actions, epsilon)


def epsilon_greedy(player_strategy, Q, seen_combinations, state, possible_actions, epsilon):
    # Prefer actions never tried in this state (optimistic exploration).
    not_tried_yet = []
    for action in possible_actions:
        if (state, action) not in seen_combinations:
            not_tried_yet.append(action)
    if not_tried_yet != []:
        return random.choice(not_tried_yet)
    # Otherwise explore with probability epsilon, else exploit the best action.
    if random.random() < epsilon:
        return random.choice(possible_actions)
    else:
        return best_action(player_strategy, Q, state, possible_actions)


def best_action(player_strategy, Q, state, possible_actions):
    # Arg-max over Q for this state; unseen pairs are initialised to 0 as a
    # side effect (Q is mutated).
    best_action = None
    best_action_reward = -float('inf')
    if player_strategy == "dumb":
        return 0
    for action in possible_actions:
        if (state, action) not in Q:
            Q[state, action] = 0
        if Q[state, action] > best_action_reward:
            best_action_reward = Q[state, action]
            best_action = action
    return best_action
/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package org.jnode.net;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.StandardSocketOptions;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.nio.charset.Charset;
import java.util.LinkedList;
import java.util.concurrent.CompletableFuture;
import java.util.logging.Level;
import org.jnode.core.JNode.NContext;
import org.jnode.core.Looper;
import org.jnode.core.NotYourThreadException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Non-blocking TCP socket bound to a single {@link Looper} event-loop thread.
 * All callbacks (data, drain, close, error, established) are dispatched on
 * that thread; most mutating methods enforce this via {@code checkThread()}.
 * Lifecycle state: 0 = new, 1 = connected/registered, 2 = down.
 *
 * @author daniele
 */
public class NSocket implements Closeable {

    // NOTE(review): logger is keyed on Looper.class, not NSocket.class —
    // looks like a copy/paste artifact but is preserved here.
    private final static Logger log = LoggerFactory.getLogger(Looper.class);
    private SocketChannel sc;
    private OnErrorHandler errorHandler;
    private OnDataHandler dataHandler;
    private onDrainHandler drainHandler;
    private OnCloseHandler closeHanlder;
    private OnConnectionEstablishedHandler enstablishedHandler;
    private SelectionKey sk;
    private final NContext ncontext;
    private final Looper looper;
    // 0 = not connected yet, 1 = up, 2 = closed/down.
    private int state = 0;
    private static final Charset charset = Charset.forName("utf-8");
    // Buffered output stream; writes are queued and flushed via the selector.
    public final NOutput out;

    public NSocket(NContext ncontext) {
        this.ncontext = ncontext;
        out = new NSockOut();
        looper = ncontext.requestLooper();
    }

    // Puts the channel in non-blocking mode with TCP_NODELAY; failures are
    // reported through the returned future rather than thrown.
    private CompletableFuture<SocketChannel> configure(SocketChannel sc) {
        CompletableFuture<SocketChannel> cf = new CompletableFuture<>();
        try {
            sc.configureBlocking(false);
            sc.setOption(StandardSocketOptions.TCP_NODELAY, true);
            cf.complete(sc);
        } catch (Exception e) {
            cf.completeExceptionally(e);
        }
        return cf;
    }

    /**
     * Opens a channel and starts an asynchronous connect to {@code dest}.
     * The returned future completes once the key is registered with the
     * looper; the actual handshake finishes in {@code SocketChannelEvent}.
     */
    public CompletableFuture<NSocket> connect(InetSocketAddress dest) {
        try {
            sc = SocketChannel.open();
            sc.configureBlocking(false);
            sc.setOption(StandardSocketOptions.TCP_NODELAY, true);
            sc.connect(dest);
            return looper.register(sc, SelectionKey.OP_CONNECT, new SocketChannelEvent())
                    .thenCompose((SelectionKey rr) -> {
                        sk = rr;
                        state = 1;
                        return CompletableFuture.completedFuture(NSocket.this);
                    });
        } catch (Throwable e) {
            CompletableFuture cf = new CompletableFuture();
            cf.completeExceptionally(e);
            return cf;
        }
    }

    /**
     * Accepts a pending connection from {@code ssc} and adopts it.
     * Fails the future (NullPointerException) when no connection is pending.
     */
    public CompletableFuture<Void> setSocketChannel(ServerSocketChannel ssc) {
        try {
            SocketChannel sc = ssc.accept();
            if (sc == null) throw new NullPointerException();
            return setSocketChannel(sc, SelectionKey.OP_READ);
        } catch (Throwable e) {
            CompletableFuture cf = new CompletableFuture();
            cf.completeExceptionally(e);
            return cf;
        }
    }

    // Adopt an already-connected channel: configure, register, mark up.
    private CompletableFuture<Void> setSocketChannel(SocketChannel sc, int op) {
        this.sc = sc;
        return configure(sc).thenCompose((SocketChannel sock) -> looper.register(sc, op, new SocketChannelEvent()))
                .thenAccept((SelectionKey rr) -> {
                    sk = rr;
                    state = 1;
                });
    }

    // --- Fluent handler registration (single handler per event kind). ---

    public NSocket onConnectionEstablished(OnConnectionEstablishedHandler handler) {
        this.enstablishedHandler = handler;
        return this;
    }

    public NSocket onError(OnErrorHandler handler) {
        this.errorHandler = handler;
        return this;
    }

    public NSocket onClose(OnCloseHandler handler) {
        this.closeHanlder = handler;
        return this;
    }

    public NSocket onData(OnDataHandler handler) {
        this.dataHandler = handler;
        return this;
    }

    public NSocket onDrain(onDrainHandler handler) {
        this.drainHandler = handler;
        return this;
    }

    /** Closes the socket; must be called on the looper thread. */
    @Override
    public void close() {
        checkThread();
        sendConnectionDown(DOWN_REASON.LOCAL_CLOSE);
        try {
            sc.close();
        } catch (IOException ex) {
            looper.schedule(() -> {
                _onError(ex);
            });
        }
    }

    /** Why the connection went down. */
    public static enum DOWN_REASON {
        IO_EXCEPTION, REMOTE_CLOSE, LOCAL_CLOSE
    };
    private DOWN_REASON downReason = null;

    /** Reason recorded by the first teardown, or null while still up. */
    public DOWN_REASON getCloseReason() {
        return downReason;
    }

    // Idempotent teardown: cancels the key, records the reason, and schedules
    // the close callback on the looper thread.
    private void sendConnectionDown(DOWN_REASON reason) {
        if (state == 2) return;
        sk.cancel();
        state = 2;
        downReason = reason;
        looper.schedule(() -> {
            _onClose();
        });
    }

    /**
     * Reads from the channel into {@code bb}.
     * Returns -1 on any failure or end-of-stream; errors/teardowns are
     * reported asynchronously through the registered handlers.
     */
    public int read(ByteBuffer bb) {
        if (state != 1) return -1;
        if (!sc.isConnected()) {
            sendConnectionDown(DOWN_REASON.LOCAL_CLOSE);
            return -1;
        }
        try {
            int readed = sc.read(bb);
            if (readed == -1) {
                // Orderly shutdown from the remote peer.
                sendConnectionDown(DOWN_REASON.REMOTE_CLOSE);
                return -1;
            }
            return readed;
        } catch (IOException ex) {
            looper.schedule(() -> {
                _onError(ex);
            });
            return -1;
        }
    }

    // Drains the pending write queue into the channel; returns false when the
    // queue was empty or the connection was torn down, true when OP_WRITE was
    // (re)armed because data remains or was just written.
    private boolean processWriteOps() {
        if (readWriteOps.isEmpty()) return false;
        try {
            while (!readWriteOps.isEmpty()) {
                WriteOps wo = readWriteOps.getFirst();
                int count = wo.fillChannel(sc);
                if (wo.isDone()) {
                    wo.release();
                    readWriteOps.poll();
                }
                // Kernel buffer full: stop and wait for the next OP_WRITE.
                if (count == 0) break;
            }
            sk.interestOps(sk.interestOps() | SelectionKey.OP_WRITE);
            return true;
        } catch (Throwable ex) { //CancelledKeyException
            sendConnectionDown(DOWN_REASON.IO_EXCEPTION);
            return false;
        }
    }

    /** Runs {@code r} on the looper thread, now if already on it, else scheduled. */
    public void executeSafe(Runnable r) {
        if (Thread.currentThread().getId() != looper.getId()) {
            looper.schedule(r);
            return;
        }
        try {
            r.run();
        } catch (Throwable t) {
            log.error("Error. Uncatched exception ", t);
        }
    }

    // --- Null-safe handler invocations; user exceptions are logged, never thrown. ---

    private void _onEnstablished() {
        if (enstablishedHandler == null) return;
        try {
            enstablishedHandler.onConnectionEstablished(this);
        } catch (Throwable t) {
            log.error("Uncatched error", t);
        }
    }

    private void _onClose() {
        if (closeHanlder == null) return;
        try {
            closeHanlder.onClose(this);
        } catch (Throwable t) {
            log.error("Uncatched error", t);
        }
    }

    private void _onError(Exception bb) {
        if (errorHandler == null) {
            return;
        }
        try {
            errorHandler.onError(bb);
        } catch (Throwable t) {
            log.error("Uncatched error", t);
        }
    }

    private void _onData() {
        if (dataHandler == null) {
            return;
        }
        try {
            dataHandler.onDataIncoming(this);
        } catch (Throwable t) {
            log.error("Uncatched error", t);
        }
    }

    private void _onDrain() {
        if (drainHandler == null) {
            return;
        }
        try {
            drainHandler.onDrain();
        } catch (Throwable t) {
            log.error("Uncatched error", t);
        }
    }

    /**
     * Selector callback: translates readiness flags into the socket's
     * connect / read / write(drain) handling.
     *
     * @author daniele
     */
    private class SocketChannelEvent implements Looper.ChannelEvent {

        @Override
        public void onEvent(SelectionKey a) {
            if (!a.isValid()) return;
            if (a.isConnectable()) {
                try {
                    sc.finishConnect();
                    // Connected: switch interest to reads and notify.
                    sk.interestOps(SelectionKey.OP_READ);
                    _onEnstablished();
                } catch (IOException ex) {
                    _onError(ex);
                    sendConnectionDown(DOWN_REASON.IO_EXCEPTION);
                }
            }
            if (a.isReadable()) {
                try {
                    if (!sc.isConnected()) {
                        _onClose();
                        a.cancel();
                    } else {
                        _onData();
                    }
                } catch (Exception e) {
                    _onError(e);
                }
            }
            if (a.isValid() && a.isWritable()) {
                if (!processWriteOps()) {
                    // Queue fully drained: disarm OP_WRITE and signal drain.
                    if (sk.isValid() && state == 1) {
                        sk.interestOps(sk.interestOps() & (~SelectionKey.OP_WRITE));
                        _onDrain();
                    }
                }
            }
        }
    }

    // Guards methods that must only run on the owning looper thread.
    private void checkThread() {
        if (Thread.currentThread().getId() != looper.getId()) throw new NotYourThreadException();
    }

    // Pending outbound frames, drained head-first by processWriteOps().
    private final LinkedList<WriteOps> readWriteOps = new LinkedList<>();

    /**
     * Buffered output implementation: bytes accumulate in a pooled WriteOps
     * frame and are handed to the write queue on flush or overflow.
     */
    private class NSockOut extends NOutput {

        private int estimedFrameSize = 4096;
        private WriteOps wo;

        protected NSockOut() {
            super();
            wo = WriteOps.createFromBBCache(ncontext.getByteBufferCache(), estimedFrameSize);
        }

        @Override
        public void flush() {
            if (wo.position() == 0) return;
            checkThread();
            createWriteOps(estimedFrameSize);
        }

        // Enqueue a finished frame and kick the writer.
        private void appendWriteOps(WriteOps writeOps) {
            if (writeOps.isOpen()) writeOps.closeAndFlip();
            readWriteOps.add(writeOps);
            processWriteOps();
        }

        // Hand off the current frame (or recycle it when empty) and start a
        // fresh one from the buffer cache.
        private void createWriteOps(int size) {
            if (wo.position() == 0) wo.release();
            else appendWriteOps(wo);
            wo = WriteOps.createFromBBCache(ncontext.getByteBufferCache(), estimedFrameSize);
        }

        @Override
        public void setEstimedFrameSize(int size) {
            checkThread();
            if (size <= 100) size = 100;  // enforce a sane minimum frame size
            estimedFrameSize = size;
        }

        @Override
        public void write(ByteBuffer bb) {
            checkThread();
            if (Thread.currentThread().getId() != looper.getId()) {
                looper.schedule(() -> {
                    flush();
                });
                return;
            }
            if (!bb.hasRemaining()) return;
            // Preserve ordering: flush buffered bytes before the wrapped buffer.
            flush();
            appendWriteOps(WriteOps.wrap(bb));
        }

        @Override
        public void write(int b) {
            checkThread();
            if (wo.remaining() == 0) createWriteOps(estimedFrameSize);
            wo.write(b);
        }

        @Override
        public void write(byte[] b) {
            checkThread();
            if (wo.remaining() < b.length) createWriteOps(Math.max(b.length, estimedFrameSize));
            wo.write(b);
        }

        @Override
        public void write(byte[] b, int off, int len) {
            checkThread();
            if (wo.remaining() < len) createWriteOps(Math.max(len, estimedFrameSize));
            wo.write(b, off, len);
        }

        /**
         * No effect on this implementation
         * (delegates to the owning socket's close()).
         */
        @Override
        public void close() {
            NSocket.this.close();
        }
    }
}
// SetUDPSocketOptions set controls FlagDst,FlagInterface to UDPConn. func SetUDPSocketOptions(conn *net.UDPConn) error { if ip4 := conn.LocalAddr().(*net.UDPAddr).IP.To4(); ip4 != nil { return ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) } return ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) }
def InPeriod(*args):
    # Thin SWIG wrapper: forwards all positional arguments to the native
    # ElCLib binding.  Argument and return semantics are defined by the
    # underlying C++ elclib_InPeriod function.
    return _ElCLib.elclib_InPeriod(*args)
import { Color, OgmaSimpleType } from '@ogma/common';
import { style as styler, Styler } from '@ogma/styler';

/**
 * Converts a value to its string form, optionally wrapped in the given
 * terminal color via the styler.
 *
 * @param value the value to render
 * @param color color to apply (defaults to white)
 * @param style the Styler instance used to apply the color
 * @param useColor when false, the value is stringified unstyled
 */
export function colorize(
  value: OgmaSimpleType,
  color: Color = Color.WHITE,
  style: Styler = styler,
  useColor = true,
): string {
  if (!useColor) {
    return value.toString();
  }
  // Reverse-map the enum value to its name (e.g. Color.RED -> "red"),
  // which is the styler method key.
  const colorName = Color[color].toLowerCase();
  return style[colorName].apply(value).toString();
}
/**
 * vCard AGENT type: represents another person acting on behalf of the vCard's
 * owner, stored either as a URI reference or as an embedded vCard (at most
 * one of the two is expected to be set at a time).
 *
 * @author George El-Haddad
 * <br/>
 * Feb 4, 2010
 *
 */
public class AgentType extends Type implements AgentFeature {

    private static final long serialVersionUID = -1260393269571892216L;

    // Exactly one of these two representations is normally non-null.
    private URI agentUri = null;
    private VCard agent = null;
    private List<AgentParameterType> agentParameterTypes = null;

    public AgentType() {
        super(EncodingType.EIGHT_BIT, ParameterTypeStyle.PARAMETER_VALUE_LIST);
        agentParameterTypes = new ArrayList<AgentParameterType>();
    }

    /** Constructs an agent referenced by URI. */
    public AgentType(URI agentUri) {
        super(EncodingType.EIGHT_BIT);
        setAgentURI(agentUri);
        agentParameterTypes = new ArrayList<AgentParameterType>();
    }

    /** Constructs an agent embedded inline as a vCard. */
    public AgentType(VCard agent) {
        super(EncodingType.EIGHT_BIT);
        setAgent(agent);
        agentParameterTypes = new ArrayList<AgentParameterType>();
    }

    /**
     * {@inheritDoc}
     */
    public VCard getAgent()
    {
        return agent;
    }

    /**
     * {@inheritDoc}
     */
    public Iterator<AgentParameterType> getAgentParameterTypes()
    {
        return agentParameterTypes.listIterator();
    }

    /**
     * {@inheritDoc}
     */
    public URI getAgentURI()
    {
        return agentUri;
    }

    /**
     * {@inheritDoc}
     */
    public void setAgent(VCard agent) {
        this.agent = agent;
    }

    /**
     * {@inheritDoc}
     */
    public void setAgentURI(URI agentUri) {
        this.agentUri = agentUri;
    }

    /**
     * {@inheritDoc}
     */
    public boolean hasAgent()
    {
        return agentUri != null || agent != null;
    }

    /**
     * {@inheritDoc}
     */
    public boolean isURI()
    {
        return agentUri != null;
    }

    /**
     * {@inheritDoc}
     */
    public boolean isInline()
    {
        return agent != null;
    }

    /**
     * {@inheritDoc}
     */
    public void addAgentParameterType(AgentParameterType agentParameterType) {
        agentParameterTypes.add(agentParameterType);
    }

    /**
     * {@inheritDoc}
     */
    public void removeAgentParameterType(AgentParameterType agentParameterType) {
        agentParameterTypes.remove(agentParameterType);
    }

    /**
     * {@inheritDoc}
     */
    public boolean containsAgentParameterType(AgentParameterType agentParameterType)
    {
        return agentParameterTypes.contains(agentParameterType);
    }

    /**
     * {@inheritDoc}
     */
    public boolean hasAgentParameterTypes()
    {
        return !agentParameterTypes.isEmpty();
    }

    /**
     * {@inheritDoc}
     */
    public void clearAgentParameterTypes() {
        agentParameterTypes.clear();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getTypeString()
    {
        return VCardType.AGENT.getType();
    }

    /**
     * {@inheritDoc}
     *
     * NOTE(review): equality is delegated to hashCode(), which in turn hashes
     * toString() — so two agents are equal iff their string forms collide.
     */
    @Override
    public boolean equals(Object obj)
    {
        if(obj != null) {
            if(obj instanceof AgentType) {
                if(this == obj || ((AgentType)obj).hashCode() == this.hashCode()) {
                    return true;
                }
                else {
                    return false;
                }
            }
            else {
                return false;
            }
        }
        else {
            return false;
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode()
    {
        return Util.generateHashCode(toString());
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String toString()
    {
        // Comma-separated dump of encoding, URI path, inline vCard,
        // parameter types and id, wrapped in "ClassName[ ... ]".
        StringBuilder sb = new StringBuilder();
        sb.append(this.getClass().getName());
        sb.append("[ ");
        if(encodingType != null) {
            sb.append(encodingType.getType());
            sb.append(",");
        }

        if(agentUri != null) {
            sb.append(agentUri.getPath());
            sb.append(",");
        }

        if(agent != null) {
            sb.append(agent.toString());
            sb.append(",");
        }

        if(!agentParameterTypes.isEmpty()) {
            for(int i=0; i < agentParameterTypes.size(); i++) {
                sb.append(agentParameterTypes.get(i).getType());
                sb.append(",");
            }
        }

        if(super.id != null) {
            sb.append(super.id);
            sb.append(",");
        }

        sb.deleteCharAt(sb.length()-1);	//Remove last comma.
        sb.append(" ]");
        return sb.toString();
    }

    /**
     * {@inheritDoc}
     *
     * NOTE(review): the inline agent is deep-cloned, but parameter types are
     * copied by reference; a URI that fails to re-parse is silently nulled.
     */
    @Override
    public AgentFeature clone()
    {
        AgentType cloned = new AgentType();

        if(agentUri != null) {
            try {
                cloned.setAgentURI(new URI(agentUri.toString()));
            }
            catch(URISyntaxException e) {
                cloned.setAgentURI(null);
            }
        }

        if(agent != null) {
            cloned.setAgent(agent.clone());
        }

        if(!agentParameterTypes.isEmpty()) {
            for(int i = 0; i < agentParameterTypes.size(); i++) {
                cloned.addAgentParameterType(agentParameterTypes.get(i));
            }
        }

        cloned.setParameterTypeStyle(getParameterTypeStyle());
        cloned.setEncodingType(getEncodingType());
        cloned.setID(getID());
        return cloned;
    }
}
/**
 * Per-child layout information associated with OpenStreetMapView.
 * Positions a child view at a geographic location with an alignment anchor
 * and an additional pixel offset.
 */
public static class LayoutParams extends ViewGroup.LayoutParams implements MapViewLayouts {

    /**
     * The location of the child within the map view.
     */
    public ILatLng geoPoint;

    /**
     * The alignment the alignment of the view compared to the location.
     */
    public int alignment;

    // Extra pixel offsets applied after the alignment anchor is resolved.
    public int offsetX;
    public int offsetY;

    /**
     * Creates a new set of layout parameters with the specified width, height and location.
     *
     * @param width the width, either {@link #FILL_PARENT}, {@link #WRAP_CONTENT} or a fixed
     * size
     * in pixels
     * @param height the height, either {@link #FILL_PARENT}, {@link #WRAP_CONTENT} or a fixed
     * size
     * in pixels
     * @param aGeoPoint the location of the child within the map view; a null value
     * defaults to LatLng(0, 0)
     * @param aAlignment the alignment of the view compared to the location {@link
     * #BOTTOM_CENTER},
     * {@link #BOTTOM_LEFT}, {@link #BOTTOM_RIGHT} {@link #TOP_CENTER},
     * {@link #TOP_LEFT}, {@link #TOP_RIGHT}
     * @param aOffsetX the additional X offset from the alignment location to draw the child
     * within
     * the map view
     * @param aOffsetY the additional Y offset from the alignment location to draw the child
     * within
     * the map view
     */
    public LayoutParams(final int width, final int height, final ILatLng aGeoPoint,
            final int aAlignment, final int aOffsetX, final int aOffsetY) {
        super(width, height);
        if (aGeoPoint != null) {
            this.geoPoint = aGeoPoint;
        } else {
            // Guard against null: fall back to the (0, 0) coordinate.
            this.geoPoint = new LatLng(0, 0);
        }
        this.alignment = aAlignment;
        this.offsetX = aOffsetX;
        this.offsetY = aOffsetY;
    }

    /**
     * Since we cannot use XML files in this project this constructor is useless. Creates a new
     * set of layout parameters. The values are extracted from the supplied attributes set and
     * context.
     *
     * @param c the application environment
     * @param attrs the set of attributes fom which to extract the layout parameters values
     */
    public LayoutParams(final Context c, final AttributeSet attrs) {
        super(c, attrs);
        this.geoPoint = new LatLng(0, 0);
        this.alignment = BOTTOM_CENTER;
    }

    // Copy constructor; note it does not initialise geoPoint/alignment
    // (geoPoint stays null until assigned by the caller).
    public LayoutParams(final ViewGroup.LayoutParams source) {
        super(source);
    }
}
// rather than call kevent(2) each time, we can likely optimize and // call it once for like 100 items fn next(&mut self) -> Option<Self::Item> { if !self.watcher.started { return None; } get_event(self.watcher, None) }
(AP) – A British opposition lawmaker says Parliament should stop the “madness” and overturn the result of a referendum calling for Britain to leave the European Union. Labour legislator David Lammy says Thursday’s national vote was non-binding and “our sovereign Parliament needs to now vote on whether we should quit the EU.” Wake up. We do not have to do this. We can stop this madness through a vote in Parliament. My statement below pic.twitter.com/V8f9Yo1TZd — David Lammy (@DavidLammy) June 25, 2016 He says some “leave” supporters now regret their votes and Parliament should vote on Britain’s EU membership. He said “we can stop this madness and bring this nightmare to an end. … Let us not destroy our economy on the basis of lies and the hubris of (‘leave’ leader) Boris Johnson.” Constitutional experts say Parliament cannot easily ignore the will of the people. Alan Renwick, deputy director of University College London’s Constitution Unit, says “in legal theory that is possible. In practice, that is absolutely not possible.”
/*! * @class StationArchiveLine "stationArchiveLine.hpp" "sff/hypoinverse2000/stationArchiveLine.hpp" * @brief Defines a year-2000-compatibile station archive line. * @copyright Ben Baker (University of Utah) distributed under the MIT license. */ class StationArchiveLine { public: StationArchiveLine(); StationArchiveLine(const StationArchiveLine &station); [[maybe_unused]] StationArchiveLine(StationArchiveLine &&station) noexcept; void unpackString(const std::string &line); [[nodiscard]] std::string packString() const noexcept; StationArchiveLine& operator=(const StationArchiveLine &station); StationArchiveLine& operator=(StationArchiveLine &&station) noexcept; ~StationArchiveLine(); void clear() noexcept; void setNetworkName(const std::string &network) noexcept; [[nodiscard]] std::string getNetworkName() const; [[nodiscard]] bool haveNetworkName() const noexcept; void setStationName(const std::string &station) noexcept; [[nodiscard]] std::string getStationName() const; [[nodiscard]] bool haveStationName() const noexcept; void setChannelName(const std::string &channel) noexcept; [[nodiscard]] std::string getChannelName() const; [[nodiscard]] bool haveChannelName() const noexcept; void setLocationCode(const std::string &location) noexcept; [[nodiscard]] std::string getLocationCode() const; [[nodiscard]] bool haveLocationCode() const noexcept; void setEpicentralDistance(double distance); [[nodiscard]] double getEpicentralDistance() const; [[nodiscard]] bool haveEpicentralDistance() const noexcept; void setAzimuth(double azimuth); [[nodiscard]] double getAzimuth() const; [[nodiscard]] bool haveAzimuth() const noexcept; void setTakeOffAngle(double angle); [[nodiscard]] double getTakeOffAngle() const; [[nodiscard]] bool haveTakeOffAngle() const noexcept; void setPPickTime(const SFF::Utilities::Time &pickTime) noexcept; [[nodiscard]] SFF::Utilities::Time getPPickTime() const; [[nodiscard]] bool havePPickTime() const noexcept; void setPRemark(const std::string &remark) 
noexcept; [[nodiscard]] std::string getPRemark() const; [[nodiscard]] bool havePRemark() const noexcept; void setFirstMotion(char firstMotion) noexcept; [[nodiscard]] char getFirstMotion() const; [[nodiscard]] bool haveFirstMotion() const noexcept; void setPResidual(double residual) noexcept; [[nodiscard]] double getPResidual() const; [[nodiscard]] bool havePResidual() const noexcept; void setPWeightCode(uint16_t weightCode) noexcept; [[nodiscard]] int getPWeightCode() const; [[nodiscard]] bool havePWeightCode() const noexcept; void setPWeightUsed(double weightUsed); [[nodiscard]] double getPWeightUsed() const; [[nodiscard]] bool havePWeightUsed() const noexcept; void setPDelayTime(double delay) noexcept; [[nodiscard]] double getPDelayTime() const; [[nodiscard]] bool havePDelayTime() const noexcept; void setPImportance(double importance); [[nodiscard]] double getPImportance() const; [[nodiscard]] bool havePImportance() const noexcept; void setSPickTime(const SFF::Utilities::Time &pickTime) noexcept; [[nodiscard]] SFF::Utilities::Time getSPickTime() const; [[nodiscard]] bool haveSPickTime() const noexcept; void setSRemark(const std::string &remark) noexcept; [[nodiscard]] std::string getSRemark() const; [[nodiscard]] bool haveSRemark() const noexcept; void setSResidual(double residual) noexcept; [[nodiscard]] double getSResidual() const; [[nodiscard]] bool haveSResidual() const noexcept; void setSWeightCode(uint16_t weightCode) noexcept; [[nodiscard]] int getSWeightCode() const; [[nodiscard]] bool haveSWeightCode() const noexcept; void setSWeightUsed(double weightUsed); [[nodiscard]] double getSWeightUsed() const; [[nodiscard]] bool haveSWeightUsed() const noexcept; void setSDelayTime(double correction) noexcept; [[nodiscard]] double getSDelayTime() const; [[nodiscard]] bool haveSDelayTime() const noexcept; void setSImportance(double importance); [[nodiscard]] double getSImportance() const; [[nodiscard]] bool haveSImportance() const noexcept; void 
setAmplitudeMagnitude(double magnitude) noexcept; [[nodiscard]] double getAmplitudeMagnitude() const; [[nodiscard]] bool haveAmplitudeMagnitude() const noexcept; void setAmplitudeMagnitudeWeightCode(int code); [[nodiscard]] int getAmplitudeMagnitudeWeightCode() const; [[nodiscard]] bool haveAmplitudeMagnitudeWeightCode() const noexcept; void setAmplitudeMagnitudeLabel(char label); [[nodiscard]] char getAmplitudeMagnitudeLabel() const; [[nodiscard]] bool haveAmplitudeMagnitudeLabel() const noexcept; void setPeriodOfAmplitudeMeasurement(double period); [[nodiscard]] double getPeriodOfAmplitudeMeasurement() const; [[nodiscard]] bool havePeriodOfAmplitudeMeasurement() const noexcept; void setDurationMagnitude(double magnitude) noexcept; [[nodiscard]] double getDurationMagnitude() const; [[nodiscard]] bool haveDurationMagnitude() const noexcept; void setDurationMagnitudeWeightCode(int code); [[nodiscard]] int getDurationMagnitudeWeightCode() const; [[nodiscard]] bool haveDurationMagnitudeWeightCode() const noexcept; void setDurationMagnitudeLabel(char label); [[nodiscard]] char getDurationMagnitudeLabel() const; [[nodiscard]] bool haveDurationMagnitudeLabel() const noexcept; void setCodaDuration(double duration); [[nodiscard]] double getCodaDuration() const; [[nodiscard]] bool haveCodaDuration() const noexcept; void setDataSourceCode(char code); [[nodiscard]] char getDataSourceCode() const; [[nodiscard]] bool haveDataSourceCode() const noexcept; void setAmplitude(double amplitude); [[nodiscard]] double getAmplitude() const; [[nodiscard]] bool haveAmplitude() const noexcept; void setAmplitudeUnits(AmplitudeUnits units) noexcept; [[nodiscard]] AmplitudeUnits getAmplitudeUnits() const; [[nodiscard]] bool haveAmplitudeUnits() const noexcept; private: class StationArchiveLineImpl; std::unique_ptr<StationArchiveLineImpl> pImpl; }
<gh_stars>0 package main import ( "encoding/json" "fmt" "log" "net/http" "path/filepath" "sort" "strconv" "strings" "github.com/go-rod/rod/lib/utils" ) const mirror = "https://registry.npmmirror.com/-/binary/chromium-browser-snapshots/" func main() { list := getList(mirror) revLists := [][]int{} for _, os := range list { revList := []int{} for _, s := range getList(mirror + os + "/") { rev, err := strconv.ParseInt(s, 10, 32) if err != nil { log.Fatal(err) } revList = append(revList, int(rev)) } sort.Ints(revList) revLists = append(revLists, revList) } rev := largestCommonRevision(revLists) if rev < 969819 { utils.E(fmt.Errorf("cannot match version of the latest chromium from %s", mirror)) } out := utils.S(`// generated by "lib/launcher/revision" package launcher // DefaultRevision for chromium const DefaultRevision = {{.revision}} `, "revision", rev, ) utils.E(utils.OutputFile(filepath.FromSlash("lib/launcher/revision.go"), out)) } func getList(path string) []string { res, err := http.Get(path) utils.E(err) defer func() { _ = res.Body.Close() }() var data interface{} err = json.NewDecoder(res.Body).Decode(&data) utils.E(err) list := data.([]interface{}) names := []string{} for _, it := range list { name := it.(map[string]interface{})["name"].(string) names = append(names, strings.TrimRight(name, "/")) } return names } func largestCommonRevision(revLists [][]int) int { sort.Slice(revLists, func(i, j int) bool { return len(revLists[i]) < len(revLists[j]) }) shortest := revLists[0] for i := len(shortest) - 1; i >= 0; i-- { r := shortest[i] isCommon := true for i := 1; i < len(revLists); i++ { if !has(revLists[i], r) { isCommon = false break } } if isCommon { return r } } return 0 } func has(list []int, i int) bool { index := sort.SearchInts(list, i) return index < len(list) && list[index] == i }
Deposition of DNA-functionalized gold nanospheres into nanoporous surfaces. We report the deposition of DNA-conjugated gold nanospheres into arrays of surface nanopores obtained from hexagonally ordered thin polystyrene-b-poly(methyl methacrylate) (PS-b-PMMA) diblock copolymer films on silicon. The deposition occurs spontaneously from aqueous solution and is driven by either electrostatic interactions or specific DNA hybridization events between the DNA nanospheres and the surface nanopores. To mitigate this spontaneous deposition, we have chemically modified the nanopores with either positively charged aminosilanes or oligonucleotide probe sequences. The deposition of DNA nanospheres into the surface nanopores was characterized by atomic force microscopy (AFM) and X-ray photoelectron spectroscopy (XPS). We have observed preferential immobilization of individual DNA nanospheres within the nanopores, based on the size matching between the two entities. The inclusion density and selectivity of DNA nanosphere deposition into the surface nanopores was found to depend predominantly on the methods through which the nanoporous surfaces were prepared and chemically functionalized.
/** * @author Elly Kitoto (Nerdstone) */ @RunWith(RobolectricTestRunner.class) @Config(application = TestApplication.class, shadows = {CustomFontTextViewShadow.class}) public class BaseFamilyProfileActivityFragmentTest { private BaseFamilyProfileActivityFragment familyProfileActivity; @Before public void setUp() { MockitoAnnotations.initMocks(this); familyProfileActivity = new BaseFamilyProfileActivityFragmentShadow(); BaseFamilyProfileActivityPresenter presenter = spy(new BaseFamilyProfileActivityPresenter(Mockito.mock(FamilyProfileActivityContract.View.class), Mockito.mock(FamilyProfileActivityContract.Model.class), null, "familybaseid")); doNothing().when(presenter).initializeQueries(anyString()); Whitebox.setInternalState(familyProfileActivity, "presenter", presenter); AppCompatActivity activity = Robolectric.buildActivity(AppCompatActivity.class).create().start().get(); Whitebox.setInternalState(familyProfileActivity, "searchView", new EditText(activity)); activity.setContentView(R.layout.activity_family_profile); activity.getSupportFragmentManager().beginTransaction().add(familyProfileActivity, "BaseFamilyProfileActivityFragment").commit(); } @Test public void getMainCondition() { assertEquals(familyProfileActivity.getMainCondition(), " object_relational_id = 'familybaseid' and date_removed is null "); } @Test public void getDefaultSortQuery() { assertEquals(familyProfileActivity.getDefaultSortQuery(), "dod, dob ASC "); } @Test public void setUniqueID() { familyProfileActivity.setUniqueID("unique"); assertEquals(familyProfileActivity.searchView.getText().toString(), "unique"); } @Test public void presenter() { assertNotNull(familyProfileActivity.presenter()); } }
Increased Urge to Gamble Following Near-Miss Outcomes May Drive Purchasing Behaviour in Scratch Card Gambling Previous research into scratch card gambling has highlighted the effects of these games on players’ arousal and affective states. Specifically, near-miss outcomes in scratch cards (uncovering 2 of 3 needed jackpot symbols) have been associated with high levels of physiological and subjective arousal and negative emotional evaluations, including increased frustration. We sought to extend this research by examining whether near-misses prompted increases in gambling urge, and the subsequent purchasing of additional scratch cards. Participants played two scratch cards with varying outcomes with half of the sample experiencing a near-miss for the jackpot prize, and the other half experiencing a regular loss. Players rated their urge to continue gambling after each game outcome, and following the initial playing phase, were then able to use their winnings to purchase additional cards. Our results indicated that near-misses increased the urge to gamble significantly more than regular losses, and urge to gamble in the near-miss group was significantly correlated with purchasing at least one additional card. Although some players in the loss group purchased another card, there was no correlation between urge to gamble and purchasing in this group. Additionally, participants in the near-miss group who purchased additional cards reported higher levels of urge than those who did not purchase more cards. This was not true for the loss group: participants who experienced solely losing outcomes reported similar levels of urge regardless of whether or not they purchased more scratch cards. Despite near-misses’ objective status as monetary losses, the increased urge that follows near-miss outcomes may translate into further scratch card gambling for a subset of individuals . 
Introduction Scratch cards (also referred to as ''instant tickets'' or ''instant win'' games) are a ubiquitous form of gambling in our society. Many different types of scratch-card games exist, but in general, the goal of these games is to uncover matching symbols by removing an opaque film covering. Players typically remove this covering by ''scratching'' it off with the aid of a small coin. Depending on what is matched or uncovered, a certain prize may be attained. In Canada, scratch cards range in price from $1.00 to $30.00, with the majority of cards being in the $3.00 to $10.00 range (M = $5.81; calculated from OLG 2016). The payback percentages of scratch-card games in Ontario range from 59.97 to 70.39% (M = 65.73%; calculated from OLG 2016). Researchers have long focused on the use of these products by youth, with lottery products being a highly sought-after type of gambling for this demographic (Griffiths 2000;Felsher et al. 2004;Donati et al. 2013;Wood and Griffiths 1998). A recently published study of Canadian youth aged 13-19 found that scratch cards were the most common type of regulated gambling behaviour engaged in by these teenagers, with 13.8% of the sample endorsing participation (Elton-Marshall et al. 2016). In light of these findings, researchers have begun to examine in more detail the types of individuals who play these games and the experiences associated with this form of gambling. In a large Canadian survey study (Short et al. 2015) the amount of scratch-card gambling that participants engaged in was negatively correlated with level of education; no other demographic variables (e.g., age, sex, marital status) were meaningfully correlated with frequency of scratch-card play. Another study found that Ontario baby boomers who played scratch cards reported participating in more forms of gambling than those in the cohort who did not report playing these games (Papoff and Norris 2009). 
These authors also found that at-risk/problem gambling prevalence was significantly higher among respondents who purchased scratch cards compared to those who did not. Similarly, a large, 5-year longitudinal study of gambling behaviour in Canada found that instant-winticket gambling (which includes scratch cards) was predictive of problem gambling over time (Williams et al. 2015). Although population estimates for pathological scratch card gambling are low (DeFuentes-Merillas et al. 2003), case study reports of pathological scratch card gamblers do exist (Raposo-Lima et al. 2015), demonstrating that for a small portion of gamblers, these games may be associated with problematic use. Although studies examining player characteristics are informative, examining specific structural aspects of the games themselves may also help to elucidate how these games affect the people who play them. Such knowledge, in turn may provide insight into why these games are so popular with the general population. While typically seen as an innocuous type of gambling, scratch cards nevertheless bear many similarities to more addictive forms of gambling. Specifically, these games share many structural characteristics with slot machines. These include intermittent payout intervals, rapid event frequency, the opportunity for continual play, and near-miss outcomes (Griffiths 1995a(Griffiths , b, 1997Wood and Griffiths 1998). Multiple authors have commented on these resemblances, referring to these games as slot machines in a paper form (Griffiths 1995b(Griffiths , 1997Ariyabuddhiphongs 2011), and consequently a potentially ''hard'' (as opposed to a ''softer'', more innocuous) form of gambling (Griffiths 2002). Of the many surface similarities between slot machines and scratch cards, arguably the most striking are near-misses. Since near-misses have been most rigorously investigated in the context of slot machine play, we turn first to this literature to inform our predictions. 
Slot machines have been associated with high levels of problem gambling (Dowling et al. 2005) for quite some time, and across a wide range of countries (Fisher and Griffiths 1995). Tellingly, the Ontario Problem Gambling Helpline receives more calls identifying slot machine gambling as a concern compared to any other gambling type (Counter and Davey 2006). As previously mentioned, near-misses are a ubiquitous feature of many slot machine games. Reid (1986) defined a near-miss as an outcome that comes close to a win but falls short. A classic near-miss in a slot machine game consists of two of the required jackpot symbols landing on the payline, with the third landing just below or above. In a scratch card game, a near-miss consists of players getting two of the three symbols required to win a jackpot prize, but missing the third. These outcomes create the appearance of coming close to a jackpot, but are nonetheless a losing outcome, in that there is no monetary gain for the player. The effects of near-miss outcomes on gamblers have been particularly well documented in slot machine research, with a wealth of studies describing the effects of these outcomes in human participants (Clark et al. 2013;Dixon et al. 2013;Habib and Dixon 2010), rats (Winstanley et al. 2011) and pigeons (Scarf et al. 2011) Many studies of human gamblers report that slot machine near-misses increase players' physiological arousal, as measured by skin conductance (or electrodermal activity) and heart rate (Clark et al. 2012(Clark et al. , 2013Dixon et al. 2011Dixon et al. , 2013. Increased arousal for nearmiss outcomes may be problematic, as heightened physiological arousal has been identified as a key reinforcer of gambling behaviour (Brown 1986). As such, if arousal is reinforcing, and near-misses trigger an increase in arousal, then it could be that players are being reinforced for losing. 
Heightened arousal that accompanies a lack of goal attainment can result in a paradoxically frustrating, yet highly motivating subjective experience. In line with this notion, some authors have postulated that heightened arousal indicates enhanced motivation (Bradley and Lang 2007). Consistent with the notion that near-misses increase motivation, near-miss outcomes in slot machines have been shown to prolong the amount of time spent gambling (Côté et al. 2003;Kassinove and Schare 2001). Neuroimaging studies allow insight into the mechanisms behind these behavioural effects. Near-miss outcomes in a simulated slot machine task have been found to activate the ventral striatum, an area associated with reward processing (Clark et al. 2009), despite their objective status as a losing outcome. On this same task, participants rated near-miss outcomes as unpleasant, yet still motivating when they had personal control over their wager. These results highlight the paradoxically motivating yet aversive nature of near-miss outcomes, and the behavioural consequences that they have for the gambler (i.e. increased money and time spent gambling). Indeed, research on approach motivation suggests that motivated behaviour can occur in response to negative stimuli (Harmon-Jones et al. 2013). Thus near-misses may activate the ''wanting'' as opposed to the hedonic ''liking'' facet of the reward system (Berridge 2007). Research regarding near-misses in slot machines has prompted us to investigate the effects of near-miss outcomes in scratch cards. We found that players showed heightened physiological arousal as they uncovered the symbols that led to small wins and near-miss outcomes. We showed such effects using both skin conductance (Stange et al. 2016b) and heart rate changes (Stange et al. 2016a). 
Additionally, in these studies near-miss outcomes were consistently rated as the most frustrating and emotionally negative outcome, whereas wins were rated as the most positive and least frustrating. Importantly, scratch card nearmisses also appeared to increase the urge to continue gambling. When student scratch-card gamblers were polled immediately following each outcome, urge to continue gambling was as elevated following near-miss outcomes as it was for small wins of $5.00 (Stange et al. 2016a). These results suggest that scratch card near-misses, even though they are monetary losses, may be capable of encouraging further gambling behaviour much like their slot machine equivalents. Although we have shown increases in arousal, frustration, negative affect, and subjective urge following scratch card near-misses, it remains unknown whether or not experiencing these outcomes would actually prolong gambling behaviour, as in slot machines. In this study our two overarching goals were to: (1) replicate our previous finding that near-miss outcomes trigger increases in the urge to gamble, and (2) assess whether near-misses and their associated heightened urge would prompt participants to actually purchase more scratch cards. We had participants play two custom-made scratch cards. On the first card (Card 1), all participants experienced a loss, a small win and another loss. On the second card (Card 2), one group of participants experienced three consecutive losing games, while the other group experienced two losses, followed by a near miss. Participants were asked to give ratings of their urge to gamble after each outcome. Following game play, participants were given an opportunity to use their winnings (from Card 1) to purchase additional cards. We predicted that participants would experience increases in the urge to gamble following both winning and near-miss outcomes (a replication of our previous findings). 
We also predicted that participants who experienced a near-miss outcome would be more likely than participants who experienced only losses to use their winnings to purchase additional cards. Finally, we predicted that this purchasing behaviour would be attributable to increases in the urge to continue gambling following the near-miss outcome, as compared to regular losing outcomes. Method Participants Participants gave informed written consent before the study began, and all procedures were approved by the University of Waterloo's Office of Research Ethics. Sixty-five undergraduate students were recruited from the University of Waterloo's Research Experience Group in exchange for course credit. All participants were prescreened to ensure that they were at least 18 years of age (the legal age to purchase scratch cards in Ontario), had experience playing scratch cards, and were not currently in or seeking treatment for problem gambling. The average age of the participants was 19.97 years (SD = 1.57), and the sample was predominantly female (51 females, 14 males). One participant was excluded from all analyses due to a procedural error, and six were excluded due to incomplete data (see ''Analytical Strategy'' section). Problem Gambling Severity Index The Problem Gambling Severity Index (PGSI) is a subscale of the Canadian Problem Gambling Index (CPGI), a well-validated screen for gambling problems and overall problem gambling severity in the general population (Ferris and Wynne 2001). This measure was used to characterize our sample; no specific hypotheses concerning problem gambling status were made. Gambling Related Cognitions Scale The Gambling Related Cognitions Scale (GRCS; Raylu and Oei 2004) was administered for purposes peripheral to this study and will not be discussed further. 
Measure of Gambling Urge To assess participants' urge to gamble, we used the following item: ''How would you rate your desire to gamble on a scale from 0 (no desire to gamble) to 100 (overwhelming desire to gamble)?'' (Young et al. 2008). Participants responded by moving a cursor along a linear sliding scale (ranging from 0 to 100) to the location that best reflected their urge to gamble. Scratch Cards The custom made scratch cards were modeled after Cash for Life, a scratch card game currently available in Ontario. In Cash for Life, the player is presented with game-play boxes containing symbols denoting various monetary amounts. To win a prize, a player must uncover three matching symbols within one game. The player then wins the amount specified by the symbol (i.e. three matching $5.00 symbols would mean a win of $5.00). Our game utilized a similar game structure and design in that three matching symbols were needed to win a prize. The cards in this study (described in detail below) were similar in design to those used in previous studies ( Fig. 1; see also Stange et al. 2016a, b). Fig. 1 ''Cash for a Month'' scratch card. The custom made scratch cards employed in this study were designed to mimic a popular scratch card available in Ontario. This card contains two losses (games 1 and 2) and a near-miss for the top prize (game 3) Procedure Participants were brought into the laboratory, where they signed an informed consent letter. Participants then completed the PGSI (Ferris and Wynne 2001) and demographic items on a laptop computer. Following this, participants were told that the game they would be playing was called ''Cash for a Month'', and that it was similar to existing scratch card games available at Ontario retailers. Using an enlarged example of one of the cards, the experimenter showed participants that each scratch card contained three games, and within each game, there were six symbols (Fig. 1). 
The experimenter explained that the goal of the scratch card game was to find three matching symbols within any one of the games on the card; if participants found three matching symbols, they won the corresponding prize. Participants were instructed to uncover the symbols one game at a time, and to scratch each game from left to right, and top to bottom. Participants were told to rate their desire to continue gambling after each game (three ratings per card) using a tablet computer that was provided (Lenovo Ideatab, model A1000). The experimenter also explained that to win the top prize of ''Cash for a Month'' (corresponding to $25.00 a week for 4 weeks, $100.00 total) they would need to uncover three ''MONTH'' symbols within one game (analogous to the ''LIFE'' symbol in Cash for Life). Participants were also told that they would pick a scratch card to play from a tray of approximately 100 scratch cards, and that one of the cards in the tray was the top prize winning card. They were reminded that the odds of winning were approximately 1 in 100 and then told that the top prize had been won in past studies. Importantly, participants were told that the first two cards that they would be playing were free, but that if they won anything on those two cards, they would be able to use their winnings to purchase additional cards later on in the study. Participants were asked if they had any questions about the game structure or rules before continuing. The experimenter then had the participant choose the scratch cards that they would play during the experiment. Participants chose their cards from a display case similar to those found in Ontario lottery retailers and identical to what has been used in previous studies (Stange et al. 2016a, b). The scratch cards were arranged in two trays to facilitate our between-subjects manipulation. In the first tray of cards, all cards contained games with a loss, a small win of $5.00, and another loss. 
The single top prize card was also included in this tray. The card that participants chose from the second tray determined the condition to which the participant was randomly assigned (half loss cards and half near-miss cards). Participants in the loss group chose a card in which all three games were regular losses. Those in the near-miss group chose a card that contained a loss, a second loss, and then a near-miss (two of the three symbols needed to win the jackpot prize). After choosing their cards, the experimenter placed the scratch card in a secure scratching platform (see Stange et al. 2016b for a more detailed description). Participants played the three games on that card, filled out their urge ratings following each game, and repeated this process for their second card. Once they had completed scratching both cards, the experimenter gave the participant their winnings ($5.00) and told them they could purchase additional scratch cards to play if they wished. The experimenter explained that each card cost $2.00, and would be chosen from another display case, but the overall odds of winning the top prize remained unchanged. If participants chose to play another card, the experimenter kept $2.00 of the participant's overall winnings (leaving the participant with $3.00), and let the participant choose another card. Participants then completed the scratch card games and corresponding urge ratings in a similar manner as the first two cards. Any additional cards that participants purchased contained only regular losses comprised of symbol arrangements that participants had not encountered on previous cards. Participants who played a third card were given the option to purchase a fourth card (a cost of $2.00, leaving the participant with $1.00). In sum, if participants chose to not purchase, they left with $5.00, purchasing one additional card meant an overall gain of $3.00, and purchasing two cards left the participant with $1.00. 
No participants in the current sample won the top prize of ''Cash for a Month''. Following the entire game-play portion of the study, participants completed the GRCS. After completing the survey, participants were given their winnings, a feedback letter, and responsible gambling resources. PGSI Scores on the PGSI indicated that 35 participants were non-problem gamblers (score of 0), 27 were low-risk (score of 1-4), 1 was moderate risk (score of 5-7), and 1 participant was a problem gambler (score above 8; Currie et al. 2013). PGSI status was not analyzed further, primarily since no specific predictions were made about the influence of problem gambling status on our dependent variables, but also because of low numbers of problematic gamblers. Purchasing Behaviour Considering all participants, only 31.3% (n = 20) of the total sample of participants (N = 64) elected to purchase at least one additional scratch card with their winnings. In the loss condition, 25.8% (n = 8) of participants purchased at least one additional card. In the near-miss condition, 36.4% (n = 12) of participants purchased at least one additional card. A Chi-square test of independence revealed that these frequencies were not significantly different, X 2 (1, N = 64) = .829, p = .362. Analytical Strategy Of the 65 participants recruited, 6 participants were excluded from any data analyses involving urge to continue gambling ratings due to incomplete or missing urge evaluations. Mean ratings of urge to continue gambling were calculated following each outcome, and compared across groups (loss vs. near-miss). Given the nature of the design (Card 1 contained a loss, a small win, and a loss; Card 2 contained two losses with the third game dependent on condition), we analyzed the cards separately. For each card we conducted a mixed analysis of variance (ANOVA) with game as the repeated factor, and group as the between-subjects factor. 
In the case of tests where sphericity assumptions were violated, corrected degrees of freedom and F values are reported. Post-hoc comparisons were conducted using t tests, and were evaluated at α/m (Bonferroni correction) to control for familywise error rate. Card 1 For Card 1 (loss, small win, loss), this analysis indicated a significant main effect of game, F(2, 112) = 35.00, p < .001, ηp² = .385. Collapsing across group, post hoc analyses (evaluated at α/2 = .025) indicated that the win triggered higher urge ratings than either the loss preceding it, t(57) = 7.65, p < .001, or following it, t(57) = 6.65, p < .001. Importantly, the main effect of group (loss, near-miss) was not significant, F(1, 56) = .001, p = .974. Therefore, there were no pre-existing differences in urge to continue gambling between the groups. The mean urge ratings for Card 1 are shown in Fig. 2a. Card 2 For Card 2, there was no main effect of game, F(1.78, 99.85) = 1.04, p = .35, ηp² = .018. There was a main effect of group, F(1, 56) = 4.07, p = .049, ηp² = .068. The interpretation of these main effects was qualified by a significant interaction between game number and group, F(1.78, 99.85) = 18.96, p < .001, ηp² = .253. This interaction is depicted in Fig. 2b. Post hoc t tests (evaluated at α/3 = .017) indicated there were no significant differences between the groups for the first loss, t(56) = .15, p = .88, or the second loss, t(56) = 1.26, p = .21, but urge ratings at game 3 were significantly higher for those exposed to the near-miss than those exposed to the loss, t(56) = 4.04, p < .001. Relationship Between Urge and Purchase Status To assess whether different scratch-card outcomes in the very last game on Card 2 (loss or near-miss) fostered differences in post-game urge and subsequent scratch card purchasing behaviour, we conducted point-biserial correlations separately for each group (loss, near- Fig. 2 a Card 1 urge ratings. 
Mean urge to continue gambling ratings for participants in the loss and nearmiss conditions. Outcomes 1 and 3 were losses, outcome 2 was a small win of $5.00. b Card 2 urge ratings. Mean urge to continue gambling ratings for participants in the loss and near-miss conditions. Outcomes 1 and 2 were losses, outcome 3 was a loss for those in the loss condition, but a near-miss for the top prize (Cash for a Month) for those in the near-miss condition. Error bars are ±1 SEM miss), correlating post-outcome urge with purchasing behaviour (non-purchasers coded as 0, purchasers as 1). For the near-miss group, urge ratings immediately following the nearmiss were significantly positively correlated with purchasing status, r pb = .49, n = 29, p = .007. For the loss group, however, urge ratings following the loss showed no relationship with purchasing status, r pb = -.018, n = 29, p = .926. Using Fisher's r-to-z transformations, these correlations were significantly different from each other, Z = 1.99, p = .046. As a supplementary means of assessing whether the near-miss-induced elevations in urge actually triggered purchasing behaviour, we compared the urge levels of purchasers to non-purchasers. We reasoned that if near-misses triggered increases in urge for at least some participants, that those participants should be the ones who would be most likely to purchase additional cards. If so, then purchasers should show higher urge levels than nonpurchasers. A between-subjects ANOVA, with group and purchase status as the betweensubjects variables indicated a significant interaction between group and purchase status, F(1, 54) = 4.90, p = .031, g p 2 = .083. Follow-up t tests (evaluated at a/2 = .025) indicated that there were no significant differences in urge between participants who did and did not purchase additional cards in the loss group, t(27) = .09, p = .926. 
However, for participants in the near-miss condition, purchasers showed significantly higher urge ratings than those who did not purchase additional cards, t(27) = 2.92, p = .007. Table 1 displays the means and standard deviations of urge to continue gambling for participants in each condition. Discussion We ran an experiment to determine whether near-misses would trigger increases in gambling urge, and whether this increased desire to continue gambling would translate into participants using their winnings to purchase additional scratch cards. Near-misses dramatically increased the urge to gamble-a finding that replicates our previous study on scratch card players (Stange et al. 2016a). Figure 2a shows that the random assignment of players into the two groups was effective-there were no differences between the urge ratings of the groups prior to the key manipulation (the introduction of the near-miss for one of the groups). Figure 2b shows that the groups continued to show similar urge trajectories for the two losses on Card 2. The groups only diverged following the third game when the key manipulation was delivered (a near-miss for half of the participants, and another loss for the other half of the participants). Those who experienced a loss in their third game showed a decline in their urge to gamble, whereas those who experienced a near miss showed a clear spike in gambling urge. In sum, the finding that scratch card near-misses trigger increases in the urge to gamble is a robust one that replicates across studies using different procedures (e.g., the within-subjects design in Stange et al. 2016a, and the between-subjects design employed in the present study). The effects of near-misses on urge in various gambling forms is at first glance counterintuitive, as they are clearly a monetary loss, yet still enhance the motivation to play. 
Classic interpretations of near-miss effects derived from investigations of slot-machine play focus on the arousing yet frustrating properties of these outcomes. Slot machine nearmisses are consistently reported as being unpleasant outcomes (Clark et al. 2009) that increase physiological arousal (Dixon et al. 2011) and frustration (Dixon et al. 2013). Despite such negative affect, they have been found to prolong slot-machine play (Côté et al. 2003;Kassinove and Schare 2001). Thus a theorized chain of events is as follows: when a player experiences a near-miss, frustration ensues due to having just missed the jackpot prize. This is coupled with an increase in physiological arousal and negative subjective evaluations. Due to this state of heightened frustration and physiological arousal, players are eager to move on to their next available game as quickly as possible leading to increases in the urge to continue gambling. This urge then translates, for at least some players, into prolonged or additional gambling behaviour. In this and previous studies we provide converging evidence for this chain of events in scratch-card play. Near-miss outcomes in scratch cards are associated with increased physiological and subjective arousal, and heightened subjective negative emotion and frustration (Stange et al. 2016a, b). Yet, regardless of their objective monetary status, nearmisses have distinct motivational consequences for the player. In the current study they served to increase the urge to gamble compared to those who were exposed to a standard losing outcome. Our second prediction was that the spikes in urge caused by the near-miss would trigger the purchase of additional scratch cards. Within the group exposed to the near-miss, those who purchased more cards appeared to be those who experienced this spike in urge. The purchasers showed far higher urge ratings following the near-miss than the non-purchasers. 
Furthermore, there was a positive point-biserial correlation between participants' ratings of their urge to gamble following near-misses and their purchasing behaviour. This lends support to the idea that near-misses trigger increases in the urge to gamble, which can in turn prompt some players to buy more cards. An unexpected finding concerned those in the loss group. Despite three successive losses in Card 2, eight participants still purchased at least one more card. In the loss group, urge to gamble was significantly lower than in the near-miss group, and (unlike in the nearmiss group) there were no differences in the urge ratings between purchasers and nonpurchasers. Additionally, urge ratings following losses were uncorrelated with purchasing behaviour. Thus despite not showing an increase in urge to continue gambling, a small subset of people in the loss group still chose to purchase additional cards. This puzzling finding hints at the importance of considering other individual differences among players and how these may relate to purchasing behaviours. Some candidate variables that may be informative include impulsivity (MacLaren et al. 2012) and the closely related concept of delay discounting (Dixon et al. 2003;Callan et al. 2011) in which deficits are strongly related to gambling behaviour. Research examining differences in delay discounting have shown that participants who chose to purchase scratch cards from an experimenter in an unrelated experimental context discounted delayed rewards at a steeper rate than those who did not purchase scratch cards (Callan et al. 2011). The inability of some individuals to delay larger, later rewards and instead engage in less-rewarding behaviour in the short term may explain some differences in purchasing behaviour within the current study. Individual differences like delay discounting could also potentially account for why some participants with low urges to gamble nonetheless purchased an extra card (i.e. 
the purchasers in the loss group), and might also explain why some participants with high urge to gamble following a near-miss might have been able to refrain from making a purchase (they may have been able to discount the slim possibility of earning money immediately, for the surety of having an extra $5.00 to spend that evening). Although not the manipulation of primary interest, the data for Card 1 clearly shows that small wins in scratch card play trigger increases in the urge to gamble-a finding that replicates previous results from our laboratory. For both groups urge was relatively low following the first loss, then rose dramatically following the small win, and dropped once again following another loss. When considering the effects of small wins, it is important to note that the most common outcome in scratch card play is a loss, but if a prize is won, the most common prize amount in virtually all scratch card games is in fact not a true win, but rather what in gambling parlance is called a ''push''. This outcome is a type of ''win'' in which the player gains an amount equal to that of their original bet. It is not unreasonable to assume that the majority of these push outcomes may be simply ''cashed in'' for another card by the player, as they are of equivalent value. Therefore, future research would benefit from designs that directly compare the effects of pushes and true wins. Limitations While we tried to accurately approximate real gambling behaviour, it should be reiterated that participants were not gambling with their own money and thus could not truly lose within the constraints of the experimental design. Although our participants all had experience playing scratch cards, it's possible that different results would be obtained with a community-based sample of more experienced scratch card players, or more experienced gamblers in general. 
Future studies should attempt to clarify the roles of experience, frequency of play, and gambling status on near-miss effects. Another limitation of the current study is the relatively small jackpot prize that participants could win, in contrast to real scratch card jackpots (which range from tens of thousands to millions of dollars). However, we believe this factor would only serve to attenuate responses to the different outcome types. In this light, the robust effect of near-misses on urge that we obtained may be viewed as a conservative estimate of the effects that may occur in real scratch card games. Conclusion Overall, the results of this study highlight the potentially problematic influence of nearmiss outcomes in scratch cards on player behaviour and motivational state. Individuals who experienced near-miss outcomes showed a heightened motivation to gamble. The players who showed the largest urges following the near-miss were those who chose to purchase additional scratch cards. Thus gambling urge appears to be a state related to purchasing behaviour, perhaps lying dormant until triggered by a specific game outcome, such as a near-miss. While some players seek additional gambling opportunities regardless of the outcomes they experience, for others, near-miss outcomes may be just enough to encourage further gambling behaviour through increases in subjective urge related to arousal and frustration.
/* * Copyright (C) ST-Ericsson SA 2012 * * Author: <NAME> <<EMAIL>>, * <NAME> <<EMAIL>> * for ST-Ericsson. * * License terms: * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/module.h> #include <linux/device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/mutex.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "ux500_pcm.h" #include "ux500_msp_dai.h" #include "mop500_ab8500.h" #include "../codecs/ab8500-codec.h" #define TX_SLOT_MONO 0x0008 #define TX_SLOT_STEREO 0x000a #define RX_SLOT_MONO 0x0001 #define RX_SLOT_STEREO 0x0003 #define TX_SLOT_8CH 0x00FF #define RX_SLOT_8CH 0x00FF #define DEF_TX_SLOTS TX_SLOT_STEREO #define DEF_RX_SLOTS RX_SLOT_MONO #define DRIVERMODE_NORMAL 0 #define DRIVERMODE_CODEC_ONLY 1 /* Slot configuration */ static unsigned int tx_slots = DEF_TX_SLOTS; static unsigned int rx_slots = DEF_RX_SLOTS; /* Configuration consistency parameters */ static DEFINE_MUTEX(mop500_ab8500_params_lock); static unsigned long mop500_ab8500_usage; static int mop500_ab8500_rate; static int mop500_ab8500_channels; /* Clocks */ static const char * const enum_mclk[] = { "SYSCLK", "ULPCLK" }; enum mclk { MCLK_SYSCLK, MCLK_ULPCLK, }; static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_mclk, enum_mclk); /* Private data for machine-part MOP500<->AB8500 */ struct mop500_ab8500_drvdata { /* Clocks */ enum mclk mclk_sel; struct clk *clk_ptr_intclk; struct clk *clk_ptr_sysclk; struct clk *clk_ptr_ulpclk; }; static inline const char *get_mclk_str(enum mclk mclk_sel) { switch (mclk_sel) { case MCLK_SYSCLK: return "SYSCLK"; case MCLK_ULPCLK: return "ULPCLK"; default: return "Unknown"; } } static int mop500_ab8500_set_mclk(struct device *dev, struct mop500_ab8500_drvdata *drvdata) { int status; struct clk *clk_ptr; if (IS_ERR(drvdata->clk_ptr_intclk)) { 
dev_err(dev, "%s: ERROR: intclk not initialized!\n", __func__); return -EIO; } switch (drvdata->mclk_sel) { case MCLK_SYSCLK: clk_ptr = drvdata->clk_ptr_sysclk; break; case MCLK_ULPCLK: clk_ptr = drvdata->clk_ptr_ulpclk; break; default: return -EINVAL; } if (IS_ERR(clk_ptr)) { dev_err(dev, "%s: ERROR: %s not initialized!\n", __func__, get_mclk_str(drvdata->mclk_sel)); return -EIO; } status = clk_set_parent(drvdata->clk_ptr_intclk, clk_ptr); if (status) dev_err(dev, "%s: ERROR: Setting intclk parent to %s failed (ret = %d)!", __func__, get_mclk_str(drvdata->mclk_sel), status); else dev_dbg(dev, "%s: intclk parent changed to %s.\n", __func__, get_mclk_str(drvdata->mclk_sel)); return status; } /* * Control-events */ static int mclk_input_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); struct mop500_ab8500_drvdata *drvdata = snd_soc_card_get_drvdata(card); ucontrol->value.enumerated.item[0] = drvdata->mclk_sel; return 0; } static int mclk_input_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); struct mop500_ab8500_drvdata *drvdata = snd_soc_card_get_drvdata(card); unsigned int val = ucontrol->value.enumerated.item[0]; if (val > (unsigned int)MCLK_ULPCLK) return -EINVAL; if (drvdata->mclk_sel == val) return 0; drvdata->mclk_sel = val; return 1; } /* * Controls */ static struct snd_kcontrol_new mop500_ab8500_ctrls[] = { SOC_ENUM_EXT("Master Clock Select", soc_enum_mclk, mclk_input_control_get, mclk_input_control_put), SOC_DAPM_PIN_SWITCH("Headset Left"), SOC_DAPM_PIN_SWITCH("Headset Right"), SOC_DAPM_PIN_SWITCH("Earpiece"), SOC_DAPM_PIN_SWITCH("Speaker Left"), SOC_DAPM_PIN_SWITCH("Speaker Right"), SOC_DAPM_PIN_SWITCH("LineOut Left"), SOC_DAPM_PIN_SWITCH("LineOut Right"), SOC_DAPM_PIN_SWITCH("Vibra 1"), SOC_DAPM_PIN_SWITCH("Vibra 2"), SOC_DAPM_PIN_SWITCH("Mic 1"), SOC_DAPM_PIN_SWITCH("Mic 2"), 
SOC_DAPM_PIN_SWITCH("LineIn Left"), SOC_DAPM_PIN_SWITCH("LineIn Right"), SOC_DAPM_PIN_SWITCH("DMic 1"), SOC_DAPM_PIN_SWITCH("DMic 2"), SOC_DAPM_PIN_SWITCH("DMic 3"), SOC_DAPM_PIN_SWITCH("DMic 4"), SOC_DAPM_PIN_SWITCH("DMic 5"), SOC_DAPM_PIN_SWITCH("DMic 6"), }; /* ASoC */ static int mop500_ab8500_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; /* Set audio-clock source */ return mop500_ab8500_set_mclk(rtd->card->dev, snd_soc_card_get_drvdata(rtd->card)); } static void mop500_ab8500_shutdown(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct device *dev = rtd->card->dev; dev_dbg(dev, "%s: Enter\n", __func__); /* Reset slots configuration to default(s) */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) tx_slots = DEF_TX_SLOTS; else rx_slots = DEF_RX_SLOTS; } static int mop500_ab8500_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct device *dev = rtd->card->dev; unsigned int fmt; int channels, ret = 0, driver_mode, slots; unsigned int sw_codec, sw_cpu; bool is_playback; dev_dbg(dev, "%s: Enter\n", __func__); dev_dbg(dev, "%s: substream->pcm->name = %s\n" "substream->pcm->id = %s.\n" "substream->name = %s.\n" "substream->number = %d.\n", __func__, substream->pcm->name, substream->pcm->id, substream->name, substream->number); /* Ensure configuration consistency between DAIs */ mutex_lock(&mop500_ab8500_params_lock); if (mop500_ab8500_usage) { if (mop500_ab8500_rate != params_rate(params) || mop500_ab8500_channels != params_channels(params)) { mutex_unlock(&mop500_ab8500_params_lock); return -EBUSY; } } else { mop500_ab8500_rate = params_rate(params); mop500_ab8500_channels = params_channels(params); } __set_bit(cpu_dai->id, &mop500_ab8500_usage); 
mutex_unlock(&mop500_ab8500_params_lock); channels = params_channels(params); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S32_LE: sw_cpu = 32; break; case SNDRV_PCM_FORMAT_S16_LE: sw_cpu = 16; break; default: return -EINVAL; } /* Setup codec depending on driver-mode */ if (channels == 8) driver_mode = DRIVERMODE_CODEC_ONLY; else driver_mode = DRIVERMODE_NORMAL; dev_dbg(dev, "%s: Driver-mode: %s.\n", __func__, (driver_mode == DRIVERMODE_NORMAL) ? "NORMAL" : "CODEC_ONLY"); /* Setup format */ if (driver_mode == DRIVERMODE_NORMAL) { fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CONT; } else { fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_GATED; } ret = snd_soc_runtime_set_dai_fmt(rtd, fmt); if (ret) return ret; /* Setup TDM-slots */ is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); switch (channels) { case 1: slots = 16; tx_slots = (is_playback) ? TX_SLOT_MONO : 0; rx_slots = (is_playback) ? 0 : RX_SLOT_MONO; break; case 2: slots = 16; tx_slots = (is_playback) ? TX_SLOT_STEREO : 0; rx_slots = (is_playback) ? 0 : RX_SLOT_STEREO; break; case 8: slots = 16; tx_slots = (is_playback) ? TX_SLOT_8CH : 0; rx_slots = (is_playback) ? 
0 : RX_SLOT_8CH; break; default: return -EINVAL; } if (driver_mode == DRIVERMODE_NORMAL) sw_codec = sw_cpu; else sw_codec = 20; dev_dbg(dev, "%s: CPU-DAI TDM: TX=0x%04X RX=0x%04x\n", __func__, tx_slots, rx_slots); ret = snd_soc_dai_set_tdm_slot(cpu_dai, tx_slots, rx_slots, slots, sw_cpu); if (ret) return ret; dev_dbg(dev, "%s: CODEC-DAI TDM: TX=0x%04X RX=0x%04x\n", __func__, tx_slots, rx_slots); ret = snd_soc_dai_set_tdm_slot(codec_dai, tx_slots, rx_slots, slots, sw_codec); if (ret) return ret; return 0; } static int mop500_ab8500_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; mutex_lock(&mop500_ab8500_params_lock); __clear_bit(cpu_dai->id, &mop500_ab8500_usage); mutex_unlock(&mop500_ab8500_params_lock); return 0; } struct snd_soc_ops mop500_ab8500_ops[] = { { .hw_params = mop500_ab8500_hw_params, .hw_free = mop500_ab8500_hw_free, .startup = mop500_ab8500_startup, .shutdown = mop500_ab8500_shutdown, } }; int mop500_ab8500_machine_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct device *dev = rtd->card->dev; struct mop500_ab8500_drvdata *drvdata; int ret; dev_dbg(dev, "%s Enter.\n", __func__); /* Create driver private-data struct */ drvdata = devm_kzalloc(dev, sizeof(struct mop500_ab8500_drvdata), GFP_KERNEL); snd_soc_card_set_drvdata(rtd->card, drvdata); /* Setup clocks */ drvdata->clk_ptr_sysclk = clk_get(dev, "sysclk"); if (IS_ERR(drvdata->clk_ptr_sysclk)) dev_warn(dev, "%s: WARNING: clk_get failed for 'sysclk'!\n", __func__); drvdata->clk_ptr_ulpclk = clk_get(dev, "ulpclk"); if (IS_ERR(drvdata->clk_ptr_ulpclk)) dev_warn(dev, "%s: WARNING: clk_get failed for 'ulpclk'!\n", __func__); drvdata->clk_ptr_intclk = clk_get(dev, "intclk"); if (IS_ERR(drvdata->clk_ptr_intclk)) dev_warn(dev, "%s: WARNING: clk_get failed for 'intclk'!\n", __func__); /* Set intclk default parent to ulpclk */ drvdata->mclk_sel = MCLK_ULPCLK; ret = 
mop500_ab8500_set_mclk(dev, drvdata); if (ret < 0) dev_warn(dev, "%s: WARNING: mop500_ab8500_set_mclk!\n", __func__); drvdata->mclk_sel = MCLK_ULPCLK; /* Add controls */ ret = snd_soc_add_card_controls(rtd->card, mop500_ab8500_ctrls, ARRAY_SIZE(mop500_ab8500_ctrls)); if (ret < 0) { pr_err("%s: Failed to add machine-controls (%d)!\n", __func__, ret); return ret; } ret = snd_soc_dapm_disable_pin(&codec->dapm, "Earpiece"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Speaker Left"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Speaker Right"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineOut Left"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineOut Right"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Vibra 1"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Vibra 2"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Mic 1"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Mic 2"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineIn Left"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineIn Right"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 1"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 2"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 3"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 4"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 5"); ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 6"); return ret; } void mop500_ab8500_remove(struct snd_soc_card *card) { struct mop500_ab8500_drvdata *drvdata = snd_soc_card_get_drvdata(card); if (drvdata->clk_ptr_sysclk != NULL) clk_put(drvdata->clk_ptr_sysclk); if (drvdata->clk_ptr_ulpclk != NULL) clk_put(drvdata->clk_ptr_ulpclk); if (drvdata->clk_ptr_intclk != NULL) clk_put(drvdata->clk_ptr_intclk); snd_soc_card_set_drvdata(card, drvdata); }
Story highlights The President lashes out, without restraint, at anyone who doesn't fulfill his objectives, and that helps explain his failing presidency, Julian Zelizer writes Trump's attacks on Sessions, Comey and Mueller are remarkable, he writes Julian Zelizer, a history and public affairs professor at Princeton University and a CNN analyst, is the author of "The Fierce Urgency of Now: Lyndon Johnson, Congress, and the Battle for the Great Society." He's co-host of the "Politics & Polls" podcast. The opinions expressed in this commentary are his own. (CNN) The new moniker for President Donald Trump should be the "blamer in chief." When confronted with challenges and problems, there is nothing the President likes to do more than lash out against someone else. In his interview with the New York Times , President Trump seemed like a leader who is fed up with his job. He said he would never have appointed Attorney General Jeff Sessions, one of his longest and most loyal supporters in politics, had he known Sessions would recuse himself concerning the Russia investigation. He attacked the FBI director he fired, James Comey, as someone who had tried to intimidate him to keep his job, while saying that Special Counsel Robert Mueller's office is compromised as a result of conflicts of interest. "There are very few Republicans in Baltimore, if any," the President said about the hometown of Deputy Attorney General Rod Rosenstein, who appointed Mueller special counsel to investigate the Russia matter. This is saying a lot from a President who has two children running a massive global business while their father sits in the Oval Office. This interview came shortly after President Trump threatened Republican Senator Dean Heller's career if he did not vote for a bill that the Congressional Budget Office estimates would leave 32 million Americans without health insurance. You could watch Heller's nervous laugh and just imagine what he thought about his lunch partner. 
Trump's public rebukes were pretty stunning. The President says the kinds of things about his own Cabinet in public that other presidents would reserve for behind the scenes. This President has absolutely no restraint when it comes to attacking anyone who crosses his path. His own attorney general, the former FBI director and the special counsel now find themselves, at least temporarily, in the space occupied by the entire journalistic community -- other than Fox News -- which he has spent much of his time dismissing as "fake news" for its critical coverage. Read More
def load_yaml(yaml_file, obj_type=dict):
    """Read and parse a YAML document from disk.

    Delegates to the module-level ``ordered_load`` helper with the safe
    loader; ``obj_type`` is forwarded as ``object_pairs_hook`` so YAML
    mappings are materialized as that type (``dict`` by default).

    :param yaml_file: path of the YAML file to open.
    :param obj_type: mapping type used for parsed YAML mappings.
    :return: the parsed document.
    """
    with open(yaml_file, "r") as handle:
        document = ordered_load(
            handle, Loader=yaml.SafeLoader, object_pairs_hook=obj_type
        )
    return document
Advanced Electron Microscopy Characterization of Intergranular Corrosion in Ni-20Cr Alloy Under Molten Salt Environment working at temperatures as high as 700 °C. While such coolant has been shown to have multiple benefits, the lack of understanding of the response of structural materials to simultaneous molten salt corrosion and radiation damage has limited materials selection, research and development and the licensing of the nuclear reactors. Here we report advanced microscopy techniques to characterize materials after molten salt corrosion with and without concurrent proton irradiation. We discovered that proton irradiation could slow down intergranular corrosion under certain situations.
package com.transcendensoft.hedbanz.domain.interactor.changepwd;

/**
 * Copyright 2018. <NAME>
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import com.transcendensoft.hedbanz.data.exception.HedbanzApiException;
import com.transcendensoft.hedbanz.data.repository.UserDataRepositoryImpl;
import com.transcendensoft.hedbanz.domain.CompletableUseCase;
import com.transcendensoft.hedbanz.domain.entity.User;
import com.transcendensoft.hedbanz.domain.interactor.changepwd.exception.PasswordResetException;
import com.transcendensoft.hedbanz.domain.repository.UserDataRepository;
import com.transcendensoft.hedbanz.domain.validation.PasswordResetError;
import com.transcendensoft.hedbanz.domain.validation.UserCrudValidator;
import com.transcendensoft.hedbanz.domain.validation.UserError;
import com.transcendensoft.hedbanz.utils.SecurityUtils;

import javax.inject.Inject;

import io.reactivex.Completable;
import io.reactivex.CompletableTransformer;
import io.reactivex.Observable;
import io.reactivex.disposables.CompositeDisposable;

/**
 * This class is an implementation of {@link com.transcendensoft.hedbanz.domain.UseCase}
 * that represents a use case for resetting a user's password.
 *
 * The flow is: validate the new password and its confirmation locally,
 * hash the password, then delegate to the user-data repository.  All
 * validation and server errors are accumulated in a
 * {@link PasswordResetException} that is delivered through the Completable's
 * error channel.
 *
 * @author <NAME>.
 *         E-mail: <EMAIL>
 *         Developed by <u>Transcendensoft</u>
 */
public class ResetPasswordUseCase extends CompletableUseCase<ResetPasswordUseCase.Param> {
    // Repository that performs the actual password-reset request.
    private UserDataRepository mUserDataRepository;
    // Error accumulator for the current execution; re-created on each call to
    // buildUseCaseCompletable().
    // NOTE(review): this field is shared instance state — concurrent
    // executions of one use-case instance would share it; confirm the use
    // case is only executed once at a time.
    private PasswordResetException mException;

    @Inject
    public ResetPasswordUseCase(CompletableTransformer completableTransformer,
                                CompositeDisposable mCompositeDisposable,
                                UserDataRepositoryImpl userDataRepository) {
        super(completableTransformer, mCompositeDisposable);
        this.mUserDataRepository = userDataRepository;
    }

    /**
     * Builds the reset-password Completable.  On invalid input, completes
     * with the accumulated {@link PasswordResetException} immediately and
     * never touches the network.
     */
    @Override
    protected Completable buildUseCaseCompletable(ResetPasswordUseCase.Param param) {
        mException = new PasswordResetException();
        if (isDataCorrect(param)) {
            // Hash before transmission; the repository never sees plain text.
            param.setPassword(SecurityUtils.hash(param.getPassword()));
            return Completable.fromObservable(mUserDataRepository.resetPassword(param.login,
                    param.keyword, param.password)
                    .onErrorResumeNext(this::processOnError));
        }
        return Completable.error(mException);
    }

    /**
     * Validates password and confirmation via {@link UserCrudValidator},
     * mapping {@link UserError}s onto {@link PasswordResetError}s collected
     * in {@code mException}.
     *
     * @return true when both fields pass validation.
     */
    private boolean isDataCorrect(ResetPasswordUseCase.Param param) {
        User user = new User.Builder()
                .setPassword(param.password)
                .setConfirmPassword(param.confirmPassword)
                .build();

        UserCrudValidator validator = new UserCrudValidator(user);
        boolean result = true;
        if (!validator.isPasswordValid()) {
            if (validator.getError() == UserError.EMPTY_PASSWORD) {
                mException.addError(PasswordResetError.EMPTY_PASSWORD);
            } else if (validator.getError() == UserError.INVALID_PASSWORD) {
                mException.addError(PasswordResetError.INCORRECT_PASSWORD);
            }
            result = false;
        }
        if (!validator.isConfirmPasswordValid()) {
            if (validator.getError() == UserError.EMPTY_PASSWORD_CONFIRMATION) {
                mException.addError(PasswordResetError.EMPTY_PASSWORD_CONFIRMATION);
            } else if (validator.getError() == UserError.INVALID_PASSWORD_CONFIRMATION) {
                mException.addError(PasswordResetError.INCORRECT_PASSWORD_CONFIRMATION);
            }
            result = false;
        }
        return result;
    }

    /**
     * Converts a repository error into the accumulated
     * {@link PasswordResetException}: API errors are mapped by their server
     * error code, anything else becomes UNDEFINED_ERROR.
     */
    private Observable processOnError(Throwable throwable) {
        if (throwable instanceof HedbanzApiException) {
            HedbanzApiException exception = (HedbanzApiException) throwable;
            mException.addError(
                    PasswordResetError.Companion.getErrorByCode(
                            exception.getServerErrorCode()));
        } else {
            mException.addError(PasswordResetError.UNDEFINED_ERROR);
        }
        return Observable.error(mException);
    }

    /**
     * Value object carrying the inputs of the reset-password use case:
     * login, secret keyword, new password and its confirmation.
     */
    public static class Param {
        private String login;
        private String keyword;
        private String password;
        private String confirmPassword;

        public Param(String login, String keyword,
                     String password, String confirmPassword) {
            this.login = login;
            this.keyword = keyword;
            this.password = password;
            this.confirmPassword = confirmPassword;
        }

        public String getLogin() {
            return login;
        }

        public void setLogin(String login) {
            this.login = login;
        }

        public String getKeyword() {
            return keyword;
        }

        public void setKeyword(String keyword) {
            this.keyword = keyword;
        }

        public String getPassword() {
            return password;
        }

        public void setPassword(String password) {
            this.password = password;
        }

        public String getConfirmPassword() {
            return confirmPassword;
        }

        public void setConfirmPassword(String confirmPassword) {
            this.confirmPassword = confirmPassword;
        }
    }
}
def visit_extslice(
    self, node: "ast.ExtSlice", parent: nodes.Subscript
) -> nodes.Tuple:
    """Transform an ExtSlice AST node into a Tuple node holding its dimensions.

    The resulting Tuple is attached to ``parent`` and populated with the
    visited form of every dimension of the extended slice.
    """
    tuple_node = nodes.Tuple(ctx=Context.Load, parent=parent)
    dimensions = [self.visit(dimension, tuple_node) for dimension in node.dims]
    tuple_node.postinit(dimensions)
    return tuple_node
// MySQLExistingMySQLDumpSyncParametersFactory is just a simple function to instantiate the MySQLExistingMySQLDumpSyncParametersStruct func MySQLExistingMySQLDumpSyncParametersFactory( BackupLocation string, ReplicationCoordinates MySQLReplicationCoordinates, ) MySQLExistingMySQLDumpSyncParametersStruct { return MySQLExistingMySQLDumpSyncParametersStruct{ Type: "MySQLExistingMySQLDumpSyncParameters", BackupLocation: BackupLocation, ReplicationCoordinates: ReplicationCoordinates, } }
/**
 * Writes into an active part of the heap (index is a real index calculated
 * from the beginning of this part of the heap)
 *
 * @param index slot index relative to the start of the active heap part
 * @param value the structure to store at that slot
 */
private <T extends SStruct> void writeToActiveHeap(int index, T value) {
    try {
        JVMLogger.log(JVMLogger.TAG_HEAP, "Write #" + index + "#-->" + value);
        // Translate the part-relative index into an absolute slot in the
        // backing array of the whole heap.
        heap[index + activeHeapOffset] = value;
    } catch (ArrayIndexOutOfBoundsException e) {
        // An out-of-bounds write means the active heap part is exhausted:
        // surface it to the running program as an out-of-heap-memory error.
        try {
            AAAException.throwException(new OutOfHeapMemException(), this.jvmThread.getStack(), this, methodArea);
        } catch (Exception ex) {
            // NOTE(review): any failure while raising OutOfHeapMemException is
            // silently swallowed here — confirm this is intentional (e.g. to
            // avoid recursive faulting) rather than lost error information.
        }
    }
}
import { DevServerConfig } from '@web/dev-server';
import { CheckHtmlLinksCliOptions } from 'check-html-links/dist-types/types/main';
import { WatchOptions } from 'chokidar';
import { ImagePreset, RocketPreset } from './preset';
export { ImagePreset, RocketPreset };
import { Eleventy } from '@11ty/eleventy';

// Options for the `rocket start` command.
interface RocketStartConfig {
  createSocialMediaImages?: boolean;
}

// The lifecycle hooks a preset may provide; RocketCliOptions picks these
// same keys from RocketPreset so a config file can override any of them.
type PresetKeys =
  | 'before11ty'
  | 'setupUnifiedPlugins'
  | 'setupDevAndBuildPlugins'
  | 'setupBuildPlugins'
  | 'setupDevPlugins'
  | 'setupCliPlugins'
  | 'setupEleventyPlugins'
  | 'setupEleventyComputedConfig';

// Full shape of the Rocket CLI configuration object.
export interface RocketCliOptions extends Pick<RocketPreset, PresetKeys> {
  presets?: Array<RocketPreset>;
  pathPrefix?: string;
  serviceWorkerName?: string;
  inputDir?: string;
  outputDir?: string;
  emptyOutputDir?: boolean;
  absoluteBaseUrl?: string;
  watch?: boolean;
  createSocialMediaImages?: boolean;
  imagePresets?: {
    [key: string]: ImagePreset;
  };
  chokidarConfig?: WatchOptions;
  checkLinks?: Partial<CheckHtmlLinksCliOptions>;
  start?: RocketStartConfig;

  // advanced
  rollup?: (config: any) => void; // TODO: improve
  devServer?: DevServerConfig;
  eleventy?: (eleventyConfig: any) => void; // TODO: improve
  plugins?: RocketPlugin[];

  // rarely used
  command?: string;
  configFile?: string;
  outputDevDir?: string;

  // internal fields (underscore-prefixed): populated by the CLI at runtime,
  // not meant to be set by users.
  _inputDirCwdRelative?: string;
  _presetPaths?: string[];
  __before11tyFunctions?: (() => void | Promise<void>)[];
}

// Contract for a Rocket CLI plugin: a named class exposing the commands it
// handles plus optional setup and HTML-inspection hooks.
export type RocketPlugin = {
  // what can we do, typescript itself types the constructor as `Function`
  // eslint-disable-next-line @typescript-eslint/ban-types
  constructor: Function & { pluginName: string };
  commands: string[];
  setupCommand?(config?: RocketCliOptions): Required<RocketCliOptions>;
  setup?(opts: { config: RocketCliOptions; argv: string[]; eleventy: Eleventy }): Promise<void>;
  inspectRenderedHtml?(opts: {
    html: string;
    inputPath: string;
    outputPath: string;
    layout: string;
    title: string;
    url: string;
    data: any;
    eleventy: Eleventy;
  }): Promise<void>;
} & {
  // later ts versions can do this
  // [index: `${string}Command`]: () => void|Promise<void>;
  [index: string]: () => void | Promise<void>;
};
// // libshapes: high-level OpenVG API // <NAME> (<EMAIL>) // // Additional outline / windowing functions // Paeryn (github.com/paeryn) // #include <stdio.h> #include <stdlib.h> #include <jpeglib.h> #include "VG/openvg.h" // // Image services // VGImage createImageFromJpegDecompressStruct(struct jpeg_decompress_struct *jdc); // createImageFromJpegData decompresses a JPEG image to the standard image format // source: https://github.com/ileben/ShivaVG/blob/master/examples/test_image.c VGImage createImageFromJpegData(VGubyte *image_data, unsigned int length) { // Setup default error handling struct jpeg_decompress_struct jdc; struct jpeg_error_mgr jerr; jdc.err = jpeg_std_error(&jerr); jpeg_create_decompress(&jdc); // Set input file jpeg_mem_src(&jdc, image_data, length); return createImageFromJpegDecompressStruct(&jdc); } // createImageFromJpeg decompresses a JPEG image to the standard image format // source: https://github.com/ileben/ShivaVG/blob/master/examples/test_image.c VGImage createImageFromJpegFile(const char *filename) { // Try to open image file FILE *infile = fopen(filename, "rb"); if (infile == NULL) { printf("Failed opening '%s' for reading!\n", filename); return VG_INVALID_HANDLE; } // Setup default error handling struct jpeg_decompress_struct jdc; struct jpeg_error_mgr jerr; jdc.err = jpeg_std_error(&jerr); jpeg_create_decompress(&jdc); // Set input file jpeg_stdio_src(&jdc, infile); return createImageFromJpegDecompressStruct(&jdc); } VGImage createImageFromJpegDecompressStruct(struct jpeg_decompress_struct *jdc) { // Check for endianness unsigned int lilEndianTest = 1; VGImageFormat rgbaFormat; if (((unsigned char *)&lilEndianTest)[0] == 1) rgbaFormat = VG_sABGR_8888; else rgbaFormat = VG_sRGBA_8888; // Read header and start jpeg_read_header(jdc, TRUE); jpeg_start_decompress(jdc); unsigned int width = jdc->output_width; unsigned int height = jdc->output_height; // Allocate buffer using jpeg allocator unsigned int bbpp = jdc->output_components; 
unsigned int bstride = width * bbpp; JSAMPARRAY buffer = (*jdc->mem->alloc_sarray) ((j_common_ptr) jdc, JPOOL_IMAGE, bstride, 1); // Allocate image data buffer unsigned int dbpp = 4; unsigned int dstride = width * dbpp; VGubyte *data = (VGubyte *) malloc(dstride * height); // Iterate until all scanlines processed while (jdc->output_scanline < height) { // Read scanline into buffer jpeg_read_scanlines(jdc, buffer, 1); VGubyte *drow = data + (height - jdc->output_scanline) * dstride; VGubyte *brow = buffer[0]; // Expand to RGBA for (unsigned int x = 0; x < width; ++x, drow += dbpp, brow += bbpp) { switch (bbpp) { case 4: drow[0] = brow[0]; drow[1] = brow[1]; drow[2] = brow[2]; drow[3] = brow[3]; break; case 3: drow[0] = brow[0]; drow[1] = brow[1]; drow[2] = brow[2]; drow[3] = 255; break; } } } // Create VG image VGImage img = vgCreateImage(rgbaFormat, width, height, VG_IMAGE_QUALITY_BETTER); vgImageSubData(img, data, dstride, rgbaFormat, 0, 0, width, height); // Cleanup jpeg_destroy_decompress(jdc); return img; } // makeimage makes an image from a raw raster of red, green, blue, alpha values void makeimage(VGfloat x, VGfloat y, int w, int h, VGubyte * data) { unsigned int dstride = w * 4; VGImageFormat rgbaFormat = VG_sABGR_8888; VGImage img = vgCreateImage(rgbaFormat, w, h, VG_IMAGE_QUALITY_BETTER); vgImageSubData(img, (void *)data, dstride, rgbaFormat, 0, 0, w, h); vgSetPixels(x, y, img, 0, 0, w, h); vgDestroyImage(img); }
// STFStorageVolumeClaim returns the PVC definition that just be provisioned // with the STF storage service, or nil if no PVCSpec is provided. func (a *AndroidFarm) STFStorageVolumeClaim() *corev1.PersistentVolumeClaim { if a.STFConfig() != nil { if a.STFConfig().Storage != nil { if a.STFConfig().Storage.PVCSpec != nil { return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-stf-storage", a.GetName()), Namespace: a.STFConfig().GetNamespace(), Labels: a.STFComponentLabels("storage"), OwnerReferences: a.OwnerReferences(), }, Spec: *a.STFConfig().Storage.PVCSpec, } } } } return nil }
Morphological diversity in digital skin microstructure of didelphid marsupials The purpose of this study was to investigate how didelphid marsupials have diversified in morphology of their claws and digital pads as they evolved different foraging preferences such as terrestrial, aquatic, and arboreal feeding behaviours. Both arboreal and more terrestrial didelphids have papillary ridges on the digital pads of the fore and hindfoot. In contrast, the papillary ridges on the pedal digital skin of the water opossum Chironectes minimus have been replaced by nonoverlapping, thickened epidermal scales. Chironectes also differs from the other didelphids studied in having finger tips with reduced claws and digital pads that are covered with raised epidermal scales having projecting, finger‐like cones arranged radially around the perimeter of each scale. The reduced claws and unusual digit skin microstructure of Chironectes likely improve this animal's ability to recognise and identify live animal prey under water using only its sense of touch.
Why Are Gas Prices So High? Speculating About Oil Speculation AUDIO By Dave Fehling Email Tweet Dave Fehling/StateImpact Texas With oil surpassing $100 a barrel, drivers are feeling the pain at the pump and some wonder if it’s simply a case of supply and demand. Or maybe something else. “It’s sad, but people are very greedy,” said Houston driver Jodie Minear as she put $60 of fuel into her Jeep SUV at a Chevron station along Highway 59. Does she have suspicions as to how prices are set? “Definitely, I think everybody does.” She’s not alone. Related Topics The BP Oil Spill Back in July 2008, oil prices topped out at a record $145 a barrel. Some in Congress were convinced it had to be because greedy speculators were gaming the system, running up the price of oil and fleecing American consumers. “[The market for oil] has now been taken over by an orgy of speculation. Speculators control these markets, driving up the price, despite the fact there has been no change in the fundamentals.” said then-Senator Byron Dorgan, a Democrat from North Dakota. He was speaking on the Senate floor in July 2008, railing against what he said had “broken” the futures market for oil. “Investment banks, hedge funds, pension funds running deep into these future markets, driving up prices. Investment banks buying oil storage capability to buy oil and take it off the market,” said Dorgan. Since then, the price of oil dropped dramatically, and a debate began over whether there was in fact speculators manipulating the market and if so, what should be done to stop them. The oil futures market is under the regulation of the U.S. Commodities Futures Trading Commission(CFTC). Its chairman, the Obama Administration appointee Gary Gensler, pledged to take a more aggressive approach to investigating market manipulation. And in a speech Thursday on his energy policy, President Obama called out speculators as being one of the reasons behind high gas prices. 
“When uncertainty increases, speculative trading on Wall Street can drive up prices even more,” he said. “So there are short-term factors at work here.” In 2010, Congress passed the Dodd-Frank law that overhauled financial regulation including rules involving oil futures, calling on the CFTC to enforce new limits on the amount of oil speculators could trade. Last May, the CFTC announced it was charging a group of companies and individuals who allegedly conspired in a scheme to hoard then dump oil stored in Cushing, Oklahoma in an effort to manipulate the price. The CFTC alleged the scheme netted $50 million in illegal profits. But there were those who had their doubts about what the CFTC was finding. Dave Fehling/StateImpact Texas “I sort of look at it (like) if this is the best they can come up with after all that digging, that suggests the market is actually pretty clean,” said Craig Pirrong, a professor of finance and energy markets at the University of Houston’s Bauer College of Business. Pirrong is an outspoken skeptic of the extent of market manipulation. He said the Cushing case was, in his opinion, weak. “Frequently, when prices go up, or should I say, invariably when prices go up, it’s like in Casablanca: round up the usual suspects. And essentially, the speculators are the first ones in line,” Pirrong told StateImpact Texas. While acknowledging that price manipulation does happen, he said “it’s not what’s driving sort of the big price movement like we saw in 2008 … and it’s not driving what we see right now.” Pirrong said far more influential is a world oil output that has stagnated, combined with disruptions in supplies from Libya and now Iran. What’s more, the commodities trading industry is fighting the new limits on oil speculation, taking the CFTC to court to block it from enforcing limits.
The CFTC declined to comment to StateImpact on active cases but a spokesperson indicated that the commission had brought dozens of enforcement cases against energy traders in the past decade, resulting in millions in fines.
The effects of aspirin plus cisplatin on SGC7901/CDDP cells in vitro The purpose of this study was to determine the effect of aspirin plus cisplatin (CDDP) in the chemotherapy of gastric cancer. We cultured SGC7901/CDDP cells by long-term exposure of SGC7901 cells to small doses of CDDP in vitro. The cells were treated with aspirin, CDDP or aspirin plus CDDP for 24 h and cell growth was assessed by the MTT assay, the apoptotic rate by flow cytometry, the survivin mRNA expression by RT-PCR and the survivin protein expression by western blotting. The results revealed that the cell growth in the aspirin plus CDDP group was significantly inhibited. The apoptotic rate in the aspirin plus CDDP was significantly higher compared to that in the other groups. The survivin mRNA and protein expression were also significantly reduced in the aspirin plus CDDP group. Our data suggest that the combination of aspirin and CDDP exhibited a higher degree of toxicity against SGC7901/CDDP cells compared to that of aspirin or CDDP alone. Thus, the combination of aspirin plus CDDP may reduce the expression of survivin and induce the apoptosis of SGC7901/CDDP cells. Introduction Chemotherapy is an indispensable component of the comprehensive treatment of gastric cancer. Cisplatin (CDDP) is a widely used chemotherapeutic drug for gastric cancer; however, the resistance of gastric cancer cells to CDDP reduces its therapeutic efficacy. Therefore, there is a need for CDDP-sensitizing agents to enhance the effect of CDDP in the treatment of gastric cancer. Survivin is a member of the inhibitor of apoptosis protein family (1) that inhibits cell apoptosis and division. The survivin gene is overexpressed in gastric cancer cells (2), which may be one of the main reasons for the resistance to CDDP. 
It was previously demonstrated that aspirin may enhance the sensitivity of HT-29 human colon cancer cells to 5-FU and a combination of aspirin and 5-FU induces apoptosis of HT-29 cells in a time-and concentration-dependent manner (3). In the present study, we investigated whether aspirin plus CDDP exhibited enhanced toxicity against SGC7901/CDDP cells. Materials and methods SGC7901 and SGC7901/CDDP cell cultures. SGC7901 cells were obtained from the Cell Bank of Chinese Academy of Sciences (Shanghai, China). The cells were cultured in RPMI-1640 medium (Invitrogen Life Technologies, Carlsbad, CA, USA) supplemented with 10% fetal bovine serum, 100 U̸ml penicillin (North China Pharmaceutical Group Corporation, Hebei, China) and 100 µg̸ml streptomycin (North China Pharmaceutical Group Corporation) in 75-cm 2 flasks at 37˚C in a humidified atmosphere of 5% CO 2 and 95% air. The pH value of the medium was adjusted to 7.2 with sterile 5.6% NaHCO 3 liquid and the medium was changed every 2-3 days. The cells were subcultured when 80% confluence was reached. As the SGC7901 cells grew to 80% confluence, RPMI-1640 medium with 100 ng/ml CDDP (Qilu Pharmaceutical Co., Ltd., Jinan, China) was added and the medium was changed every 2-3 days. When 80% confluence was reached, the cells were subcultured with RPMI-1640 medium to maintain good cell adhesion. As the cells became adherent to the bottom of the cell culture flasks, RPMI-1640 medium with 200 ng̸ml CDDP was added and the culture medium was changed every 2-3 days. The method was repeated with CDDP concentrations of 500, 700 and 1,000 ng/ml, until SGC7901/CDDP cells were obtained. The growth and reproduction of SGC7901/CDDP cells were maintained with RPMI-1640 medium with 1,000 ng/ml CDDP. The effects of aspirin plus cisplatin on SGC7901/CDDP cells in vitro The SGC7901/CDDP cells were divided into 4 groups and treated with i) CDDP (10 µg/ml), ii) aspirin (3 mmol/l; Sigma, St. 
Louis, MO, USA), iii) aspirin (3 mmol/l) plus CDDP (10 µg/ml) and iv) physiological saline. Following incubation for 24 h, the cells were collected with 0.25% trypsin and used in assays measuring viability, apoptosis, survivin mRNA and survivin protein expression. MTT cell proliferation assay. The SGC7901/CDDP cells were cultured at a density of 5x10 4 /ml in 96-well plates with 200 µl/well. After adhering overnight at 37˚C in a humidified 5% CO 2 and 95% air atmosphere, the culture solution was aspirated and CDDP (10 µg/ml), aspirin (3 mmol/l), aspirin (3 mmol/l) plus CDDP (10 µg/ml) and physiological saline were added to the respective groups. After 24 h, 20 µl 0.5% MTT solution was added to each well and incubated for 4 h; the culture solution was discarded and 200 µl dimethylsulphoxide was added to each well to dissolve the MTT formazan crystals for 5 min. The absorbance at 490 nm was determined by a multi-detection microplate reader (Sunrise™, Tecan Ltd., Austria). Cell viability was calculated using the following formula: viability=absorbance of the test group-blank/absorbance of the normal group-blank x 100%. Flow cytometry. The SGC7901/CDDP cells were centrifuged for 10 min at 1,500 x g and the supernatant was discarded. The cells were washed twice with 4˚C phosphate-buffered saline (PBS) solution and resuspended in PBS solution at a concentration of 1.0x10 6 /l. A total of 100 µl solution was collected in 5-ml culture tubes and 5 µl propidium iodide (BD Pharmingen, San Diego, CA, USA) were added. The cells were gently vortexed and incubated for 15 min at room temperature in the dark. A total of 400 µl of 1X binding buffer was added to each tube. Analysis was performed with an EPICS-XL II flow cytometer (Beckman Coulter, Inc., Miami, FL, USA). RNA extraction and semiquantitative RT-PCR. Total RNA was extracted using TRIzol reagent (Invitrogen Life Technologies) and cDNA was reverse-transcribed according to the manufacture's instructions. 
The 447-bp survivin DNA fragment was amplified using two primers synthesized by Invitrogen Life Technologies, 5'-GCATGGGTGCCCCGACGTTG-3' and 5'-GCTCCGGCCAGAGGCCTCAA-3'. The PCR reaction was performed in a total volume of 20 µl containing 2 µl 10X PCR buffer, 0.8 µl MgCl 2 , 1.0 µl dNTPs, 0.2 µl of each primer, 2.0 µl cDNA and 1.0 µl Taq DNA polymerase. The amplification conditions were as follows: denaturation at 94˚C for 30 sec, annealing at 55˚C for 60 sec and elongation at 72˚C for 60 sec for 30 cycles. The 241-bp β-actin fragment was amplified using two primers synthesized by Invitrogen Life Technologies, 5'-TAAAGACCTCTATGCCAACACAGT-3' and 5'-CACCATGGAGGGGCCGGACTCTTC-3'. The PCR reaction was performed in a total volume of 20 µl containing 2 µl 10X PCR buffer, 1.6 µl MgCl 2 , 1.0 µl dNTPs, 0.2 µl of each primer, 2.0 µl cDNA and 1.0 µl Taq DNA polymerase. The amplification conditions were as follows: denaturation at 94˚C for 30 sec, annealing at 58˚C for 40 sec and elongation at 72˚C for 40 sec for 28 cycles. The PCR products were separated on 1% agarose gels containing ethidium bromide. The gel images were digitally recorded and analyzed by computer-assisted image analyzer, Lab-work 4.5 analysis software (Ultra Violet Products, Upland, CA, USA). Western blotting. The SGC7901/CDDP cells were washed twice with 4˚C PBS. Following the addition of RIPA buffer (Invitrogen Life Technologies), the cells were lysed on ice for 30 min and then clarified by centrifugation at 10,000 x g for 10 min at 4˚C. The supernatants were used to assay protein concentration. A total of 25 µg protein were loaded, separated by polyacrylamide gel electrophoresis and transferred onto PVDF membranes. 
The PVDF membranes were incubated with 5% fat-free powder milk in 500 mmol/l NaCl, 20 mmol/l Tris-HCl (pH 7.5) and 0.5% PBS-Tween-20 for 2 h at room temperature, followed by incubation for 24 h with the appropriate dilutions of primary antibody at 4˚C in a refrigerator: 1:2,000 anti-human survivin antibodies (R&D Systems, Minneapolis, MN, USA) and 1:500 β-actin (Wuhan Boster Biological Technology, Ltd., Wuhan, China). Following washing with Tris-buffered saline and Tween-20, the PVDF membranes were incubated with 1:3,000 peroxidase-conjugated rabbit anti-goat secondary antibodies (Wuhan Boster Biological Technology) for 2 h at room temperature. The proteins were visualized using chemiluminescent peroxidase substrate (Pierce Biotechnology, Inc., Rockford, IL, USA) and the blots were quantified and analyzed by computer-assisted image analyzer, Lab-work 4.5 analysis software. Statistical analysis. Data are expressed as means ± SD of at least three independent experiments. One-way ANOVA was used to compare three or more groups. All the analyses were performed with SPSS software, version 19.0 (IBM SPSS, Inc., Chicago, IL, USA). P<0.05 was considered to indicate a statistically significant difference. Results Cell viability. Compared to the control group, the survival rate of the SGC7901/CDDP cells in the CDDP, aspirin and aspirin plus CDDP groups were lower and the difference was statistically significant (P<0.01). Cell growth was significantly inhibited in the aspirin plus CDDP group and the survival rate of cells in the aspirin plus CDDP was lower compared to that in the CDDP and in the aspirin groups. The difference was statistically significant (P<0.01) (Fig. 1). Cell apoptosis. The apoptotic rate in the aspirin, CDDP, aspirin plus CDDP and control groups was 16.74, 24.93, 30.65 and 6.48%, respectively. Furthermore, the apoptotic rate in the aspirin plus CDDP group was significantly higher compared to that in the other groups (Fig. 2). Survivin mRNA expression.
The expression of survivin mRNA in the CDDP and aspirin plus CDDP groups was significantly reduced compared to that in the control group. The difference was statistically significant (P<0.05). Furthermore, the expression of survivin mRNA in the aspirin plus CDDP was significantly reduced compared to that in the aspirin and CDDP alone groups. The difference was statistically significant (P<0.05) (Fig. 3). Survivin protein expression. The expression of survivin protein in the CDDP and aspirin plus CDDP groups was significantly reduced compared to that in the control group. The difference was statistically significant (P<0.05). Furthermore, the expression of survivin protein in the aspirin plus CDDP group was significantly reduced compared to that in the aspirin and CDDP alone groups. The difference was statistically significant (P<0.05) (Fig. 4). Discussion Gastric cancer is associated with high morbidity and mortality. Approximately 60% of gastric cancer patients in western countries present with advanced-stage disease (4), which is also the case for gastric cancer patients in China (5). Surgery and chemotherapy are currently the mainstay of treatment for patients with advanced gastric cancer. CDDP is one of the most widely used chemotherapeutic agents for the treatment of gastric cancer. However, resistance to CDDP is a major cause of ineffective treatment; therefore, there is a need for CDDP-sensitizing agents to improve the effects of chemotherapy. Non-steroidal anti-inflammatory drugs (NSAIDs) are widely used in clinical practice due to their antipyretic, analgesic, anti-inflammatory and antirheumatic properties. The antitumor effect of NSAIDs has been extensively investigated (6-8). Li et al (9) demonstrated that regular NSAID administration may reduce the incidence of colon cancer by 50% and also reduce the incidence of esophageal and gastric cancer. 
The apoptosis of cancer cells induced by aspirin may be the mechanism through which aspirin interferes with esophageal carcinogenesis and may be indicative of the potential of NSAIDs as chemopreventive agents in esophageal cancer. NCX-4016 (a derivative of aspirin containing a nitro group that releases nitric oxide in a sustained fashion for several hours in cells and in vivo) combined with CDDP was shown to sensitize drug-resistant strains of human ovarian cancer cells to CDDP. Furthermore, the inhibitory effect of CDDP plus NCX-4016 on drug-resistant strains of human ovarian cancer cells was significantly higher compared to that of CDDP and NCX-4016 alone, indicating that NCX-4016 may enhance the sensitivity of drug-resistant strains of human ovarian cancer cells to CDDP and may specifically eliminate CDDP-refractory cancer cells in patients with recurrent ovarian cancer (10). Kumar and Singh (11) suggested that pre-exposure of tumor cells to aspirin may lower the concentration of CDDP required to exert its cytotoxic effects. This finding may help design novel antitumor protocols with reduced doses of CDDP. Those studies indicate a novel method for overcoming CDDP resistance in the treatment of patients with gastric cancer. Nakamura et al (12) reported a negative correlation between survivin expression in gastric cancer cells and the survival time of patients with gastric cancer receiving CDDP chemotherapy. Those results indicated that survivin may be pivotal in the development of gastric cancer and resistance to CDDP and, therefore, controlling the expression of the survivin gene may be useful in the chemotherapy of gastric cancer, a hypothesis also supported by other studies (13,14). The abovementioned results indicated that the survivin gene is closely associated with resistance to CDDP in the chemotherapy of gastric cancer. 
In our experiments, the results of the MTT assay demonstrated that the cell survival rate in the aspirin plus CDDP group was significantly reduced. The flow cytometry test results revealed that the apoptotic rate in the aspirin plus CDDP group was significantly higher compared to that in the other groups. In addition, the RT-PCR and western blotting results revealed that the expression of survivin mRNA and protein were significantly reduced in the aspirin plus CDDP group. Taken together, these experimental results indicate that aspirin plus CDDP may exhibit significantly enhanced toxicity against SGC7901/CDDP cells compared to aspirin or CDDP alone, possibly through reducing survivin expression and inducing the apoptosis of SGC7901/CDDP cells. Related research demonstrated that NSAIDs may induce the apoptosis of tumor cells through a COX-2 non-dependent pathway and exert antitumor effects (15,16). Shao et al (17) suggested that the inhibition of NFκB activity is a plausible mechanism for apoptosis induced by the wild-type p53 gene transfer in human colon cancer cells and that anti-NFκB reagent aspirin may render these cells more susceptible to apoptosis. Adachi et al (18) suggested that increased ROS generation is one of the key mechanisms underlying the NSAID-mediated anticancer effects on various types of cancer cells. Oh et al (19) reported that the enhancement of mitochondrial permeability transition-dependent apoptosis by salicylates may be the mechanism underlying the protective effect of aspirin and other NSAIDs against colon, lung and breast cancers. Pathi et al (20) also suggested that the anticancer activity of aspirin may be due to its salicylate metabolite. Our results were obtained by aspirin (3 mmol/l) plus CDDP (10 µg/ml) acting on SGC7901/CDDP cells for 24 h. 
However, further investigation is required to determine whether the increased toxicity is dose-and time-dependent and whether there are additional mechanisms underlying the increased toxicity exhibited by aspirin plus CDDP against SGC7901/CDDP cells.
import { Rule } from "eslint";

/**
 * Builds the ESLint rule metadata object shared by all rules in this plugin.
 *
 * @param ruleName - rule identifier, used to build the docs URL
 * @param ruleDescription - human-readable description shown in docs
 * @param fix - when provided, marks the rule as fixable ("code" or "whitespace")
 */
export const getRuleMetaData = (
  ruleName: string,
  ruleDescription: string,
  fix?: "code" | "whitespace"
): Rule.RuleMetaData => {
  // Metadata common to every rule; the docs URL is derived from the rule name.
  const baseMeta = {
    type: "suggestion",
    docs: {
      description: ruleDescription,
      category: "Best Practices",
      recommended: true,
      url: `https://github.com/hack4impact-uiuc/eslint-plugin/blob/master/docs/rules/${ruleName}.md`,
    },
    schema: [],
  };
  if (fix === undefined) {
    return baseMeta as Rule.RuleMetaData;
  }
  // Only fixable rules carry the `fixable` property.
  return { ...baseMeta, fixable: fix } as Rule.RuleMetaData;
};
package listener

import (
	"fgame/fgame/core/event"
	gameevent "fgame/fgame/game/event"
	marryeventtypes "fgame/fgame/game/marry/event/types"
	pbuitl "fgame/fgame/game/marry/pbutil"
	marryscene "fgame/fgame/game/marry/scene"
	"fgame/fgame/game/player"
)

// heroismRankChange handles a heroism-value leaderboard change on a marriage
// scene: it rebuilds the top-three heroism message and broadcasts it to every
// player currently in the scene. (Original comment: 豪气值排行榜变化 — "heroism
// value leaderboard change".) The `data` payload is unused; everything needed
// comes from the scene itself.
func heroismRankChange(target event.EventTarget, data event.EventData) (err error) {
	sd, ok := target.(marryscene.MarrySceneData)
	if !ok {
		// Not a marriage scene — nothing to do for this event.
		return
	}
	heroismList := sd.GetHeroismList()
	scMarryHeroismTopThree := pbuitl.BuildSCMarryHeroismTopThree(heroismList)
	// Fan the same protobuf message out to every player in the scene.
	sceneAllPlayer := sd.GetScene().GetAllPlayers()
	for _, spl := range sceneAllPlayer {
		pl, ok := spl.(player.Player)
		if !ok {
			continue
		}
		pl.SendMsg(scMarryHeroismTopThree)
	}
	return
}

// Register the listener for heroism-change events at package load time.
func init() {
	gameevent.AddEventListener(marryeventtypes.EventTypeMarryHeriosmChange, event.EventListenerFunc(heroismRankChange))
}
"""Tests for the ``flask_restbolt.inputs`` request-argument parsers.

Covers datetime parsing (RFC 822 / ISO 8601), URL validation, regex
validators, primitive coercions (boolean, date, natural, positive,
int_range) and ISO 8601 interval parsing.  Module-level ``test_*``
functions are nose-style "yield" test generators; ``TypesTestCase``
uses plain unittest assertions.
"""
from datetime import datetime, timedelta, tzinfo
import unittest
import pytz
import re
#noinspection PyUnresolvedReferences
from nose.tools import assert_equal, assert_raises  # you need it for tests in form of continuations
import six

from flask_restbolt import inputs


def test_reverse_rfc822_datetime():
    # Each RFC 822 string must parse to the equivalent UTC-aware datetime.
    dates = [
        ("Sat, 01 Jan 2011 00:00:00 -0000", datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ("Sat, 01 Jan 2011 23:59:59 -0000", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ("Sat, 01 Jan 2011 21:59:59 -0200", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
    ]

    for date_string, expected in dates:
        yield assert_equal, inputs.datetime_from_rfc822(date_string), expected


def test_reverse_iso8601_datetime():
    # ISO 8601 strings with explicit offsets are normalized to UTC.
    dates = [
        ("2011-01-01T00:00:00+00:00", datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ("2011-01-01T23:59:59+00:00", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ("2011-01-01T23:59:59.001000+00:00", datetime(2011, 1, 1, 23, 59, 59, 1000, tzinfo=pytz.utc)),
        ("2011-01-01T23:59:59+02:00", datetime(2011, 1, 1, 21, 59, 59, tzinfo=pytz.utc))
    ]

    for date_string, expected in dates:
        yield assert_equal, inputs.datetime_from_iso8601(date_string), expected


def test_urls():
    # Valid URLs must be returned unchanged by the validator.
    urls = [
        'http://www.djangoproject.com/',
        'http://localhost/',
        'http://example.com/',
        'http://www.example.com/',
        'http://www.example.com:8000/test',
        'http://valid-with-hyphens.com/',
        'http://subdomain.example.com/',
        'http://192.168.3.11/',
        'http://192.168.3.11:8000/test',
        'http://valid-----hyphens.com/',
        'http://example.com?something=value',
        'http://example.com/index.php?something=value&another=value2',
        'http://foo:[email protected]',
        'http://foo:@example.com',
        'http://foo:@2001:db8:85a3::8a2e:370:7334',
        'http://foo2:qd1%[email protected]',
    ]

    for value in urls:
        yield assert_equal, inputs.url(value), value


def check_bad_url_raises(value):
    # Helper: the validator must reject `value` with the exact message.
    try:
        inputs.url(value)
        assert False, "shouldn't get here"
    except ValueError as e:
        assert_equal(six.text_type(e), u"{0} is not a valid URL".format(value))


def test_bad_urls():
    values = [
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        u'foo \u2713',
        'http://@foo:[email protected]',
        'http://:<EMAIL>@<EMAIL>',
        'http://bar:bar:[email protected]',
    ]

    for value in values:
        yield check_bad_url_raises, value


def test_bad_url_error_message():
    # URLs missing a scheme get the "Did you mean: http://..." hint.
    values = [
        'google.com',
        'domain.google.com',
        'kevin:[email protected]/path?query',
        u'google.com/path?\u2713',
    ]

    for value in values:
        yield check_url_error_message, value


def check_url_error_message(value):
    try:
        inputs.url(value)
        assert False, u"inputs.url({0}) should raise an exception".format(value)
    except ValueError as e:
        assert_equal(six.text_type(e),
                     (u"{0} is not a valid URL. Did you mean: http://{0}".format(value)))


def test_regex_bad_input():
    cases = (
        'abc',
        '123abc',
        'abc123',
        '',
    )

    num_only = inputs.regex(r'^[0-9]+$')

    for value in cases:
        yield assert_raises, ValueError, lambda: num_only(value)


def test_regex_good_input():
    cases = (
        '123',
        '1234567890',
        '00000',
    )

    num_only = inputs.regex(r'^[0-9]+$')

    for value in cases:
        yield assert_equal, num_only(value), value


def test_regex_bad_pattern():
    """Regex error raised immediately when regex input parser is created."""
    assert_raises(re.error, inputs.regex, '[')


def test_regex_flags_good_input():
    cases = (
        'abcd',
        'ABCabc',
        'ABC',
    )

    case_insensitive = inputs.regex(r'^[A-Z]+$', re.IGNORECASE)

    for value in cases:
        yield assert_equal, case_insensitive(value), value


def test_regex_flags_bad_input():
    cases = (
        'abcd',
        'ABCabc'
    )

    case_sensitive = inputs.regex(r'^[A-Z]+$')

    for value in cases:
        yield assert_raises, ValueError, lambda: case_sensitive(value)


class TypesTestCase(unittest.TestCase):
    # Simple coercion parsers: boolean, date, natural, positive, int_range.

    def test_boolean_false(self):
        assert_equal(inputs.boolean("False"), False)

    def test_boolean_is_false_for_0(self):
        assert_equal(inputs.boolean("0"), False)

    def test_boolean_true(self):
        assert_equal(inputs.boolean("true"), True)

    def test_boolean_is_true_for_1(self):
        assert_equal(inputs.boolean("1"), True)

    def test_boolean_upper_case(self):
        assert_equal(inputs.boolean("FaLSE"), False)

    def test_boolean(self):
        assert_equal(inputs.boolean("FaLSE"), False)

    def test_boolean_with_python_bool(self):
        """Input that is already a native python `bool` should be passed through
        without extra processing."""
        assert_equal(inputs.boolean(True), True)
        assert_equal(inputs.boolean(False), False)

    def test_bad_boolean(self):
        assert_raises(ValueError, lambda: inputs.boolean("blah"))

    def test_date_later_than_1900(self):
        assert_equal(inputs.date("1900-01-01"), datetime(1900, 1, 1))

    def test_date_input_error(self):
        assert_raises(ValueError, lambda: inputs.date("2008-13-13"))

    def test_date_input(self):
        assert_equal(inputs.date("2008-08-01"), datetime(2008, 8, 1))

    def test_natual_negative(self):
        assert_raises(ValueError, lambda: inputs.natural(-1))

    def test_natural(self):
        assert_equal(3, inputs.natural(3))

    def test_natual_string(self):
        assert_raises(ValueError, lambda: inputs.natural('foo'))

    def test_positive(self):
        assert_equal(1, inputs.positive(1))
        assert_equal(10000, inputs.positive(10000))

    def test_positive_zero(self):
        assert_raises(ValueError, lambda: inputs.positive(0))

    def test_positive_negative_input(self):
        assert_raises(ValueError, lambda: inputs.positive(-1))

    def test_int_range_good(self):
        int_range = inputs.int_range(1, 5)
        assert_equal(3, int_range(3))

    def test_int_range_inclusive(self):
        int_range = inputs.int_range(1, 5)
        assert_equal(5, int_range(5))

    def test_int_range_low(self):
        int_range = inputs.int_range(0, 5)
        assert_raises(ValueError, lambda: int_range(-1))

    def test_int_range_high(self):
        int_range = inputs.int_range(0, 5)
        assert_raises(ValueError, lambda: int_range(6))


def test_isointerval():
    # (interval string, (expected UTC start, expected UTC end)) pairs.
    intervals = [
        (
            # Full precision with explicit UTC.
            "2013-01-01T12:30:00Z/P1Y2M3DT4H5M6S",
            (
                datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc),
                datetime(2014, 3, 5, 16, 35, 6, tzinfo=pytz.utc),
            ),
        ),
        (
            # Full precision with alternate UTC indication
            "2013-01-01T12:30+00:00/P2D",
            (
                datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 3, 12, 30, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Implicit UTC with time
            "2013-01-01T15:00/P1M",
            (
                datetime(2013, 1, 1, 15, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 31, 15, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # TZ conversion
            "2013-01-01T17:00-05:00/P2W",
            (
                datetime(2013, 1, 1, 22, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 15, 22, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Date upgrade to midnight-midnight period
            "2013-01-01/P3D",
            (
                datetime(2013, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 4, 0, 0, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Start/end with UTC
            "2013-01-01T12:00:00Z/2013-02-01T12:00:00Z",
            (
                datetime(2013, 1, 1, 12, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 2, 1, 12, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Start/end with time upgrade
            "2013-01-01/2013-06-30",
            (
                datetime(2013, 1, 1, tzinfo=pytz.utc),
                datetime(2013, 6, 30, tzinfo=pytz.utc),
            ),
        ),
        (
            # Start/end with TZ conversion
            "2013-02-17T12:00:00-07:00/2013-02-28T15:00:00-07:00",
            (
                datetime(2013, 2, 17, 19, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 2, 28, 22, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        # Resolution expansion for single date(time)
        (
            # Second with UTC
            "2013-01-01T12:30:45Z",
            (
                datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc),
            ),
        ),
        (
            # Second with tz conversion
            "2013-01-01T12:30:45+02:00",
            (
                datetime(2013, 1, 1, 10, 30, 45, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 10, 30, 46, tzinfo=pytz.utc),
            ),
        ),
        (
            # Second with implicit UTC
            "2013-01-01T12:30:45",
            (
                datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc),
            ),
        ),
        (
            # Minute with UTC
            "2013-01-01T12:30+00:00",
            (
                datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc),
            ),
        ),
        (
            # Minute with conversion
            "2013-01-01T12:30+04:00",
            (
                datetime(2013, 1, 1, 8, 30, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 8, 31, tzinfo=pytz.utc),
            ),
        ),
        (
            # Minute with implicit UTC
            "2013-01-01T12:30",
            (
                datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc),
            ),
        ),
        (
            # Hour, explicit UTC
            "2013-01-01T12Z",
            (
                datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 13, tzinfo=pytz.utc),
            ),
        ),
        (
            # Hour with offset
            "2013-01-01T12-07:00",
            (
                datetime(2013, 1, 1, 19, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 20, tzinfo=pytz.utc),
            ),
        ),
        (
            # Hour with implicit UTC
            "2013-01-01T12",
            (
                datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 13, tzinfo=pytz.utc),
            ),
        ),
        (
            # Interval with trailing zero fractional seconds should
            # be accepted.
            "2013-01-01T12:00:00.0/2013-01-01T12:30:00.000000",
            (
                datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
            ),
        ),
    ]

    for value, expected in intervals:
        yield assert_equal, inputs.iso8601interval(value), expected


def test_invalid_isointerval_error():
    try:
        inputs.iso8601interval('2013-01-01/blah')
    except ValueError as error:
        assert_equal(
            str(error),
            "Invalid argument: 2013-01-01/blah. argument must be a valid ISO8601 "
            "date/time interval.",
        )
        return
    assert False, 'Should raise a ValueError'


def test_bad_isointervals():
    bad_intervals = [
        '2013-01T14:',
        '',
        'asdf',
        '01/01/2013',
    ]

    for bad_interval in bad_intervals:
        yield (
            assert_raises,
            ValueError,
            inputs.iso8601interval,
            bad_interval,
        )

if __name__ == '__main__':
    unittest.main()
#pragma once

#include <type_traits>

namespace aquarius
{
	namespace detail
	{
		// Derives from T so that a T with a protected/private constructor
		// (the usual singleton-style class) can still be instantiated here.
		template<typename T>
		class singleton_wrapper : public T
		{ };

		// CRTP-style singleton holder: T inherits from singleton<T> (or is
		// used via singleton<T>::instance()) to get a single shared instance.
		template<typename T>
		class singleton
		{
		public:
			// Returns the unique instance of T.
			static T& instance()
			{
				return get_instance();
			}

		private:
			// Meyers-style function-local static: constructed on first use.
			static T& get_instance()
			{
				static singleton_wrapper<T> t;
				return static_cast<T&>(t);
			}

		protected:
			singleton() {}

		private:
			// See the out-of-class definition below; this member exists only
			// to force instantiation of instance() for every used T.
			static T* instance_;
		};
	}
}

// Defining instance_ via a call to instance() forces the template to
// odr-use instance() at static-initialization time, so the singleton is
// created eagerly (before main) rather than lazily on first call.
// NOTE(review): instance_ itself is never read anywhere visible here.
template<typename T>
T* aquarius::detail::singleton<T>::instance_ = &singleton<T>::instance();
def as_matrix(self) -> Tuple[List[Cluster], np.array]:
    """Render the cluster graph as a symmetric affinity matrix.

    Returns a pair ``(clusters, matrix)`` where ``clusters`` fixes the
    row/column order and ``matrix[row, col]`` is ``1 / distance`` for
    each edge (zero where no edge exists).
    """
    clusters: List[Cluster] = list(self.clusters)
    # Map each cluster to its row/column position in the matrix.
    index_of: Dict[Cluster, int] = {c: n for n, c in enumerate(clusters)}

    size = len(clusters)
    matrix: np.array = np.zeros(shape=(size, size), dtype=float)
    for edge in self.edges:
        row, col = index_of[edge.left], index_of[edge.right]
        # Affinity is the inverse of the edge distance; write both
        # triangles so the matrix stays symmetric.
        weight = 1. / edge.distance
        matrix[row, col] = weight
        matrix[col, row] = weight
    return clusters, matrix
import { createApp } from 'vue';
import { VueReCaptcha } from 'vue-recaptcha-v3';
import App from './App.vue';
import { createI18n } from 'vue-i18n';
import I18nResources from './locale/messages';
import router from '@/router/router';
const moment = require('moment');
import store from '@/store/AppStore';
const paramStore = require('./store/paramStore');
/* import 'popper.js/dist/popper.min.js'; */
/* import 'jquery/src/jquery.js'; */
/* import 'jquery';
import 'bootstrap/dist/js/bootstrap.bundle.min.js'; */

//TODO
// Pick the UI language from the browser; anything non-English falls
// back to French (the default).
const navigatorLang = navigator.language /* || navigator.userLanguage */;
let language = 'fr';
if(navigatorLang.includes('en')){
  language = 'en';
}

// When "education" mode is enabled in the store, overlay the education
// message bundles on top of the base translations.
let messages: any = I18nResources;
if (store.state.general.education) {
  messages = {
    fr: { ...I18nResources.fr, ...I18nResources.educationfr },
    en: { ...I18nResources.en, ...I18nResources.educationen },
  };
}
const i18n = createI18n({
  locale: language,
  messages: messages,
});
moment.locale(language);

// Initialize the parameter store, then mount the app once the
// page-level parameter sections are in place.
paramStore
  .initialize({
    generalParameters: {
      organisationId: 'ecbd98d9-79bd-4312-ad5e-fc7c1c4a191c',
    },
    podcastPage: {},
    podcastsPage: {},
    emissionsPage: {},
    emissionPage: {},
    intervenantPage: {},
    searchPage: {},
    player: {},
    organisation: {},
    octopusApi: {},
    footer: {},
  })
  .then(() => {
    createApp(App)
      .use(i18n)
      .use(store)
      .use(router)
      .use(VueReCaptcha, { siteKey: '<KEY>' })
      .mount('#app');
  });
def add(self, unpackers, *args):
    """Build and return a closure that ingests entries into this collector.

    ``unpackers`` (and any extra ``*args``) are forwarded to the external
    ``retrieve`` helper for each entry.  The returned ``adder`` routes each
    transformed entry into one of three buckets based on the external
    ``partition``/``check`` helpers:

    - True  -> ``self.entries`` (entry passed the template check)
    - False -> ``self.rejects`` (entry failed the template check)
    - None  -> ``self.packed``  (entry was falsy, so the key function
               returned the entry itself, presumably None/empty —
               TODO confirm intent of this third bucket)
    """
    def transform(entry):
        # Delegate extraction to the module-level ``retrieve`` helper.
        return retrieve(entry, unpackers, *args)

    def adder(entries):
        results = map(transform, entries)
        # Three-way partition keyed on the truthiness of each result:
        # truthy results are checked against self.template (True/False),
        # falsy results propagate themselves as the key (e.g. None).
        parting = partition(lambda e: check(e, self.template) if e else e, results)
        self.entries.extend( parting.get(True, []) )
        self.rejects.extend( parting.get(False, []) )
        self.packed.extend( parting.get(None, []) )

    return adder
/**
 * <p>
 * Title: <B>DES encryption and decryption using Java Crypto </B>
 * </p>
 * <p>
 * Description:
 * </p>
 * This class provides a decryption and encryption service based on DES (Data
 * Encryption Standard). It always generates a key on encryption. On decryption,
 * it uses the embedded key. As such, it is not really secure and only prevents
 * simple attacks. In addition, the channel on which it is being used is
 * supposed to be secure.
 * <p>
 * NOTE(review): DES itself is cryptographically broken and ECB mode leaks
 * patterns; this utility should only be used for legacy obfuscation, never
 * for protecting sensitive data.
 */
public final class DESUtil {

	// Utility class: prevent instantiation.
	private DESUtil() {
	}

	// Full transformation used by the *Random variants.
	private static final String ALGORITHM_OPTIONS = "DES/ECB/PKCS5Padding";

	// Bare algorithm name; when passed to Cipher.getInstance the provider
	// fills in default mode/padding.
	private static final String ALGORITHM_KEY = "DES";

	private static final String ALGORITHM_ENCODING = "UTF-8";

	// Default key for the fixed-key encrypt/decrypt pair.
	private static final String PASSWORD_CRYPT_KEY = "__PAY__DES__KINGO__";

	/**
	 * Encrypts the provided clear text and returns encoded data as a string of
	 * hex char representations.
	 *
	 * @param clearText
	 *            Clear text to be encrypted
	 * @param key
	 *            key material (first 8 bytes used by DESKeySpec)
	 * @return String with the encrypted data of hex char.
	 */
	public static String encrypt(String clearText, String key) {
		String keyValue = null;
		try {
			// The DES algorithm requires a trusted source of randomness
			SecureRandom sr = new SecureRandom();
			// Create a DESKeySpec object from the raw key material
			DESKeySpec dks = new DESKeySpec(key.getBytes());
			// Create a key factory and use it to turn the DESKeySpec into
			// a SecretKey object
			SecretKeyFactory keyFactory = SecretKeyFactory
					.getInstance(ALGORITHM_KEY);
			SecretKey securekey = keyFactory.generateSecret(dks);
			// The Cipher object performs the actual encryption
			// NOTE(review): uses ALGORITHM_KEY ("DES"), not ALGORITHM_OPTIONS,
			// so mode/padding fall back to provider defaults — confirm this is
			// intentional before "fixing" it, existing ciphertexts depend on it.
			Cipher cipher = Cipher.getInstance(ALGORITHM_KEY);
			// Initialize the Cipher object with the key
			cipher.init(Cipher.ENCRYPT_MODE, securekey, sr);
			// Now take the data and encrypt it
			// NOTE(review): getBytes() uses the platform default charset while
			// decrypt() decodes with UTF-8 — mismatched on non-UTF-8 platforms.
			keyValue = HexUtil
					.toHexString(cipher.doFinal(clearText.getBytes()));
		} catch (Exception e) {
			keyValue = null;
			e.printStackTrace();
		}
		// Fall back to a marked plaintext when encryption failed.
		if (keyValue == null) {
			keyValue = "NOKEY " + clearText;
		}
		return keyValue;
	}

	/** Encrypts with the built-in default key. */
	public static String encrypt(String clearText) {
		return encrypt(clearText, PASSWORD_CRYPT_KEY);
	}

	/** Decrypts with the built-in default key. */
	public static String decrypt(String encryptedText) {
		return decrypt(encryptedText, PASSWORD_CRYPT_KEY);
	}

	/**
	 * Decrypts hex-encoded data produced by {@link #encrypt(String, String)}.
	 *
	 * @param encryptedText hex string of the ciphertext
	 * @param key           key material (must match the encryption key)
	 * @return the clear text, or null on any failure (errors are swallowed)
	 */
	public static String decrypt(String encryptedText, String key) {
		String clearText = null;
		try {
			// The DES algorithm requires a trusted source of randomness
			SecureRandom sr = new SecureRandom();
			// Create a DESKeySpec object from the raw key material
			DESKeySpec dks = new DESKeySpec(key.getBytes());
			// Create a key factory and use it to turn the DESKeySpec into
			// a SecretKey object
			SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(ALGORITHM_KEY);
			SecretKey securekey = keyFactory.generateSecret(dks);
			// The Cipher object performs the actual decryption
			Cipher cipher = Cipher.getInstance(ALGORITHM_KEY);
			// Initialize the Cipher object with the key
			cipher.init(Cipher.DECRYPT_MODE, securekey, sr);
			// Now take the data and decrypt it
			byte[] dataPart = HexUtil.toByteArray(encryptedText);
			byte[] decryptedData = cipher.doFinal(dataPart);
			clearText = new String(decryptedData, ALGORITHM_ENCODING);
		} catch (Exception e) {
			clearText = null;
		}
		return clearText;
	}

	/**
	 * Encrypts with a freshly generated random DES key; the hex-encoded key is
	 * prepended to the result, separated by a space (see class Javadoc for the
	 * security implications).
	 */
	public static String encryptRandom(String clearText) {
		String keyValue = null;
		try {
			// generate a secret key
			KeyGenerator kg = KeyGenerator.getInstance(ALGORITHM_KEY);
			kg.init(56);
			SecretKey key = kg.generateKey();

			// get the encoded value for the string and convert to hex
			byte[] encodedKey = key.getEncoded();
			keyValue = HexUtil.toHexString(encodedKey);
			keyValue += " ";

			// run the encryption algorithm
			Cipher cipher = Cipher.getInstance(ALGORITHM_OPTIONS);
			cipher.init(Cipher.ENCRYPT_MODE, key);
			keyValue += HexUtil.toHexString(cipher.doFinal(clearText
					.getBytes(ALGORITHM_ENCODING)));
			/*
			 * None of below exceptions should happen in runtime because we do
			 * both the encoding and the decoding and we assert a very common
			 * encrypt algorthm (DES). That is why we ignore them. Later on, we
			 * can add log4j to log these unexpected exceptions.
			 */
		} catch (IllegalStateException e) {
			keyValue = null;
		} catch (IllegalBlockSizeException e) {
			keyValue = null;
		} catch (BadPaddingException e) {
			keyValue = null;
		} catch (NoSuchAlgorithmException e) {
			keyValue = null;
		} catch (NoSuchPaddingException e) {
			keyValue = null;
		} catch (InvalidKeyException e) {
			keyValue = null;
		} catch (UnsupportedEncodingException e) {
			keyValue = null;
		}
		// Fall back to a marked plaintext when encryption failed.
		if (keyValue == null) {
			keyValue = "NOKEY " + clearText;
		}
		return keyValue;
	}

	/**
	 * Decrypts the string, which is supposed to have been encrypted with above
	 * encrypt function.
	 *
	 * @param encryptedText
	 *            encrypted text to be decrypted
	 * @return String with the decrypted data.
	 */
	public static String decryptRandom(String encryptedText) {
		String clearText = null;

		// split the hex into key and data parts.
		int spaceLoc = encryptedText.indexOf(' ');
		String encryptKey = encryptedText.substring(0, spaceLoc);
		String encryptData = encryptedText.substring(spaceLoc + 1);

		// special case of no encryption
		if (encryptKey.equals("NOKEY")) {
			return encryptData;
		}

		// convert hex data to byte array
		byte[] keyPart = HexUtil.toByteArray(encryptKey);
		byte[] dataPart = HexUtil.toByteArray(encryptData);

		// reconstruct the secret key
		SecretKeySpec sks = new SecretKeySpec(keyPart, ALGORITHM_KEY);
		try {
			// decrypt using the recovered key spec
			Cipher cipher = Cipher.getInstance(ALGORITHM_OPTIONS);
			cipher.init(Cipher.DECRYPT_MODE, sks);
			byte[] decryptedData = cipher.doFinal(dataPart);
			clearText = new String(decryptedData, ALGORITHM_ENCODING);
		} catch (NoSuchAlgorithmException e) {
			clearText = null;
		} catch (NoSuchPaddingException e) {
			clearText = null;
		} catch (InvalidKeyException e) {
			clearText = null;
		} catch (IllegalStateException e) {
			clearText = null;
		} catch (IllegalBlockSizeException e) {
			clearText = null;
		} catch (BadPaddingException e) {
			clearText = null;
		} catch (UnsupportedEncodingException e) {
			clearText = null;
		}
		return clearText;
	}

	// Ad-hoc manual test entry point.
	public static void main(String[] args) {
		System.out.println(decrypt("df4cd223373955dd"));
	}
}
from keyring import backend
from keyring.py310compat import metadata


def test_entry_point():
    """
    Keyring provides exactly one 'keyring' console script that's a callable.
    """
    matches = metadata.entry_points(group='console_scripts', name='keyring')
    # Tuple-unpacking asserts there is exactly one matching entry point.
    (script,) = matches
    assert callable(script.load())


def test_missing_metadata(monkeypatch):
    """
    _load_plugins should pass when keyring metadata is missing.
    """
    # Replace entry_points with a selector over an empty EntryPoints
    # collection, simulating an environment with no keyring metadata.
    monkeypatch.setattr(metadata, 'entry_points', metadata.EntryPoints().select)
    backend._load_plugins()
def upload(
    self,
    file_path=None,
    file_name=None,
    preflight_check=False,
    preflight_expected_size=0,
    upload_using_accelerator=False,
):
    """Upload the file at ``file_path`` by delegating to ``upload_stream``.

    The file is opened in binary mode and streamed.  When ``file_name``
    is not given, the name is derived from the path's basename.  All
    preflight/accelerator options are forwarded unchanged, and the
    result of ``upload_stream`` is returned.
    """
    # Fall back to the on-disk name only when no explicit name was given.
    target_name = file_name if file_name is not None else os.path.basename(file_path)
    with open(file_path, 'rb') as file_stream:
        return self.upload_stream(
            file_stream,
            target_name,
            preflight_check,
            preflight_expected_size=preflight_expected_size,
            upload_using_accelerator=upload_using_accelerator,
        )
def processSpam(tweet, tweetDict, userDict, event, myHandle):
    """Decide whether a tweet should be kept or discarded as spam/noise.

    Filters out retweets, our own tweets, accounts that merely embed the
    event name in their handle/display name without mentioning it in the
    tweet body, duplicate tweet texts, and users who have already posted
    more than 5 kept tweets.

    Returns a tuple ``(keep, tweetDict, userDict)`` where ``keep`` is True
    when the tweet passed every filter; the two dicts (seen tweet texts and
    per-user counts) are updated in place and returned for convenience.
    """
    handle = str(tweet.user.screen_name.encode('utf-8'))
    name = str(tweet.user.name.encode('utf-8'))
    # Extended-mode tweet objects expose full_text instead of text.
    try:
        text = str(tweet.text.encode('utf-8'))
    except AttributeError:
        text = str(tweet.full_text.encode('utf-8'))
    # Drop retweets (both classic "RT ..." prefix and API flag).
    if text[0:2] == "RT" or tweet.retweeted:
        return False, tweetDict, userDict
    # Drop our own tweets.
    if handle == myHandle:
        return False, tweetDict, userDict
    # If the event name only appears in the author's handle/name (not in
    # the tweet body once @mentions are stripped), treat it as noise.
    if (event.lower() in handle.lower()) or (event.lower() in name.lower()):
        if event.lower() not in twitterSpecific.cleanTweetTextofAts(text.lower()):
            return False, tweetDict, userDict
    # Same rule for replies to an event-named account.
    if tweet.in_reply_to_screen_name:
        if event.lower() in str(tweet.in_reply_to_screen_name.lower()):
            if event.lower() not in twitterSpecific.cleanTweetTextofAts(text.lower()):
                return False, tweetDict, userDict
    cleanTweet = twitterSpecific.cleanTweetText(text)
    # Drop exact duplicates of a tweet we have already kept.
    if cleanTweet in tweetDict:
        return False, tweetDict, userDict
    # Per-user rate cap: allow at most 6 kept tweets per handle.
    if handle in userDict:
        if userDict[handle] > 5:
            return False, tweetDict, userDict
        userDict[handle] += 1
    else:
        # BUGFIX: previously this assignment ran unconditionally, resetting
        # the counter to 1 on every call so the > 5 cap could never trigger.
        userDict[handle] = 1
    tweetDict[cleanTweet] = 1
    return True, tweetDict, userDict
def generate_visualization_dict(self, dynamic_variables, constant_variables, dynamic_values, constant_values):
    """Build and return the scene-description dict for the visualizer.

    For every visualization frame, camera and light, this generates the
    symbolic transformation matrix relative to the scene's reference
    frame/origin, lambdifies it over the given dynamic and constant
    variables, evaluates it numerically, and collects the per-object
    dicts under 'frames', 'cameras' and 'lights' in ``self._scene_data``.

    Parameters: ``dynamic_variables``/``dynamic_values`` are the
    time-varying symbols and their numeric trajectories;
    ``constant_variables``/``constant_values`` are the fixed parameters.
    Returns the populated ``self._scene_data`` dict (also stored on self).
    """
    self._scene_data = {}
    self._scene_data['name'] = self._name
    self._scene_data['height'] = self._height
    self._scene_data['width'] = self._width
    self._scene_data['frames'] = []
    self._scene_data['cameras'] = []
    self._scene_data['lights'] = []

    # Symbol -> value mapping for the constants.
    constant_map = dict(zip(constant_variables, constant_values))

    for frame in self.visualization_frames:
        # Same three-step pipeline for every object: symbolic matrix,
        # numeric (lambdified) function, then evaluation over the values.
        frame.generate_transformation_matrix(self._reference_frame, self._origin)
        frame.generate_numeric_transform_function(dynamic_variables, constant_variables)
        frame.evaluate_transformation_matrix(
            dynamic_values, constant_values)

        self._scene_data['frames'].append(
            frame.generate_visualization_dict(constant_map=constant_map))

    for camera in self.cameras:
        camera.generate_transformation_matrix(self._reference_frame, self._origin)
        camera.generate_numeric_transform_function(dynamic_variables, constant_variables)
        camera.evaluate_transformation_matrix(dynamic_values, constant_values)
        # NOTE(review): unlike frames, cameras/lights are not passed
        # constant_map — confirm this asymmetry is intentional.
        self._scene_data['cameras'].append(
            camera.generate_visualization_dict())

    for light in self.lights:
        light.generate_transformation_matrix(self._reference_frame, self._origin)
        light.generate_numeric_transform_function(dynamic_variables, constant_variables)
        light.evaluate_transformation_matrix(dynamic_values, constant_values)
        self._scene_data['lights'].append(
            light.generate_visualization_dict())

    return self._scene_data
// Copyright © 2020 AMIS Technologies
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cl

import (
	"errors"
	"math/big"

	"github.com/getamis/alice/crypto/utils"
	"github.com/golang/protobuf/proto"
)

var (
	big0      = big.NewInt(0)
	// Upper bound 2^256 used for rejection-sampled hashing below.
	big256bit = new(big.Int).Lsh(big1, 256)

	// ErrDifferentBQForms is returned if the two quadratic forms are different
	ErrDifferentBQForms = errors.New("different binary quadratic Forms")
)

/* Notations:
- upperboundOrder: A
- public key: h
- ciphertext: (c1, c2)=(g^r, f^a*h^r)
- challenge set c
- the message space: [0, p-1]. In our situation, the value p is the order of an elliptic curve group.
- distributionDistance: d

Alice(i.e. Prover) chooses the message a and a nonce r to get the CL ciphertext (c1, c2).
Through the following protocol, Bob(i.e. Verifier) can be convinced that Alice knows a, r,
but Bob does not learn a, r in this protocol. We use Fiat–Shamir heuristic to get the following protocol.

Step 1: The prover
- randomly chooses two integers r1 in [0, 2^{d}Ac] and r2 in [0, p-1].
- computes t1=g^{r1} and t2=h^{r1}f^{r2}.
- computes k:=H(t1, t2, g, f, h, p, q) mod c. Here H is a cryptography hash function.
- computes u1:=r1+kr in Z and u2:=r2+ka. Here Z is the ring of integer.
The resulting proof is (u1, u2, t1, t2, c1, c2).
Step 2: The verifier verifies
- u1 in [0, (2^{d}+1)Ac].
- u2 in [0, p-1].
- g^{u1}=t1*c1^k.
- h^{u1}*f^{u2}=t2*(c2)^k
*/

// buildProof produces the Fiat–Shamir proof of knowledge of (plainText, r)
// for the ciphertext (g^r, f^plainText * h^r), following Step 1 above.
func (pubKey *PublicKey) buildProof(plainText *big.Int, r *big.Int) (*ProofMessage, error) {
	// Compute 2^{d}ac + 1
	upperBound1 := new(big.Int).Mul(pubKey.a, pubKey.c)
	upperBound1 = upperBound1.Lsh(upperBound1, uint(pubKey.d))
	upperBound1 = upperBound1.Add(upperBound1, big1)
	// r1 in [0, 2^{d}Ac]
	r1, err := utils.RandomInt(upperBound1)
	if err != nil {
		return nil, err
	}
	// r2 in [0, p-1]
	r2, err := utils.RandomInt(pubKey.p)
	if err != nil {
		return nil, err
	}

	// Compute t1=g^{r1} and t2=h^{r1}*f^{r2}
	t1, err := pubKey.g.Exp(r1)
	if err != nil {
		return nil, err
	}
	t2, err := pubKey.h.Exp(r1)
	if err != nil {
		return nil, err
	}
	fPower, err := pubKey.f.Exp(r2)
	if err != nil {
		return nil, err
	}
	t2, err = t2.Composition(fPower)
	if err != nil {
		return nil, err
	}

	// k:=H(t1, t2, g, f, h, p, q, a, c) mod c
	// In our application c = 1024. If the field order is 2^32, we will get the uniform distribution D in [0,2^32-1].
	// If we consider the distribution E := { x in D| x mod c } is also the uniform distribution in [0,1023]=[0,c-1].
	k, salt, err := utils.HashProtosRejectSampling(big256bit, &Hash{
		T1: t1.ToMessage(),
		T2: t2.ToMessage(),
		G:  pubKey.g.ToMessage(),
		F:  pubKey.f.ToMessage(),
		H:  pubKey.h.ToMessage(),
		P:  pubKey.p.Bytes(),
		Q:  pubKey.q.Bytes(),
		A:  pubKey.a.Bytes(),
		C:  pubKey.c.Bytes(),
	})
	if err != nil {
		return nil, err
	}
	k = k.Mod(k, pubKey.c)

	// Compute u1:=r1+kr in Z and u2:=r2+k*plainText mod p
	u1 := new(big.Int).Mul(k, r)
	u1 = u1.Add(r1, u1)
	u2 := new(big.Int).Mul(k, plainText)
	u2 = u2.Add(u2, r2)
	u2 = u2.Mod(u2, pubKey.p)
	proof := &ProofMessage{
		Salt: salt,
		U1:   u1.Bytes(),
		U2:   u2.Bytes(),
		T1:   t1.ToMessage(),
		T2:   t2.ToMessage(),
	}
	return proof, nil
}

// VerifyEnc checks the proof embedded in a serialized EncryptedMessage
// (Step 2 above): range checks on u1, u2, then the two group equations
// g^{u1}=t1*c1^k and h^{u1}*f^{u2}=t2*c2^k, where k is recomputed from
// the transcript hash with the recorded salt.
func (pubKey *PublicKey) VerifyEnc(bs []byte) error {
	msg := &EncryptedMessage{}
	err := proto.Unmarshal(bs, msg)
	if err != nil {
		return err
	}
	t1, err := msg.Proof.T1.ToBQuadraticForm()
	if err != nil {
		return ErrInvalidMessage
	}
	t2, err := msg.Proof.T2.ToBQuadraticForm()
	if err != nil {
		return ErrInvalidMessage
	}
	c1, c2, err := msg.getBQs(pubKey.discriminantOrderP)
	if err != nil {
		return ErrInvalidMessage
	}

	// Compute (2^{d}+1)ac + 1
	ac := new(big.Int).Mul(pubKey.c, pubKey.a)
	upperBound := new(big.Int).Lsh(ac, uint(pubKey.d))
	upperBound = upperBound.Add(upperBound, ac)
	upperBound = upperBound.Add(upperBound, big1)
	// u1 in [0, (2^{d}+1)Ac]
	u1 := new(big.Int).SetBytes(msg.Proof.U1)
	err = utils.InRange(u1, big0, upperBound)
	if err != nil {
		return err
	}
	// u2 in [0, p-1].
	u2 := new(big.Int).SetBytes(msg.Proof.U2)
	err = utils.InRange(u2, big0, pubKey.p)
	if err != nil {
		return err
	}

	// Check g^{u1}=t1*c1^k
	// k:=H(t1, t2, g, f, h, p, q, a, c) mod c
	k, err := utils.HashProtosToInt(msg.Proof.Salt, &Hash{
		T1: msg.Proof.T1,
		T2: msg.Proof.T2,
		G:  pubKey.g.ToMessage(),
		F:  pubKey.f.ToMessage(),
		H:  pubKey.h.ToMessage(),
		P:  pubKey.p.Bytes(),
		Q:  pubKey.q.Bytes(),
		A:  pubKey.a.Bytes(),
		C:  pubKey.c.Bytes(),
	})
	if err != nil {
		return err
	}
	k = k.Mod(k, pubKey.c)
	t1c1k, err := c1.Exp(k)
	if err != nil {
		return err
	}
	t1c1k, err = t1c1k.Composition(t1)
	if err != nil {
		return err
	}
	g := pubKey.g
	gu1, err := g.Exp(u1)
	if err != nil {
		return err
	}
	if !gu1.Equal(t1c1k) {
		return ErrDifferentBQForms
	}

	// Check h^{u1}*f^{u2}=t2*(c2)^k
	f := pubKey.f
	hu1fu2, err := f.Exp(u2)
	if err != nil {
		return err
	}
	h := pubKey.h
	hu1, err := h.Exp(u1)
	if err != nil {
		return err
	}
	hu1fu2, err = hu1fu2.Composition(hu1)
	if err != nil {
		return err
	}
	c2k, err := c2.Exp(k)
	if err != nil {
		return err
	}
	t2c2k, err := c2k.Composition(t2)
	if err != nil {
		return err
	}
	if !t2c2k.Equal(hu1fu2) {
		return ErrDifferentBQForms
	}
	return nil
}
Izvor: Vlada RH GONG je podnio Ustavnom sudu prijedlog za pokretanje postupka za ocjenu suglasnosti ustavnosti i zakonitosti rješenja Predsjednika Vlade Republike Hrvatske od 28. travnja 2017. godine o razrješenjima MOST-ovih ministara unutarnjih poslova, pravosuđa, zaštite okoliša i energetike te 28. travnja i potpredsjednika Vlade i ministra uprave. GONG-ov prijedlog Ustavnom sudu o razrješenjima Vlahe Orepića, Ante Šprlje, Slavena Dobrovića i Ivana Kovačića usmjeren je na tri temeljne razine: proceduru razrješenja ministara koja nije propisana u slučajevima kad ministar ne podnese ostavku, Vlada nije glasala o odlukama, tj. nije donijela odluku sukladno Zakonu o Vladi, a saborski zastupnici nisu imali mogućnost postavljati pitanja razriješenim ministrima na tzv. aktualnom prijepodnevu čime se nije mogla ostvariti ustavna odredba kojom Vlada odgovara Hrvatskom saboru, a Hrvatski sabor nadzire rad Vlade, pobrojala je ova organizacija civilnog društva. Podsjećaju da iako Ustav Republike Hrvatske i Zakon o Vladi Republike Hrvatske ne propisuju mogućnost razrješenja ministara od strane predsjednika Vlade osim u slučaju podnošenja ostavke ministra, Plenković je 27. travnja 2017. potpisao rješenja o razrješenjima. Također, iako Zakon o Vladi propisuje da Vlada odlučuje natpolovičnom većinom glasova svih članova Vlade, a u slučaju da su glasovi podijeljeni odlučuje glas predsjednika Vlade, Vlada kao kolektivno tijelo nije glasala o razrješenjima ministara, a predsjednik Vlade nije mogao potpisati odluku koju Vlada nije donijela. Konačno, iako je sukladno Ustavu Vlada odgovorna Hrvatskom saboru, a Hrvatski sabor nadzire njen rad te zastupnici imaju pravo postavljati zastupnička pitanja Vladi i pojedinim ministrima, na tzv. aktualnom prijepodnevu u srijedu, 7. lipnja na početku 4. sjednice Hrvatskog sabora, zastupnici nisu mogli postavljati pitanja ministrima iz jednih od najvažnijih resora (uprave, pravosuđa, unutarnjih poslova te zaštite okoliša i energetike). 
I to jer niti nakon 41 dan od razrješenja ministara, predsjednik Vlade još nije bio predložio nove ministre čime je prekršio razuman rok od 30 dana i svime navedenim povrijedio Zakon o Vladi i Ustav, smatraju u GONG-u te obrazlažu u tekstu samog prijedloga Ustavnom sudu. GONG očekuje da Ustavni sud proglasi Plenkovićeva razrješenja Mostovih ministara protuustavnima i protuzakonitima. Slijedom navedenog, GONG očekuje da Ustavni sud pokrene postupak za ocjenu ustavnosti i zakonitosti razrješenja potpredsjednika Vlade i ministara koje je donio premijer te proglasi razrješenja protuustavnima i protuzakonitima te ih ukine. Neovisno o tome, GONG očekuje da Ustavni sud reagira i pozove zakonodavca da popuni ustavno-pravne praznine koje se odnose na razrješenje ministara te se osvrne na dugi rok od 43 dana u kojima je Vlada funkcionirala bez ministara s nedostatkom legitimnosti i mogućom zloupotrebom izvršne vlasti bez kontrole Hrvatskog sabora. Plenković pogriješio u razrješenju ministara? Valja podsjetiti da zamjerke na način razrješenja ima i Most, čiji je Robert Podolnjak prije nekoliko dana zapitao je li i sam premijer Andrej Plenković shvatio da je pogriješio? Naime, "kada je 27. travnja predsjednik Vlade Andrej Plenković razriješio dužnosti Mostove ministre, to rješenje je stupilo na snagu danom donošenja. Ministri su smijenjeni, a njihove dužnosti su prenesene na državne tajnike, koje je imenovala Vlada i koji nisu odgovorni Hrvatskom saboru. Upravo to je većina ustavnih stručnjaka smatrala spornim, tvrdeći da je rješenje o razrješenju tih ministara trebalo stupiti na snagu s imenovanjem novih ministara", podsjetio je Podolnjak. "U četvrtak, 8. lipnja, predsjednik Vlade razriješio je dužnosti ministre Kuščevića i Ćorića, ali u točki 2.
Rješenja navodi se da to Rješenje stupa na snagu trenutkom iskazivanja povjerenja Hrvatskog sabora novom ministru (graditeljstva i prostornog uređenja, odnosno rada i mirovinskog sustava)", ukazao je Mostov saborski zastupnik i zapitao: "Zašto je predsjednik Vlade donio različita Rješenja o razrješenju ministara u razmaku od 40-tak dana pozivajući se na istu ustavnu odredbu? Da li predsjednik Vlade donosi različita rješenja imajući u vidu samo činjenicu iz koje stranke dolaze ministri koje razrješuje dužnosti? Ili predsjednik Vlade zapravo priznaje da je pogriješio s razrješenjem Mostovih ministara i da uvažava stajalište većine ustavnopravnih stručnjaka da ministar koji je razriješen dužnosti ostaje na njoj do imenovanja novog ministra?“ N1 pratite putem aplikacija za Android | iPhone/iPad | Windows| i društvenih mreža Twitter | Facebook | Instagram.
package gov.cms.dpc.api.cli.keys; import gov.cms.dpc.api.cli.AbstractAdminCommand; import io.dropwizard.setup.Bootstrap; import net.sourceforge.argparse4j.inf.Namespace; import net.sourceforge.argparse4j.inf.Subparser; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.eclipse.jetty.http.HttpStatus; import org.hl7.fhir.dstu3.model.IdType; public class KeyDelete extends AbstractAdminCommand { private static final String KEY_ID = "key-id"; private static final String ORG_REFERENCE = "org-reference"; public KeyDelete() { super("delete", "Delete public key for registered Organization."); } @Override public void addAdditionalOptions(Subparser subparser) { subparser .addArgument("--org", "-o") .dest("org-reference") .required(true) .help("Organization entity"); subparser .addArgument("id") .required(true) .dest(KEY_ID) .help("ID of Public Key to delete"); } @Override public void run(Bootstrap<?> bootstrap, Namespace namespace) throws Exception { // Get the reference final String orgReference = namespace.getString(ORG_REFERENCE); final String keyID = namespace.getString(KEY_ID); System.out.println(String.format("Deleting public key %s for organization %s", keyID, orgReference)); final String apiService = namespace.getString(API_HOSTNAME); System.out.println(String.format("Connecting to API service at: %s", apiService)); // Delete the token try (final CloseableHttpClient httpClient = HttpClients.createDefault()) { final URIBuilder builder = new URIBuilder(String.format("%s/delete-key", apiService)); builder.setParameter("organization", new IdType(orgReference).getIdPart()); builder.setParameter("key", keyID); final HttpPost keyDelete = new HttpPost(builder.build()); try (CloseableHttpResponse response = httpClient.execute(keyDelete)) { if 
(!HttpStatus.isSuccess(response.getStatusLine().getStatusCode())) { System.err.println("Error deleting key: " + response.getStatusLine().getReasonPhrase()); System.exit(1); } } } System.out.println("Successfully deleted public key"); } }
Charge balancing PV System using charge-pumped flyback-boost-forward converter including differential power processor

This paper proposes a differential power processor using a charge-pumped flyback-boost-forward converter that includes a charge-balancing function for PV systems. This converter operates a boost converter and a forward converter independently even with the transformer coupling. In normal operation, when the multiple loads are power-balanced, it achieves high efficiency without the flyback operation. If the loads are imbalanced, the charge-pumped flyback converter starts to operate for output-capacitor charge balancing. This converter is cost-effective due to the unified transformer shared by the forward, boost and flyback converters. Also, from the perspective of MPPT efficiency, the forward converter operates as the differential power processor for distributed MPPT control. Furthermore, since the forward converter is not fed by the boost output but by the coaxial coupled inductor (transformer) of the boost stage, the power conversion efficiency is improved. An experimental verification from the 143-W hardware prototype is presented.
package org.jbrew.concurrent_tests.register; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import org.junit.Test; import org.jbrew.concurrent.*; @org.jbrew.core.annotations.Testing public class TaskRegisterTest { private Task<Integer> task; @org.junit.Before public void setup() { task = new BT<>(); } @Test public void instantiateTaskRegisterTest() { assertNotNull(new TaskRegister()); } @Test public void removeTaskTest() throws NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException, InvocationTargetException, NoSuchMethodException, InstantiationException { TaskRegister register = new TaskRegister(); register.offer(task); // add reflection to test whether the actual PriorityBlockingQueue removes the // Task<?>. Class<? extends Object> taskRegister = register.getClass(); Class<?>[] taskParam = { Object.class }; Field fieldDefinition = taskRegister.getDeclaredField("taskQueue"); fieldDefinition.setAccessible(true); Object fieldValue = fieldDefinition.get(taskRegister.getDeclaredConstructor(new Class<?>[] {}).newInstance()); Method contains = fieldValue.getClass().getDeclaredMethod("contains", taskParam); assert (Boolean) contains.invoke(fieldValue, this.task) == false; } @Test public void offerAndCheckSizeTest() { TaskRegister register = new TaskRegister(); register.offer(task); register.offer(task); assert register.getRegistrySize() == 2; } @Test public void offerAndRemoveThenCheckSizeTest() { TaskRegister register = new TaskRegister(); Task<Integer> task2 = new BT<>(); register.offer(task); register.offer(task2); register.remove(task); assert register.getRegistrySize() == 1; } @Test public void offerAndRemoveTest() { TaskRegister register = new TaskRegister(); Task<Integer> task2 = new BT<>(); register.offer(task); 
register.offer(task2); assertTrue(register.remove(task)); } private Task<?> t; //exists outside of the test method to remain unintialized / null. @Test public void offerNullTest() { TaskRegister reg = new TaskRegister(); reg.offer(t); assertEquals(0, reg.getRegistrySize()); } /** * Ensures that {@link org.jbrew.concurrent.TaskRegister#pollTask()} correctly * sorts ands polls the highest priority Task. */ @Test public void offerAndPollTaskTest() { TaskRegister register = new TaskRegister(); Task<Integer> lowPTask = new BT<>(), highPTask = new BT<>(); highPTask.setPriority(10); highPTask.setName("High Priority"); lowPTask.setPriority(1); lowPTask.setName("Low Priority"); register.offer(lowPTask); register.offer(highPTask); assert register.pollTask().getName() == "High Priority"; } @Test public void offerVarArgsTest() { TaskRegister register = new TaskRegister(); Task<Integer> task1 = new BT<>(), task2 = new BT<>(), task3 = new BT<>(); register.offer(task1, task2, task3); assertEquals(3, register.getRegistrySize()); } @Test public void offerVarArgsEmptyTest() { TaskRegister register = new TaskRegister(); register.offer(); assertEquals(0, register.getRegistrySize()); } private Task<?> t1; //exists outside of the test method to remain unintialized / null. @Test public void offerVarArgsNullTest() { TaskRegister register = new TaskRegister(); register.offer(t, t1); //"t" declared above at offerNullTest() - line 75. assertEquals(0, register.getRegistrySize()); } private Task<?>[] tArr; @Test public void offerVarArgsNullTest2() { TaskRegister register = new TaskRegister(); register.offer(tArr); //"t" declared above at offerNullTest() - line 75. assertEquals(0, register.getRegistrySize()); } @Test(expected = UnsupportedOperationException.class) public void interuptAllTest() { TaskRegister register = new TaskRegister(); register.interruptAll(); } private class BT<T> extends ObjectBlockingTask<Integer> { @Override protected void execute() { this.accept(69); } } }
def _simple_lca(self, node_x: Hashable, node_y: Hashable) -> Hashable: if node_x == node_y: return node_x answer = self.root for node_u, _ in self.ancestors[node_x]: for node_v, _ in self.ancestors[node_y]: treelca = self._treelca(node_u, node_v) if self.rank[treelca] >= self.rank[answer]: answer = treelca return answer
//Delete removes a comment from db func (cm *CommentsDBMock) Delete(userID uint64, id interface{}) error { idi, _ := strconv.ParseUint(id.(string), 10, 64) for i := range cm.Comments { if cm.Comments[i].ID == idi && cm.Comments[i].UserID == userID { cm.Comments[i] = cm.Comments[len(cm.Comments)-1] cm.Comments = cm.Comments[:len(cm.Comments)-1] return nil } } return fmt.Errorf("Comment not found") }
/**************************************************************************
 * jcAISearchAgent - An object which picks a best move according to a
 * variant of alphabeta search or another
 *
 * Purpose:
 * This is the object which picks a move for the computer player. Implemented
 * as an abstract class to allow multiple search strategies to be played with.
 *
 * History
 * 07.08.00 Creation
 * 05.10.00 Added statistics and some corrections
 *************************************************************************/
package com.captstudios.games.tafl.core.es.model.ai.optimization.search;

import java.util.Random;

import com.badlogic.gdx.math.FloatCounter;
import com.badlogic.gdx.utils.Array;
import com.badlogic.gdx.utils.Pool;
import com.captstudios.games.tafl.core.enums.EvaluationType;
import com.captstudios.games.tafl.core.es.model.ai.evaluators.BoardEvaluator;
import com.captstudios.games.tafl.core.es.model.ai.optimization.GameBoard;
import com.captstudios.games.tafl.core.es.model.ai.optimization.moves.HistoryTable;
import com.captstudios.games.tafl.core.es.model.ai.optimization.moves.Move;
import com.captstudios.games.tafl.core.es.model.ai.optimization.moves.RulesChecker;
import com.captstudios.games.tafl.core.es.model.ai.optimization.transposition.TranspositionTable;
import com.captstudios.games.tafl.core.es.model.ai.optimization.transposition.TranspositionTableEntry;

/**
 * Abstract base for AI search agents that pick a move via minimax with
 * alpha-beta pruning. Subclasses supply the top-level {@link #pickBestMove}
 * strategy; this class provides the recursive {@link #max}/{@link #min}
 * search, transposition-table probes, history-heuristic move ordering, and
 * per-depth timing statistics.
 *
 * @param <U> the concrete board type being searched
 */
public abstract class AISearchAgent<U extends GameBoard> {

    /***************************************************************************
     * DATA MEMBERS
     **************************************************************************/

    // Generates (and frees) the legal moves for a position.
    RulesChecker rulesChecker;

    // A transposition table for this object
    TranspositionTable transTable;

    // A handle to the system's history table
    HistoryTable historyTable;

    // How will we assess position strengths?
    protected BoardEvaluator<U> evaluator;

    // Player index from whose point of view leaf positions are evaluated.
    protected int fromWhosePerspective;

    // Alphabeta search boundaries
    protected static final int ALPHABETA_MAXVAL = 30000;
    protected static final int ALPHABETA_MINVAL = -30000;

    // Conversion factor for System.nanoTime() deltas -> seconds.
    protected static final float NANOS_IN_SECOND = 1000000000.0f;

    Random random;

    // Statistics
    protected int numRegularNodes;
    protected int numEvaluationNodes;
    protected int numRegularTTHits;
    protected int numEvaluationTTHits;
    protected int numRegularCutoffs;
    protected int numEvaluationCutoffs;

    // One timing counter per remaining-depth level; index (depth - 1) records
    // how long subtrees searched at that remaining depth took, in seconds.
    protected Array<FloatCounter> depthCounters;

    // Maximum search depth (plies) for this agent.
    protected int depth;

    // Pool of reusable move arrays to avoid per-node allocations.
    Pool<Array<Move>> arrayPool;

    // Construction
    public AISearchAgent(
            TranspositionTable transpositionTable,
            HistoryTable historyTable,
            BoardEvaluator<U> evaluator,
            RulesChecker rulesChecker,
            int depth) {
        this.transTable = transpositionTable;
        this.historyTable = historyTable;
        this.evaluator = evaluator;
        this.rulesChecker = rulesChecker;
        this.depth = depth;
        this.random = new Random();

        depthCounters = new Array<FloatCounter>();
        for (int i = 0; i < depth; i++) {
            // Anonymous subclass only overrides toString() for nicer stats dumps.
            depthCounters.add(new FloatCounter(0) {
                @Override
                public String toString() {
                    StringBuilder sb = new StringBuilder();
                    sb.append("{count = " + count);
                    sb.append(", total = " + total);
                    sb.append(", min = " + min);
                    sb.append(", max = " + max);
                    sb.append(", average = " + average);
                    sb.append(", value = " + value);
                    sb.append("}");
                    return sb.toString();
                }
            });
        }

        this.arrayPool = new Pool<Array<Move>>() {
            @Override
            protected Array<Move> newObject() {
                return new Array<Move>();
            }

            @Override
            public void free (Array<Move> object) {
                // Truncate rather than clear so the backing array is reused.
                object.size = 0;
                super.free(object);
            }
        };
    }

    /**
     * Pick a function which the agent will use to assess the potency of a
     * position. This may change during the game; for example, a special
     * "mop-up" evaluator may replace the standard when it comes time to drive
     * a decisive advantage home at the end of the game.
     *
     * @param eval
     */
    public void setEvaluator(BoardEvaluator<U> eval) {
        evaluator = eval;
    }

    /**
     * The basic alpha-beta algorithm, used in one disguise or another by
     * every search agent class. This is the maximizing node.
     *
     * @param board current position
     * @param turn  player to move at this node
     * @param depth remaining plies to search; 0 triggers a leaf evaluation
     * @param alpha lower search bound
     * @param beta  upper search bound
     * @return best (maximized) score found for this subtree
     */
    public int max(U board, int turn, int depth, int alpha, int beta) {
        // Count the number of nodes visited in the full-width search
        numRegularNodes++;

        // NOTE(review): board.hashCode() is used as the transposition key;
        // this assumes the board provides a strong (e.g. Zobrist-style) hash —
        // verify, since plain Object.hashCode() would make TT hits meaningless.
        TranspositionTableEntry entry = transTable.lookupBoard(board.hashCode());

        // First things first: let's see if there is already something useful
        // in the transposition table, which might save us from having to search
        // anything at all
        if (entry != null
                && (entry.evalType == EvaluationType.ACCURATE || entry.evalType == EvaluationType.LOWERBOUND)
                && entry.eval >= beta) {
            numRegularTTHits++;
            return entry.eval;
        } else if (depth == 0) {
            return evaluate(board);
        }

        Array<Move> legalMoves = getLegalMoves(board, turn);
        if (legalMoves.size == 0) {
            // No legal moves: score the position as-is (stalemate/terminal).
            return evaluate(board);
        }

        // Sort the moves according to History heuristic values
        historyTable.sortMoveList(legalMoves, turn);

        // OK, now, get ready to search
        // Case #1: We are searching a Max Node
        int bestSoFar = doMax(board, turn, depth, alpha, beta, legalMoves);

        // NOTE(review): the result is stored as ACCURATE even when the search
        // window (alpha, beta) was narrowed, in which case the score is only a
        // bound — confirm this is the intended TT convention.
        transTable.storeBoard(board.hashCode(), bestSoFar, EvaluationType.ACCURATE);
        clearLegalMoves(legalMoves);

        return bestSoFar;
    }

    /**
     * Minimizing counterpart of {@link #max}: same TT probe / leaf / move
     * ordering structure, but selects the lowest child score.
     */
    public int min(U board, int turn, int depth, int alpha, int beta) {
        // Count the number of nodes visited in the full-width search
        numRegularNodes++;
        TranspositionTableEntry entry = transTable.lookupBoard(board.hashCode());

        // First things first: let's see if there is already something useful
        // in the transposition table, which might save us from having to search
        // anything at all
        if (entry != null
                && (entry.evalType == EvaluationType.ACCURATE || entry.evalType == EvaluationType.UPPERBOUND)
                && entry.eval <= alpha) {
            numRegularTTHits++;
            return entry.eval;
        } else if (depth == 0) {
            return evaluate(board);
        }

        Array<Move> legalMoves = getLegalMoves(board, turn);
        if (legalMoves.size == 0) {
            return evaluate(board);
        }

        // Sort the moves according to History heuristic values
        historyTable.sortMoveList(legalMoves, turn);

        // OK, now, get ready to search
        // Case #2: We are searching a Min Node
        int bestSoFar = doMin(board, turn, depth, alpha, beta, legalMoves);

        transTable.storeBoard(board.hashCode(), bestSoFar, EvaluationType.ACCURATE);
        clearLegalMoves(legalMoves);

        return bestSoFar;
    }

    /**
     * Leaf evaluation from {@code fromWhosePerspective}'s point of view;
     * the score is also cached in the transposition table.
     */
    private int evaluate(U board) {
        int val = evaluator.evaluate(board, fromWhosePerspective);
        transTable.storeBoard(board.hashCode(), val, EvaluationType.ACCURATE);
        return val;
    }

    /**
     * Inner loop of a Min node: tries each move, recursing into {@link #max},
     * shrinking beta as better (lower) scores are found, and cutting off once
     * the score drops to alpha or below.
     */
    private int doMin(U board, int turn, int depth, int alpha, int beta,
            Array<Move> legalMoves) {
        int bestSoFar = ALPHABETA_MAXVAL;
        int currentBeta = beta;
        for (Move move : legalMoves) {
            board.simulateMove(move);
            try {
                // And search it in turn
                int movScore = 0;
                long start = System.nanoTime();
                try {
                    movScore = max(board, (turn + 1) % 2, depth - 1, alpha,
                            currentBeta);
                } finally {
                    // Record how long this remaining-depth subtree took.
                    depthCounters.get(depth - 1).put((System.nanoTime() - start) / NANOS_IN_SECOND);
                }
                currentBeta = Math.min(currentBeta, movScore);

                if (movScore < bestSoFar) {
                    bestSoFar = movScore;

                    // Cutoff?
                    if (bestSoFar <= alpha) {
                        // NOTE(review): a fail-low score is conventionally an
                        // UPPER bound on the true value; it is stored here as
                        // LOWERBOUND (and doMax's fail-high as UPPERBOUND) —
                        // verify against TranspositionTable's convention.
                        transTable.storeBoard(board.hashCode(), bestSoFar,
                                EvaluationType.LOWERBOUND);
                        historyTable.addCount(move, turn);
                        numRegularCutoffs++;
                        return bestSoFar;
                    }
                }
            } finally {
                // Always roll the simulated move back, even on exceptions.
                board.undoSimulatedMove();
            }
        }
        return bestSoFar;
    }

    /**
     * Inner loop of a Max node: tries each move, recursing into {@link #min},
     * raising alpha as better (higher) scores are found, and cutting off once
     * the score reaches beta or above.
     */
    private int doMax(U board, int turn, int depth, int alpha, int beta,
            Array<Move> legalMoves) {
        int bestSoFar = ALPHABETA_MINVAL;
        int currentAlpha = alpha;
        for (Move move : legalMoves) {
            board.simulateMove(move);
            try {
                // And search it in turn
                int movScore = 0;
                long start = System.nanoTime();
                try {
                    movScore = min(board, (turn + 1) % 2, depth - 1,
                            currentAlpha, beta);
                } finally {
                    depthCounters.get(depth - 1).put((System.nanoTime() - start) / NANOS_IN_SECOND);
                }
                currentAlpha = Math.max(currentAlpha, movScore);

                // Is the current successor better than the previous best?
                if (movScore > bestSoFar) {
                    bestSoFar = movScore;

                    // Can we cutoff now?
                    if (bestSoFar >= beta) {
                        // Store this best move in the TransTable
                        transTable.storeBoard(board.hashCode(), bestSoFar,
                                EvaluationType.UPPERBOUND);

                        // Add this move's efficiency in the HistoryTable
                        historyTable.addCount(move, turn);
                        numRegularCutoffs++;
                        return bestSoFar;
                    }
                }
            } finally {
                board.undoSimulatedMove();
            }
        }
        return bestSoFar;
    }

    /**
     * Generates the legal moves for {@code turn} and deep-copies them into a
     * pooled array, so the rules checker's internal move objects can be freed
     * independently of the search's working list.
     */
    protected Array<Move> getLegalMoves(U board, int turn) {
        Array<Move> generatedMoves = rulesChecker.generateLegalMoves(turn);
        Array<Move> legalMoves = arrayPool.obtain();
        for (Move move : generatedMoves) {
            legalMoves.add(move.clone());
        }
        return legalMoves;
    }

    /** Returns the cloned moves to the rules checker and the array to the pool. */
    protected void clearLegalMoves(Array<Move> moves) {
        rulesChecker.freeMoves(moves);
        arrayPool.free(moves);
    }

    /**
     * Each agent class needs some way of picking a move!
     * @param theBoard
     * @return
     */
    public abstract Move pickBestMove(U board, int turn);
}
import hashlib
from typing import Any, Dict, List

import posthoganalytics
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models.expressions import ExpressionWrapper, RawSQL
from django.db.models.fields import BooleanField
from django.db.models.query import QuerySet
from django.db.models.query_utils import Q
from django.dispatch import receiver
from django.utils import timezone
from sentry_sdk.api import capture_exception

from posthog.models.filters.mixins.utils import cached_property
from posthog.models.team import Team
from posthog.queries.base import properties_to_Q

from .filters import Filter
from .person import Person

# Largest value of a 15-hex-digit integer (60 bits); matches hexdigest()[:15]
# in FeatureFlagMatcher._hash so the ratio falls in [0, 1).
__LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF)


class FeatureFlag(models.Model):
    """A per-team feature flag, rolled out by person properties and/or a
    deterministic percentage of distinct_ids."""

    class Meta:
        constraints = [models.UniqueConstraint(fields=["team", "key"], name="unique key for team")]

    name: models.CharField = models.CharField(max_length=400)
    key: models.CharField = models.CharField(max_length=400)
    # Either the new {"groups": [...]} shape or the legacy flat shape;
    # get_filters() normalizes between the two.
    filters: JSONField = JSONField(default=dict)
    # Legacy top-level rollout; superseded by per-group rollout_percentage.
    rollout_percentage: models.IntegerField = models.IntegerField(null=True, blank=True)

    team: models.ForeignKey = models.ForeignKey("Team", on_delete=models.CASCADE)
    created_by: models.ForeignKey = models.ForeignKey("User", on_delete=models.CASCADE)
    created_at: models.DateTimeField = models.DateTimeField(default=timezone.now)
    deleted: models.BooleanField = models.BooleanField(default=False)
    active: models.BooleanField = models.BooleanField(default=True)

    def distinct_id_matches(self, distinct_id: str) -> bool:
        """Return True when this flag is on for the given distinct_id."""
        return FeatureFlagMatcher(distinct_id, self).is_match()

    def get_analytics_metadata(self) -> Dict:
        """Summary payload reported to product analytics on create/update."""
        filter_count = sum(len(group.get("properties", [])) for group in self.groups)

        return {
            "groups_count": len(self.groups),
            "has_filters": filter_count > 0,
            "has_rollout_percentage": any(group.get("rollout_percentage") for group in self.groups),
            "filter_count": filter_count,
            "created_at": self.created_at,
        }

    @property
    def groups(self):
        # Normalized list of rollout groups (possibly empty).
        return self.get_filters().get("groups", [])

    def get_filters(self):
        if "groups" in self.filters:
            return self.filters
        else:
            # :TRICKY: Keep this backwards compatible.
            # We don't want to migrate to avoid /decide endpoint downtime until this code has been deployed
            return {
                "groups": [
                    {"properties": self.filters.get("properties", []), "rollout_percentage": self.rollout_percentage}
                ]
            }


class FeatureFlagMatcher:
    """Decides whether a flag matches a distinct_id: a flag is on when ANY of
    its groups matches (property filters AND/OR rollout percentage)."""

    def __init__(self, distinct_id: str, feature_flag: FeatureFlag):
        self.distinct_id = distinct_id
        self.feature_flag = feature_flag

    def is_match(self):
        return any(self.is_group_match(group, index) for index, group in enumerate(self.feature_flag.groups))

    def is_group_match(self, group: Dict, group_index: int):
        rollout_percentage = group.get("rollout_percentage")
        if len(group.get("properties", [])) > 0:
            # Group has property filters: the person must satisfy them...
            if not self._match_distinct_id(group_index):
                return False
            elif not rollout_percentage:
                # ...and with no (or falsy) rollout percentage, properties alone decide.
                # NOTE(review): a rollout_percentage of 0 is falsy and therefore
                # matches here — confirm that 0 is meant to behave like "unset".
                return True

        if rollout_percentage is not None:
            # Deterministic percentage rollout based on the hashed distinct_id.
            if self._hash <= (rollout_percentage / 100):
                return True

        return False

    def _match_distinct_id(self, group_index: int) -> bool:
        # query_groups returns at most one row (the person, if found);
        # column group_index holds this group's property-match boolean.
        return len(self.query_groups) > 0 and self.query_groups[0][group_index]

    @cached_property
    def query_groups(self) -> List[List[bool]]:
        """Single DB query annotating, per group, whether the person behind
        distinct_id matches that group's property filters."""
        query: QuerySet = Person.objects.filter(
            team_id=self.feature_flag.team_id,
            persondistinctid__distinct_id=self.distinct_id,
            persondistinctid__team_id=self.feature_flag.team_id,
        )

        fields = []
        for index, group in enumerate(self.feature_flag.groups):
            key = f"group_{index}"
            if len(group.get("properties", {})) > 0:
                expr: Any = properties_to_Q(
                    Filter(data=group).properties, team_id=self.feature_flag.team_id, is_person_query=True
                )
            else:
                # No properties: the group trivially matches on this axis.
                expr = RawSQL("true", [])

            query = query.annotate(**{key: ExpressionWrapper(expr, output_field=BooleanField())})
            fields.append(key)

        return list(query.values_list(*fields))

    # This function takes a distinct_id and a feature flag key and returns a float between 0 and 1.
    # Given the same distinct_id and key, it'll always return the same float. These floats are
    # uniformly distributed between 0 and 1, so if we want to show this feature to 20% of traffic
    # we can do _hash(key, distinct_id) < 0.2
    @cached_property
    def _hash(self) -> float:
        hash_key = "%s.%s" % (self.feature_flag.key, self.distinct_id)
        hash_val = int(hashlib.sha1(hash_key.encode("utf-8")).hexdigest()[:15], 16)
        return hash_val / __LONG_SCALE__


@receiver(models.signals.post_save, sender=FeatureFlag)
def feature_flag_created(sender, instance, created, raw, using, **kwargs):
    """Report flag creation/update to product analytics, attributed to the creator."""
    if instance.created_by:
        event_name: str = "feature flag created" if created else "feature flag updated"
        posthoganalytics.capture(
            instance.created_by.distinct_id, event_name, instance.get_analytics_metadata(),
        )


def get_active_feature_flags(team: Team, distinct_id: str) -> List[str]:
    """Return the keys of all active, non-deleted flags of this team that
    match distinct_id. Per-flag errors are reported to Sentry and skipped so
    one bad flag cannot break flag resolution for the rest."""
    flags_enabled = []
    feature_flags = FeatureFlag.objects.filter(team=team, active=True, deleted=False).only(
        "id", "team_id", "filters", "key", "rollout_percentage"
    )
    for feature_flag in feature_flags:
        try:
            # distinct_id will always be a string, but data can have non-string values ("Any")
            if feature_flag.distinct_id_matches(distinct_id):
                flags_enabled.append(feature_flag.key)
        except Exception as err:
            capture_exception(err)
    return flags_enabled
<reponame>zhaozhenghao1993/zh-admin<gh_stars>1-10 package com.zhenghao.admin.server.handler.avatar; import com.zhenghao.admin.common.constant.UploadConstants; import com.zhenghao.admin.common.enums.FileTypeEnum; import com.zhenghao.admin.common.exception.upload.UploadException; import com.zhenghao.admin.common.exception.upload.UploadSizeException; import com.zhenghao.admin.common.exception.upload.UploadTypeException; import com.zhenghao.admin.common.util.FileUtils; import com.zhenghao.admin.common.config.UploadConfig; import com.zhenghao.admin.common.util.UploadUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.web.multipart.MultipartFile; import java.io.IOException; /** * 🙃 * 🙃 用户头像处理类 * 🙃 * * @author:zhaozhenghao * @Email :<EMAIL> * @date :2019/05/13 22:43 * UserAvatarHandler.java */ @Component public class UserAvatarHandler { private final static Logger logger = LoggerFactory.getLogger(UserAvatarHandler.class); private final UploadConfig uploadConfig; @Autowired public UserAvatarHandler(UploadConfig uploadConfig) { this.uploadConfig = uploadConfig; } /** * 头像上传处理 * * @param userId 用户id * @param file * @return */ public String avatarHandler(Long userId, MultipartFile file) { // 如果没有文件上传就 return 默认头像 if (file == null) { return UploadConstants.USER_AVATAR_DEFAULT_PATH; } if (FileUtils.fileType(file.getOriginalFilename()) != FileTypeEnum.IMAGE) { throw new UploadTypeException("Please upload images!"); } if (!FileUtils.checkFileSize(file.getSize(), UploadConstants.USER_AVATAR_FILE_SIZE, UploadConstants.USER_AVATAR_FILE_SIZE_UNIT)) { throw new UploadSizeException("Upload file too large!"); } String fileName = UploadConstants.USER_AVATAR_FILE_NAME.concat(file.getOriginalFilename().substring(file.getOriginalFilename().lastIndexOf('.'))); String directoryPath = 
uploadConfig.getUploadFileDirectory(UploadConstants.USER_AVATAR_DIRECTORY + userId.toString()); try { UploadUtils.uploadFile(file, directoryPath, fileName); } catch (IOException e) { logger.error("upload file Error", e); throw new UploadException("Upload file Error!"); } return uploadConfig.getUploadFilePath(UploadConstants.USER_AVATAR_DIRECTORY + userId.toString() + UploadConstants.PATH_SEPARATOR + fileName); } }
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof"
	"runtime"
	"runtime/debug"

	"github.com/gin-gonic/gin"

	"dev.project/BackEndCode/devcontrol/app"
	"dev.project/BackEndCode/devcontrol/coap/coapserver"
	"dev.project/BackEndCode/devcontrol/ioutil/dbutil"
	webcontrol "dev.project/BackEndCode/devcontrol/web/control"
)

// main boots the device-control backend: the CoAP service in a goroutine and
// a Gin HTTP server in the foreground. Panics are logged via _DumpErr.
func main() {
	defer _DumpErr()

	_AppInit()

	// Start the CoAP service
	go coapserver.StartCoapService()

	gin.SetMode(gin.ReleaseMode)
	pEngine := gin.Default()
	webcontrol.RouteConfig(pEngine)
	addr := fmt.Sprintf(":%d", app.GetConfig().GinHTTPPort)
	pEngine.Run(addr)
}

// _AppInit verifies the database and starts the pprof HTTP endpoint in the
// background.
func _AppInit() {
	// Check the database
	dbutil.CheckDB()

	// Performance profiling: http://host:port/debug/pprof
	go func() {
		addr := fmt.Sprintf(":%d", app.GetConfig().PprofHTTPPort)
		log.Println(http.ListenAndServe(addr, nil))
	}()
}

// _DumpErr logs a recovered panic together with its stack trace, then
// re-panics so the process still terminates abnormally.
func _DumpErr() {
	if p := recover(); p != nil {
		log.Println("ERR: ", p)
		log.Println(string(debug.Stack()))
		panic(p)
	}
}

// init configures logging (file/line flags, optional log file) and dumps the
// application configuration at startup.
func init() {
	if ptr, _, line, ok := runtime.Caller(0); ok {
		fun := runtime.FuncForPC(ptr)
		str := fmt.Sprintf("初始化: %s /%d", fun.Name(), line)
		log.Println(str)
	}
	log.SetFlags(log.Lshortfile | log.LstdFlags)
	if pLogFile := app.GetLogFile(); pLogFile != nil {
		log.Printf("<<<<<<<\t 日志文件: %s\n", pLogFile.Name())
		// os.Stdout = pLogFile
		// log.SetOutput(pLogFile)
	}

	buff, _ := json.Marshal(app.GetConfig())
	log.Printf("<<<<<<<<<< APP配置: \n\t%s\n", string(buff))
	log.Printf(`性能监测: http://%s:%d/debug/pprof`, app.GetConfig().ThisHostAddr, app.GetConfig().PprofHTTPPort)
}
/**
 * Fired when the Server receives a custom plugin message.
 *
 * @param channel The channel name of the plugin message. In my case '5zig'
 * @param player The player who sent the plugin message.
 * @param bytes The byte data of the plugin message.
 */
@Override
public void onPluginMessageReceived(String channel, Player player, byte[] bytes) {
    // Ignore traffic on any channel other than the mod's own.
    if (!channel.equals(The5zigMod.CHANNEL)) {
        return;
    }
    // Wrap the payload in a ByteBuf and rewind so parsing starts at byte 0.
    ByteBuf payload = Unpooled.buffer().writeBytes(bytes).resetReaderIndex();
    handlePluginMessage(player, payload);
}
/**
 * Validates the sudoku itself. Impossible sudokus (containing identical
 * numbers in the same row, box or column) are rejected so the solve process
 * never starts from a bogus puzzle.
 *
 * Every cell involved in a conflict is marked in the GUI and the puzzle's
 * error description is shown to the user.
 *
 * Note that this method only accepts sudokus that already have been
 * generated by the GUI. (avoiding double work)
 *
 * @return True if the sudoku is valid
 */
private boolean validateSudoku(Sudoku thissudoku) {
    boolean valid = thissudoku.validator();
    if (!valid) {
        // Highlight each conflicting cell; entries are {row, column} pairs.
        for (int[] cell : thissudoku.getErrorCells()) {
            markField(cell[0], cell[1]);
        }
        // Explain to the user what is wrong.
        output(thissudoku.getErrorInfo());
    }
    return valid;
}
/**
 * The base IQ (Info/Query) packet. IQ packets are used to get and set information
 * on the server, including authentication, roster operations, and creating
 * accounts. Each IQ stanza(/packet) has a specific type that indicates what type of action
 * is being taken: "get", "set", "result", or "error".<p>
 *
 * IQ packets can contain a single child element that exists in a specific XML
 * namespace. The combination of the element name and namespace determines what
 * type of IQ stanza(/packet) it is. Some example IQ subpacket snippets:<ul>
 *
 *  <li>&lt;query xmlns="jabber:iq:auth"&gt; -- an authentication IQ.
 *  <li>&lt;query xmlns="jabber:iq:private"&gt; -- a private storage IQ.
 *  <li>&lt;pubsub xmlns="http://jabber.org/protocol/pubsub"&gt; -- a pubsub IQ.
 * </ul>
 *
 * @author Matt Tucker
 */
public abstract class IQ extends Stanza {

    // Don't name this field 'ELEMENT'. When it comes to IQ, ELEMENT is the child element!
    public static final String IQ_ELEMENT = "iq";
    public static final String QUERY_ELEMENT = "query";

    // Name/namespace of the single child element; together they identify the
    // concrete IQ kind. Both are fixed at construction time.
    private final String childElementName;
    private final String childElementNamespace;

    private Type type = Type.get;

    public IQ(IQ iq) {
        super(iq);
        type = iq.getType();
        this.childElementName = iq.childElementName;
        this.childElementNamespace = iq.childElementNamespace;
    }

    protected IQ(String childElementName) {
        this(childElementName, null);
    }

    protected IQ(String childElementName, String childElementNamespace) {
        this.childElementName = childElementName;
        this.childElementNamespace = childElementNamespace;
    }

    /**
     * Returns the type of the IQ packet.
     *
     * @return the type of the IQ packet.
     */
    public Type getType() {
        return type;
    }

    /**
     * Sets the type of the IQ packet.
     * <p>
     * Since the type of an IQ must present, an IllegalArgmentException will be thrown when type is
     * <code>null</code>.
     * </p>
     *
     * @param type the type of the IQ packet.
     */
    public void setType(Type type) {
        this.type = Objects.requireNonNull(type, "type must not be null");
    }

    /**
     * Return true if this IQ is a request IQ, i.e. an IQ of type {@link Type#get} or {@link Type#set}.
     *
     * @return true if IQ type is 'get' or 'set', false otherwise.
     * @since 4.1
     */
    public boolean isRequestIQ() {
        switch (type) {
        case get:
        case set:
            return true;
        default:
            return false;
        }
    }

    public final String getChildElementName() {
        return childElementName;
    }

    public final String getChildElementNamespace() {
        return childElementNamespace;
    }

    @Override
    public final XmlStringBuilder toXML() {
        XmlStringBuilder buf = new XmlStringBuilder();
        buf.halfOpenElement(IQ_ELEMENT);
        addCommonAttributes(buf);
        if (type == null) {
            // Defensive only: 'type' is initialized to Type.get and setType()
            // rejects null, so this branch should be unreachable in practice.
            buf.attribute("type", "get");
        }
        else {
            buf.attribute("type", type.toString());
        }
        buf.rightAngleBracket();
        buf.append(getChildElementXML());
        buf.closeElement(IQ_ELEMENT);
        return buf;
    }

    /**
     * Returns the sub-element XML section of the IQ packet, or the empty String if there
     * isn't one.
     *
     * @return the child element section of the IQ XML.
     */
    public final XmlStringBuilder getChildElementXML() {
        XmlStringBuilder xml = new XmlStringBuilder();
        if (type == Type.error) {
            // Add the error sub-packet, if there is one.
            appendErrorIfExists(xml);
        }
        else if (childElementName != null) {
            // Add the query section if there is one.
            IQChildElementXmlStringBuilder iqChildElement = getIQChildElementBuilder(new IQChildElementXmlStringBuilder(this));
            if (iqChildElement != null) {
                xml.append(iqChildElement);

                XmlStringBuilder extensionsXml = getExtensionsXML();
                if (iqChildElement.isEmptyElement) {
                    if (extensionsXml.length() == 0) {
                        // No body and no extensions: emit "<child/>" and stop.
                        xml.closeEmptyElement();
                        return xml;
                    } else {
                        // Extensions force the element open despite an empty body.
                        xml.rightAngleBracket();
                    }
                }

                xml.append(extensionsXml);
                xml.closeElement(iqChildElement.element);
            }
        }
        return xml;
    }

    /**
     * This method must be overwritten by IQ subclasses to create their child content. It is important that the builder
     * <b>does not include the final end element</b>. This will be done automatically by IQChildelementXmlStringBuilder
     * after eventual existing stanza(/packet) extensions have been added.
     * <p>
     * For example to create an IQ with a extra attribute and an additional child element
     * </p>
     * <pre>
     * {@code
     * <iq to='[email protected]' id='123'>
     *   <bar xmlns='example:bar' extraAttribute='blaz'>
     *      <extraElement>elementText</extraElement>
     *   </bar>
     * </iq>
     * }
     * </pre>
     * the body of the {@code getIQChildElementBuilder} looks like
     * <pre>
     * {@code
     * // The builder 'xml' will already have the child element and the 'xmlns' attribute added
     * // So the current builder state is "<bar xmlns='example:bar'"
     * xml.attribute("extraAttribute", "blaz");
     * xml.rightAngleBracket();
     * xml.element("extraElement", "elementText");
     * // Do not close the 'bar' attribute by calling xml.closeElement('bar')
     * }
     * </pre>
     * If your IQ only contains attributes and no child elements, i.e. it can be represented as empty element, then you
     * can mark it as such.
     * <pre>
     * xml.attribute(&quot;myAttribute&quot;, &quot;myAttributeValue&quot;);
     * xml.setEmptyElement();
     * </pre>
     * If your IQ does not contain any attributes or child elements (besides stanza(/packet) extensions), consider sub-classing
     * {@link SimpleIQ} instead.
     *
     * @param xml a pre-created builder which already has the child element and the 'xmlns' attribute set.
     * @return the build to create the IQ child content.
     */
    protected abstract IQChildElementXmlStringBuilder getIQChildElementBuilder(IQChildElementXmlStringBuilder xml);

    /**
     * Convenience method to create a new empty {@link Type#result IQ.Type.result}
     * IQ based on a {@link Type#get IQ.Type.get} or {@link Type#set IQ.Type.set}
     * IQ. The new stanza(/packet) will be initialized with:<ul>
     *      <li>The sender set to the recipient of the originating IQ.
     *      <li>The recipient set to the sender of the originating IQ.
     *      <li>The type set to {@link Type#result IQ.Type.result}.
     *      <li>The id set to the id of the originating IQ.
     *      <li>No child element of the IQ element.
     * </ul>
     *
     * @param request the {@link Type#get IQ.Type.get} or {@link Type#set IQ.Type.set} IQ packet.
     * @throws IllegalArgumentException if the IQ stanza(/packet) does not have a type of
     *      {@link Type#get IQ.Type.get} or {@link Type#set IQ.Type.set}.
     * @return a new {@link Type#result IQ.Type.result} IQ based on the originating IQ.
     */
    public static IQ createResultIQ(final IQ request) {
        return new EmptyResultIQ(request);
    }

    /**
     * Convenience method to create a new {@link Type#error IQ.Type.error} IQ
     * based on a {@link Type#get IQ.Type.get} or {@link Type#set IQ.Type.set}
     * IQ. The new stanza(/packet) will be initialized with:<ul>
     *      <li>The sender set to the recipient of the originating IQ.
     *      <li>The recipient set to the sender of the originating IQ.
     *      <li>The type set to {@link Type#error IQ.Type.error}.
     *      <li>The id set to the id of the originating IQ.
     *      <li>The child element contained in the associated originating IQ.
     *      <li>The provided {@link XMPPError XMPPError}.
     * </ul>
     *
     * @param request the {@link Type#get IQ.Type.get} or {@link Type#set IQ.Type.set} IQ packet.
     * @param error the error to associate with the created IQ packet.
     * @throws IllegalArgumentException if the IQ stanza(/packet) does not have a type of
     *      {@link Type#get IQ.Type.get} or {@link Type#set IQ.Type.set}.
     * @return a new {@link Type#error IQ.Type.error} IQ based on the originating IQ.
     */
    public static ErrorIQ createErrorResponse(final IQ request, final XMPPError error) {
        if (!(request.getType() == Type.get || request.getType() == Type.set)) {
            throw new IllegalArgumentException(
                    "IQ must be of type 'set' or 'get'. Original IQ: " + request.toXML());
        }
        final ErrorIQ result = new ErrorIQ(error);
        result.setStanzaId(request.getStanzaId());
        // Mirror addressing: the error goes back to whoever sent the request.
        result.setFrom(request.getTo());
        result.setTo(request.getFrom());
        return result;
    }

    /**
     * A enum to represent the type of the IQ stanza.
     */
    public enum Type {

        /**
         * The IQ stanza requests information, inquires about what data is needed in order to complete further operations, etc.
         */
        get,

        /**
         * The IQ stanza provides data that is needed for an operation to be completed, sets new values, replaces existing values, etc.
         */
        set,

        /**
         * The IQ stanza is a response to a successful get or set request.
         */
        result,

        /**
         * The IQ stanza reports an error that has occurred regarding processing or delivery of a get or set request.
         */
        error,
        ;

        /**
         * Converts a String into the corresponding types. Valid String values
         * that can be converted to types are: "get", "set", "result", and "error".
         *
         * @param string the String value to covert.
         * @return the corresponding Type.
         * @throws IllegalArgumentException when not able to parse the string parameter
         * @throws NullPointerException if the string is null
         */
        public static Type fromString(String string) {
            return Type.valueOf(string.toLowerCase(Locale.US));
        }
    }

    public static class IQChildElementXmlStringBuilder extends XmlStringBuilder {
        // Child element name, needed later to emit the matching close tag.
        private final String element;
        // Set via setEmptyElement() when the child has no body content.
        private boolean isEmptyElement;

        private IQChildElementXmlStringBuilder(IQ iq) {
            this(iq.getChildElementName(), iq.getChildElementNamespace());
        }

        public IQChildElementXmlStringBuilder(ExtensionElement pe) {
            this(pe.getElementName(), pe.getNamespace());
        }

        private IQChildElementXmlStringBuilder(String element, String namespace) {
            prelude(element, namespace);
            this.element = element;
        }

        public void setEmptyElement() {
            isEmptyElement = true;
        }
    }
}
"use strict";

// Wire up the UI controls. The handler functions below must be defined in this
// file: in the original they were commented out, so these addEventListener
// calls threw a ReferenceError as soon as the script loaded.
document.getElementById("timeChange").addEventListener("click", timeChangeReveal);
document.getElementById("submitButton").addEventListener("click", timeChange);

// On page load, fetch the currently scheduled time and display it.
window.onload = function getTime(): void {
    fetch('/scheduler').then(response => response.json()).then((response) => {
        console.log(response);
        document.getElementById("timeDisplay").innerHTML = response;
    });
};

// Reveal the time-selection form and hide the current-time display.
function timeChangeReveal(): void {
    document.getElementById("selectTime").style.display = "block";
    // NOTE(review): the original draft set `style.direction`; `display` is the
    // property that actually shows/hides an element and matches usage elsewhere.
    document.getElementById("currentTime").style.display = "none";
}

// Submit the newly selected time to the server, show the server's reply, and
// switch the UI back to the current-time view.
function timeChange(): void {
    console.log("Button clicked");
    var timeContainer = document.getElementById("appt") as HTMLInputElement;
    var time = timeContainer.value;
    var url = "/scheduler?time=" + time;
    console.log(url);
    fetch(url).then(response => response.text()).then((response) => {
        console.log(response);
        document.getElementById("timeDisplay").innerHTML = response;
        document.getElementById("selectTime").style.display = "none";
        document.getElementById("currentTime").style.display = "block";
    });
}
use aoc_runner_derive::{aoc, aoc_generator};

use crate::shared::*;

// ======================================================
// DAY 9
// ======================================================

/// Parse the comma-separated Intcode source into a vector of `i64` values.
///
/// Panics if any element fails to parse, which is acceptable for puzzle input.
#[aoc_generator(day9)]
pub fn input_generator_day9(input: &str) -> Vec<i64> {
    input
        .split(',')
        .map(|x| x.trim().parse().unwrap())
        .collect()
}

/// Run the BOOST Intcode program with a single input value and return its
/// outputs formatted for display. Shared by both parts, which differ only in
/// the input value they feed the program.
fn run_boost(program: &[i64], system_id: i64) -> String {
    let mut program = Program::new(program, &[system_id]);
    program.run();
    format!("{:?}", program.outputs)
}

#[aoc(day9, part1)]
pub fn solve_day9_part1(input: &[i64]) -> String {
    // Part 1: run in test mode (input 1).
    run_boost(input, 1)
}

#[aoc(day9, part2)]
pub fn solve_day9_part2(input: &[i64]) -> String {
    // Part 2: run in sensor-boost mode (input 2).
    run_boost(input, 2)
}
// FormatVolume formats a volume identified by `volumeId` func (proxy *CSIProxyV1) FormatVolume(volumeId string) (err error) { _, err = proxy.VolumeClient.FormatVolume( context.Background(), &volumeapi.FormatVolumeRequest{ VolumeId: volumeId, }, ) if err != nil { return err } return nil }
For me, malfunctioning escalators are the stuff of nightmares. I still secretly fear that up escalators are going to eat me. So this people-flinging Metro station escalator from the day of Jon Stewart and Stephen Colbert’s “Rally to Restore Sanity and/or Fear” last year has given me a whole new category of escalator malfunctions to fear. Escalators have a braking system that’s supposed to kick in when the steps are moving too fast. In this case, the braking system failed, and people rushed to get out of the way as the escalator moved faster and faster. When one passenger fell, a pileup occurred. The local Fox affiliate spoke to a man who was on the escalator at the time. Initially the reaction was, oh good we’re moving faster. Then in about a split second I realized that’s not good. This escalator’s out of control and we were hurtling towards the bottom. So gut instinct, I decided I didn’t want to wind up at the bottom of the pile so I jumped over the railing and grabbed my son and he slid down behind me. We slid down the balustrade that runs between the two escalators. Someone has already set this to “Yakety Sax,” so don’t bother. Online, this seems hilarious, but six people were injured–four of them badly enough that they needed an ambulance ride to the hospital. Metro’s statement on the incident: Staff reported to the Board in November that the cause of this incident was a failure of the braking system to stop when the over-speed safety switch activated. On November 4, Metro began a program with our escalators that included: inspection of the braking systems of the 588 escalators in service (which was completed); modification of our procedures to increase the frequency of our brake tests to monthly from quarterly; and additional steps to improve quality control and compliance with escalator maintenance standards. Of course, longtime D.C.
residents are probably shaking their heads and saying, “this is what happens when tourists don’t stand on the right.” Video: L’Enfant Plaza Escalator Malfunction in DC [My FOX DC] (via Gizmodo)
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AV1_COMMON_WARPED_MOTION_H_
#define AOM_AV1_COMMON_WARPED_MOTION_H_

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include "config/aom_config.h"

#include "aom_ports/mem.h"
#include "aom_dsp/aom_dsp_common.h"
#include "av1/common/mv.h"
#include "av1/common/convolve.h"

#define MAX_PARAMDIM 9
#define LEAST_SQUARES_SAMPLES_MAX_BITS 3
#define LEAST_SQUARES_SAMPLES_MAX (1 << LEAST_SQUARES_SAMPLES_MAX_BITS)
/* Each least-squares sample contributes two coordinates (x, y). */
#define SAMPLES_ARRAY_SIZE (LEAST_SQUARES_SAMPLES_MAX * 2)
#define WARPED_MOTION_DEBUG 0
#define DEFAULT_WMTYPE AFFINE

extern const int16_t warped_filter[WARPEDPIXEL_PREC_SHIFTS * 3 + 1][8];

/* Column-index remapping tables. Row r of warp_pad_left maps each of the 16
 * source columns so that the first r+1 entries repeat column r+1's value; the
 * mirror-image warp_pad_right clamps the tail entries instead. The clamping
 * pattern indicates these replicate edge pixels when a warped block overhangs
 * the left/right frame border — assumption inferred from the names and data;
 * confirm against the warp implementation. */
static const uint8_t warp_pad_left[14][16] = {
  { 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 3, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 5, 5, 5, 5, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 6, 6, 6, 6, 6, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 7, 7, 7, 7, 7, 7, 7, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 10, 11, 12, 13, 14, 15 },
  { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 11, 12, 13, 14, 15 },
  { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 12, 13, 14, 15 },
  { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 13, 14, 15 },
  { 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 14, 15 },
  { 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 15 },
  { 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15 },
};

static const uint8_t warp_pad_right[14][16] = {
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 14 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 13, 13 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 12, 12 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 10, 10 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 8, 8, 8, 8, 8 },
  { 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7 },
  { 0, 1, 2, 3, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 },
  { 0, 1, 2, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 },
  { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
  { 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
  { 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
  { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
};

// Returns the error between the result of applying motion 'wm' to the frame
// described by 'ref' and the frame described by 'dst'.
int64_t av1_warp_error(WarpedMotionParams *wm, int use_hbd, int bd,
                       const uint8_t *ref, int width, int height, int stride,
                       uint8_t *dst, int p_col, int p_row, int p_width,
                       int p_height, int p_stride, int subsampling_x,
                       int subsampling_y, int64_t best_error);

// Returns the error between the frame described by 'ref' and the frame
// described by 'dst'.
int64_t av1_frame_error(int use_hbd, int bd, const uint8_t *ref, int stride,
                        uint8_t *dst, int p_width, int p_height, int p_stride);

// Warps the plane in 'ref' according to 'wm' and writes the result to 'pred'.
void av1_warp_plane(WarpedMotionParams *wm, int use_hbd, int bd,
                    const uint8_t *ref, int width, int height, int stride,
                    uint8_t *pred, int p_col, int p_row, int p_width,
                    int p_height, int p_stride, int subsampling_x,
                    int subsampling_y, ConvolveParams *conv_params);

// Fits warped-motion parameters to the 'np' point correspondences in
// 'pts1'/'pts2'; results are written to 'wm_params'.
int find_projection(int np, int *pts1, int *pts2, BLOCK_SIZE bsize, int mvy,
                    int mvx, WarpedMotionParams *wm_params, int mi_row,
                    int mi_col);

int get_shear_params(WarpedMotionParams *wm);

#endif  // AOM_AV1_COMMON_WARPED_MOTION_H_
/**
 * Copyright &copy; 2012-2016 <a href="https://github.com/thinkgem/jeesite">JeeSite</a> All rights reserved.
 */
package com.thinkgem.jeesite.modules.affair.service;

import com.thinkgem.jeesite.common.persistence.Page;
import com.thinkgem.jeesite.common.service.CrudService;
import com.thinkgem.jeesite.modules.affair.dao.AffairYearThreeOneDao;
import com.thinkgem.jeesite.modules.affair.entity.AffairYearThreeOne;
import com.thinkgem.jeesite.modules.sys.utils.UserUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.util.Date;
import java.util.List;

/**
 * Data-entry service for "Three Meetings, One Lesson" ("三会一课") records.
 * Mostly thin delegations to {@link CrudService} and {@link AffairYearThreeOneDao}.
 * @author eav.liu
 * @version 2019-11-08
 */
@Service
@Transactional(readOnly = true)
public class AffairYearThreeOneService extends CrudService<AffairYearThreeOneDao, AffairYearThreeOne> {

    @Autowired
    private AffairYearThreeOneDao affairYearThreeOneDao;

    public AffairYearThreeOne get(String id) {
        return super.get(id);
    }

    public List<AffairYearThreeOne> findList(AffairYearThreeOne affairYearThreeOne) {
        return super.findList(affairYearThreeOne);
    }

    public Page<AffairYearThreeOne> findPage(Page<AffairYearThreeOne> page, AffairYearThreeOne affairYearThreeOne) {
        // Restrict the query to the current user's data scope before paging.
        affairYearThreeOne.getSqlMap().put("dsf", dataScopeFilter(UserUtils.getUser(), "o", "u"));
        return super.findPage(page, affairYearThreeOne);
    }

    @Transactional(readOnly = false)
    public void save(AffairYearThreeOne affairYearThreeOne) {
        affairYearThreeOne.setStatus("3");// status "3" = not yet reviewed (未审核)
        super.save(affairYearThreeOne);
    }

    @Transactional(readOnly = false)
    public void delete(AffairYearThreeOne affairYearThreeOne) {
        super.delete(affairYearThreeOne);
    }

    /**
     * Persist the review ("审核") result: stamps the record with the current
     * time and the reviewing user's name before delegating to the DAO.
     */
    @Transactional(readOnly = false)
    public void shenHeSave(AffairYearThreeOne affairYearThreeOne) {
        affairYearThreeOne.setUpdateDate(new Date());
        affairYearThreeOne.setShPerson(UserUtils.getUser().getName());
        affairYearThreeOneDao.shenHeSave(affairYearThreeOne);
    }

    public List<String> selectAllYear(){
        return affairYearThreeOneDao.selectAllYear();
    }

    public List<String> selectAllUnitId(){
        return affairYearThreeOneDao.selectAllUnitId();
    }

    public Integer selectTime(String id,String year,String type){
        return affairYearThreeOneDao.selectTime(id,year,type);
    }

    public String selectName(String unitId){
        return affairYearThreeOneDao.selectName(unitId);
    }

    public Integer selectNum(String year,String time,String id){
        return affairYearThreeOneDao.selectNum(year,time,id);
    }

    public Integer selectNumYear(String year,String id){
        return affairYearThreeOneDao.selectNumYear(year,id);
    }

    public List<AffairYearThreeOne> selectdzzzyThreeOne(Date startDate, Date endDate, String partyOrganization,String type) {
        return affairYearThreeOneDao.selectdzzzyThreeOne(startDate,endDate,partyOrganization,type);
    }

    // NOTE(review): this overload takes String dates while selectdzzzyThreeOne
    // takes java.util.Date — confirm the DAO mappings expect this asymmetry.
    public List<AffairYearThreeOne> selectdzbThreeOne(String startDate, String endDate, String partyOrganization) {
        return affairYearThreeOneDao.selectdzbThreeOne(startDate,endDate,partyOrganization);
    }

    public Integer selectHuiyiNumber(String unitId, String yearL, String yearT, String type) {
        return affairYearThreeOneDao.selectHuiyiNumber(unitId,yearL,yearT,type);
    }
}
The Saskatchewan government is investigating after two homeless men who were denied funding to stay at a shelter in that province were instead given one-way bus tickets to British Columbia. Charles Neil-Curly, 23, and Jeremy Roy, 21, arrived in Vancouver on Wednesday, one day after boarding a Greyhound bus in Saskatchewan – voluntarily – on the province's dime. The men were both homeless in North Battleford, a small city located about 140 kilometres northwest of Saskatoon. They had stayed at a homeless shelter on and off for some time, but the shelter's manager said the men were recently denied government funding to stay. Instead, they told the shelter manager that the social-services ministry offered them one-way bus tickets to British Columbia. Story continues below advertisement Donna Harpauer, Saskatchewan's Minister of Social Services, who is currently in the middle of an election campaign, promised to look into what happened. Mr. Neil-Curly said relocating to Vancouver means he won't "have to sleep in a snow bank," but that, had he been able to secure funding for housing, he probably would have preferred staying in Saskatchewan. Asked what his plans are, Mr. Neil-Curly replied: "Try to get a job and a place for me and Jeremy and have a life, I guess." Mr. Roy has epilepsy and other health issues. Once a Saskatchewan newspaper reported on the men's journey, their story made it to British Columbia before they did. They were met at the Greyhound bus terminal not far from downtown Vancouver by a crowd of reporters, a local city councillor and a representative from a local shelter. Jeremy Hunka of the Union Gospel Mission said the men would receive hot meals and shelter immediately, and connections to support networks later on. Mr. Neil-Curly said the two hope to go to Vancouver Island, where his best friend lives. "We were surprised and we were concerned when we heard they were being put on a bus," Mr. Hunka said. 
"We knew we needed to step up, because coming to Vancouver without a plan, without a place to stay, and joining the other people who are struggling in the streets is a bad situation." Caitlin Glencross, manager of the Lighthouse homeless shelter in North Battleford, spoke with both men before they left Saskatchewan. She said Mr. Neil-Curly was "very frustrated" that he could not get the help he needed in his home province. Mr. Roy has significant mental-health issues and did not seem to understand the gravity of the situation, Ms. Glencross said. Story continues below advertisement Story continues below advertisement "I just find it shocking," she said. "I have never heard of something like this taking place. We are really concerned about these two individuals. If something were to happen to either one of them – that's a question that's in our minds. Who's responsible then?" Details of the exchanges between the men and Saskatchewan social services have not been independently confirmed. Ms. Harpauer, the social services minister, could not be reached for an interview but said in a statement that she has discussed the matter with Social Services deputy minister Greg Miller. "I reaffirmed to the deputy minister that regulations require a case plan be established by workers and clients before transportation be provided," she said. "The deputy minister is also reviewing if case plans were in place for these individuals and he will be reminding front-line workers that clients should have a plan in place before they are given bus tickets for destinations away." Vancouver city councillor Kerry Jang, who also greeted the two at the Vancouver bus station, called the news "inhumane" and underscored the need for a national program that puts supports in home communities to forestall homelessness in the first place. "How can anybody treat anybody that way? It's unbelievable," he said. "You can't ship people across the country. 
You must have a housing-first program, not a Greyhound-first program." Mr. Jang referenced former Alberta premier Ralph Klein, who infamously offered welfare recipients free one-way bus tickets out of that province. There was a perception at the time that many of those taking advantage of those bus tickets were ending up in British Columbia. Story continues below advertisement "But this is the first time in my entire career I've seen somebody with mental-health issues, with no support, thrown on a bus and told, 'You're on your own.' That is the height of disrespect, of inhumanity," he said. B.C. Premier Christy Clark said the province is not going to turn away people who may need help. "I don't know much about them, but I hope that wherever they are, they are able to get the care that they need and if they decide to come to B.C., we are going to support them in that. We should," Ms. Clark told reporters in Victoria. The Premier did concede that British Columbia needs to do more in providing mental-health services. B.C. Housing Minister Rich Coleman echoed Ms. Clark's support, adding that it's not unusual for authorities to offer transportation to someone with addictions or mental-health issues to another jurisdiction if they have family support there. "We've done that both ways in Canada for a long time, to allow people to go back and connect with family," he said. "If that's the case, I have no problem with it." Because of the Saskatchewan election campaign, Mr. Coleman said he won't try to reach his counterpart in that province for more details. However, he added that one of the men does have family in British Columbia. Story continues below advertisement "So if they come [here] and they get better results, well, that's how it works," he said.
Best option for car shoppers: Postpone buying Inventory shortages caused by the effects of Japan's earthquake have led to rising prices for new and used vehicles, creating what one analyst describes as a 'huge seller's market.' "If you don't have an immediate need, you are probably better to wait and figure out where the market is headed," said Jesse Toprak, an analyst with auto information company TrueCar.com. That's prompted many experts to voice something rarely said in the sales-happy auto industry: With consumers facing the toughest market in recent memory, if you can, put off purchases until things sort out, probably early next year. With the effects of the earthquake in Japan rippling through the industry and causing shortages, prices are rising for both new and used cars, and fewer models and options will be available come summer, especially for the hybrids and fuel-efficient vehicles that Japan produces. Attention all car buyers: The era of cut-rate financing, generous cash-back offers and big discounts is coming to an end. Toprak concedes his reluctance to give such advice. TrueCar collects referral fees for new-car sales to visitors to its website. Yet he's not a lone voice. "If people were paying attention they would have bought in March and April. Now, if they have the latitude, it is probably best to wait," said Jeremy Anwyl, chief executive of Edmunds.com, an auto information company that makes money from Internet-based auto industry advertising. (OK, so the transmission on your clunker went out and you don't want to shell out big bucks to fix it. If you have to purchase a car, see the accompanying story for buying tips.) || Related: If you must buy a car now, here's what to do Tom Libby, an automotive industry analyst at R.L. Polk & Co., intended to replace his aging Ford Explorer this spring but said he's abandoned the plan for now, even though his SUV has 125,000 miles on the odometer and he needs it to commute every day. 
The disruptions caused by the March 11 earthquake have triggered "the huge seller's market we are seeing now," Libby said. "The customer has lost all leverage, and that is going to last at least into the fall." With Toyota Motor Corp.'s U.S. factories operating at just 30% capacity this month because of Japan-related parts shortages and Honda Motor Co. warning that its supply of vehicles is diminished, some of the bestselling cars, including Toyota's Camry and Corolla and Honda's Civic and CR-V SUV, will become scarce. Toyota and Honda — including their respective Lexus and Acura brands — account for about a quarter of all U.S. auto sales and an even larger share of the retail market. Analysts at IHS Automotive estimate the industry has fallen millions of vehicles behind its expected global production. Although automakers will work hard to catch up during the second half of this year, ultimately about 700,000 vehicles will never be built because of the quake. The shortfall has allowed Toyota and competitors such as General Motors Co. and Ford Motor Co. to raise sticker prices. At the same time, just about every manufacturer has cut back on deals, either killing or reducing cash-back offers and raising auto loan interest rates. The reduction in incentives will hurt shoppers the most, Anwyl said. "Dealers charging a few hundred dollars more is not as bad as manufacturers cutting incentives by $1,000 or $1,500. That's just huge," he said.
/*
 * css_vector_send() - Winsock simulation of css_vector_send.
 *   return: size of sent if success, or error code
 *   fd(in): socket descripter
 *   vec(in): vector buffer
 *   len(in): vector length
 *   bytes_written(in):
 *   timeout(in): timeout value in milli-seconds
 *
 * Note: Does not support the "byte_written" argument for retries, we'll
 *       internally keep retrying the operation until all the data is written.
 *       That's what all the callers do anyway.
 */
int
css_vector_send (SOCKET fd, struct iovec *vec[], int *len, int bytes_written, int timeout)
{
  int i, total_size, available, amount, rc;
  char *src, *dest;
  int handle_os_error;		/* set but not consumed in this function */
#if defined(SERVER_MODE)
  int vb_index = -1;		/* -1 = no vector buffer allocated yet */
#endif

  handle_os_error = 1;

  /* Retries via bytes_written are unsupported (see header note). Bail out
   * before any vector buffer is allocated. */
  if (bytes_written)
    {
      rc = -1;
      handle_os_error = 0;
      goto error;
    }

  /* Total payload size across all iovec entries; this is the success
   * return value. */
  total_size = 0;
  for (i = 0; i < *len; i++)
    {
      total_size += (*vec)[i].iov_len;
    }

#if defined(SERVER_MODE)
  vb_index = alloc_vector_buffer ();
  dest = css_Vector_buffer_piece[vb_index];
#else
  dest = css_Vector_buffer;
#endif
  available = CSS_VECTOR_SIZE;

  /* Copy the scattered iovec data into the staging buffer, flushing a full
   * CSS_VECTOR_SIZE chunk to the socket whenever the buffer fills up. */
  for (i = 0; i < *len; i++)
    {
      src = (*vec)[i].iov_base;
      amount = (*vec)[i].iov_len;
      while (amount > available)
	{
	  memcpy (dest, src, available);
#if defined(SERVER_MODE)
	  rc = css_writen (fd, css_Vector_buffer_piece[vb_index], CSS_VECTOR_SIZE);
#else
	  rc = css_writen (fd, css_Vector_buffer, CSS_VECTOR_SIZE);
#endif
	  if (rc != CSS_VECTOR_SIZE)
	    {
	      goto error;
	    }
	  src += available;
	  amount -= available;
#if defined(SERVER_MODE)
	  dest = css_Vector_buffer_piece[vb_index];
#else
	  dest = css_Vector_buffer;
#endif
	  available = CSS_VECTOR_SIZE;
	}
      if (amount)
	{
	  memcpy (dest, src, amount);
	  dest += amount;
	  available -= amount;
	}
    }

  /* Flush whatever remains in the staging buffer. */
  if (available < CSS_VECTOR_SIZE)
    {
      amount = CSS_VECTOR_SIZE - available;
#if defined(SERVER_MODE)
      rc = css_writen (fd, css_Vector_buffer_piece[vb_index], amount);
#else
      rc = css_writen (fd, css_Vector_buffer, amount);
#endif
      if (rc != amount)
	{
	  goto error;
	}
    }

#if defined(SERVER_MODE)
  free_vector_buffer (vb_index);
#endif
  return total_size;

error:
#if defined(SERVER_MODE)
  /* Bug fix: the bytes_written early exit jumps here before
   * alloc_vector_buffer() runs, so vb_index was previously used
   * uninitialized. Only release a buffer that was actually allocated. */
  if (vb_index != -1)
    {
      free_vector_buffer (vb_index);
    }
#endif
  return rc;
}
/**
 * Append check constraint details to the XML node.
 *
 * @param tableNode XML element representing the table; one
 *                  {@code <checkConstraint>} child is appended per constraint
 * @param table     table whose check constraints are serialized
 */
private void appendCheckConstraints(Element tableNode, Table table) {
    Document document = tableNode.getOwnerDocument();
    Map<String, String> constraints = table.getCheckConstraints();
    if (constraints != null && !constraints.isEmpty()) {
        // Iterate entries directly instead of keySet()+get(): one map lookup
        // per constraint instead of two.
        for (Map.Entry<String, String> constraint : constraints.entrySet()) {
            Node constraintNode = document.createElement("checkConstraint");
            tableNode.appendChild(constraintNode);
            DOMUtil.appendAttribute(constraintNode, "name", constraint.getKey());
            // The map values are already Strings; the original .toString()
            // call was redundant.
            DOMUtil.appendAttribute(constraintNode, "constraint", constraint.getValue());
        }
    }
}
/**
 * Returns bindings for a list of target variables of a
 * control step, using the source variables
 * combined with the output parameters of the call.
 */
static Assignment modify(Switch swit) {
    // Only rule calls can bind output parameters.
    assert swit.getKind() == Callable.Kind.RULE;
    List<Binding> result = new ArrayList<>();
    List<CtrlVar> sourceVars = swit.getSource()
        .getVars();
    // Maps each output variable of the call to its parameter index.
    Map<CtrlVar,Integer> outVars = swit.getCall()
        .getOutVars();
    for (CtrlVar var : swit.onFinish()
        .getVars()) {
        Integer ix = outVars.get(var);
        Binding rhs;
        if (ix == null) {
            // Not an output of the call: copy the value from its position
            // in the source state's variable list.
            int pos = sourceVars.indexOf(var);
            assert pos >= 0;
            rhs = Binding.var(pos);
        } else {
            // Output parameter: bind to the rule's parameter binding.
            Rule rule = (Rule) swit.getUnit();
            rhs = rule.getParBinding(ix);
        }
        result.add(rhs);
    }
    return call(result);
}
def _on_verification_mode_change(self, change):
    """Refresh the verification-mode output area to match the selected mode.

    For ``private_key`` mode the private-key input widget is shown; for
    ``public_key`` mode the user's ``~/.ssh/id_rsa.pub`` is rendered (only
    if that file exists). Any other mode leaves the area cleared.
    """
    with self._verification_mode_output:
        clear_output()
        mode = self._verification_mode.value
        if mode == "private_key":
            display(self._inp_private_key)
        elif mode == "public_key":
            pubkey_path = Path.home() / ".ssh" / "id_rsa.pub"
            if pubkey_path.exists():
                key_widget = ipw.HTML(
                    f"""<pre style="background-color: #253239; color: #cdd3df; line-height: normal; custom=test">{pubkey_path.read_text()}</pre>""",
                    layout={"width": "100%"},
                )
                display(key_widget)
'use strict' import * as React from 'react' import { SignupDialog } from './SignupDialog' import { LoginDialog } from './LoginDialog' import { SubscribeDialog } from './SubscribeDialog' export default class AuthUI extends React.Component<{}, {}> { render() { return ( <div> <SignupDialog /> <LoginDialog /> <SubscribeDialog /> </div> ); } }
The daft theist claim I have for you this week comes to you from a letter in Fredericksburg.com … it reads … Atheists: Open your minds, ask questions I am writing in response to the letter from Dan Dormer [“What is an atheist?” May 15]. He said he doesn’t believe in God because he has no proof that He is real. That is where faith comes into the picture. Have you ever seen the air that you breathe? No, but you know that it is there, right? That is the way it is with God; we have not seen Him but we know He is there. I would challenge atheists to take some time and speak to a preacher of their choice about their questions and beliefs, and really open their minds to what they say. If you have not been brought up in church, in a way it’s like you are a child, unknowing of God’s love and the peace and love Christians feel in their hearts knowing He is there and that there is eternal life after this world. What do you have to lose in meeting with someone? And it just may change the way you feel. (*Sigh*) If believers ever paused to wonder why non-believers have a reputation for being a bit rude sometimes, then they might like to consider the thought that when faced with utterly daft stuff like this, the initial reply is to simply say two words … the second one being “off” – I shall leave it up to you to speculate about what the first one might be. If you just happen to be reading the above and thinking, “Well gosh, she is making a jolly good point”, then this next statement of the bleeding obvious is just for you. We know about the reality of air because we have solid empirical evidence regarding it. Not only can we measure it, but we also have a precise understanding regarding its composition, not by “faith”, but because it is possible to make such observations. In stark contrast, there is not one single jot of empirical evidence for a god or gods. Now, before you reply with, “Ah well OK, not air that was a bad choice, but how about radiation / love / etc...”.
Nope, all of those are known about because we can measure them, and yes “love” is an emotion, but that is all about chemicals in your brain, if we wire you up to an ECG (electrocardiogram) then we can measure that as well. As for the “speak to a preacher of their choice” thought … seriously!, the implied suggestion here is that non-believers have given the god claim no thought, that is not credible, what fracking planet does the letter writer live on. One can also wonder if that preacher of choice could perhaps be a Muslim or Buddhist cleric, would they still think that was a good option? I suspect not, they would only want you to sup their specific variation of belief, other flavours would be deemed poison. So yes, “Faith”, that special word used to describe believing in stuff that has no evidence for it at all … belief might indeed consider that to be a virtue, but I have a better way to describe such an approach, and that is “insanely stupid”. As for knowing about god’s love, would that just happen to be the very same “love” that says woman are inferior to men and also expresses open hostility of gay people … er, no thanks, I prefer to keep my ethics intact. So what else can I say except … Christians: Open your minds, ask questions You believe in God but you have no proof that He is real, not one jot, so why are you believing stuff that is not real. Have you ever seen God? No, so you have no way of knowing that he is there. I would challenge Christians to take some time and speak to any non-believer of their choice and really open their minds to what they say. If you have been brought up in church, in a way it’s like you are a child, knowing the indoctrination, riding the emotional wave and experiencing a weekly dose of psychological manipulation will lumber you with a distorted view of reality. What do you have to lose in meeting with someone? And it just may change the way you feel. Will they do it? … I suspect not, but I would indeed love to be proven wrong. 
Share this: Facebook Twitter Reddit Tumblr Pinterest LinkedIn Pocket Skype WhatsApp Email Print Like this: Like Loading...
/*
 * Copyright (c) 2004,2011-2012,2014 Apple Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*!
    @header SecCmsEncoder.h
    @Copyright (c) 2004,2011-2012,2014 Apple Inc. All Rights Reserved.

    @availability 10.4 and later
    @abstract CMS message encoding
    @discussion The functions here implement functions for encoding
                Cryptographic Message Syntax (CMS) objects as described
                in rfc3369.
                A SecCmsEncoder object is used to encode CMS messages into BER.
 */

#ifndef _SECURITY_SECCMSENCODER_H_
#define _SECURITY_SECCMSENCODER_H_  1

#include <Security/SecCmsBase.h>

#if defined(__cplusplus)
extern "C" {
#endif

/*! @functiongroup Streaming interface */

/*!
    @function
    @abstract Set up encoding of a CMS message.
    @param cmsg The SecCmsMessage to be encoded.
    @param outputfn callback function for delivery of BER-encoded output
        will not be called if NULL.
    @param outputarg first argument passed to outputfn when it is called.
    @param dest If non-NULL, pointer to a CSSM_DATA that will hold the
        BER-encoded output.
    @param destpoolp Pool to allocate BER-encoded output in.
    @param pwfn callback function for getting token password for enveloped
        data content with a password recipient.
    @param pwfn_arg first argument passed to pwfn when it is called.
    @param encrypt_key_cb callback function for getting bulk key for
        encryptedData content.
    @param encrypt_key_cb_arg first argument passed to encrypt_key_cb when
        it is called.
    @param detached_digestalgs digest algorithms in detached_digests
    @param detached_digests digests from detached content (one for every
        element in detached_digestalgs).
    @param outEncoder On success, receives a reference to the newly created
        encoder.
    @result On success 0 is returned and the new encoder is stored in
        outEncoder. On failure returns non zero. Call PR_GetError() to find
        out what went wrong in this case.
    @availability 10.4 and later
 */
extern OSStatus
SecCmsEncoderCreate(SecCmsMessageRef cmsg,
                    SecCmsContentCallback outputfn, void *outputarg,
                    CSSM_DATA_PTR dest, SecArenaPoolRef destpoolp,
                    PK11PasswordFunc pwfn, void *pwfn_arg,
                    SecCmsGetDecryptKeyCallback encrypt_key_cb, void *encrypt_key_cb_arg,
                    SECAlgorithmID **detached_digestalgs, CSSM_DATA_PTR *detached_digests,
                    SecCmsEncoderRef *outEncoder);

/*!
    @function
    @abstract Take content data delivery from the user
    @param encoder encoder context
    @param data content data
    @param len length of content data
    @result On success 0 is returned. On failure returns non zero. Call
        PR_GetError() to find out what went wrong in this case.
    @availability 10.4 and later
 */
extern OSStatus
SecCmsEncoderUpdate(SecCmsEncoderRef encoder, const void *data, CFIndex len);

/*!
    @function
    @abstract Abort a (presumably failed) encoding process.
    @param encoder Pointer to a SecCmsEncoderRef created with
        SecCmsEncoderCreate().
    @availability 10.4 and later
 */
extern void
SecCmsEncoderDestroy(SecCmsEncoderRef encoder);

/*!
    @function
    @abstract Signal the end of data.
    @discussion Walks down the chain of encoders and the finishes them from
        the innermost out.
    @param encoder Pointer to a SecCmsEncoder created with
        SecCmsEncoderCreate().
    @result On success 0 is returned. On failure returns non zero. Call
        PR_GetError() to find out what went wrong in this case.
    @availability 10.4 and later
 */
extern OSStatus
SecCmsEncoderFinish(SecCmsEncoderRef encoder);

/*! @functiongroup One shot interface */

/*!
    @function
    @abstract BER Encode a CMS message.
    @discussion BER Encode a CMS message, with input being the plaintext
        message and outBer being the output, stored in arena's pool.
 */
extern OSStatus
SecCmsMessageEncode(SecCmsMessageRef cmsg, const CSSM_DATA *input,
                    SecArenaPoolRef arena, CSSM_DATA_PTR outBer);

#if defined(__cplusplus)
}
#endif

#endif /* _SECURITY_SECCMSENCODER_H_ */
The Effects of Probiotic Soymilk Fortified with Omega-3 on Blood Glucose, Lipid Profile, Haematological and Oxidative Stress, and Inflammatory Parameters in Streptozotocin Nicotinamide-Induced Diabetic Rats Objective. The aim of the present study was to evaluate the effects of probiotic soymilk fortified with omega-3 in diabetic rats. Methods. Soymilk (SM), fermented soymilk (FSM), and fermented soymilk fortified with omega-3 (FSM + omega-3) were prepared. Rats were randomly assigned to five groups of 13 animals per group. Diabetes was induced by a single injection of streptozotocin (STZ) 15 min after the intraperitoneal administration of nicotinamide (NA). Normal control (NC) and diabetic control (DC) rats received 1 mL/day of distilled water and three groups of diabetic rats were given 1 mL/day of SM, FSM, and FSM + omega-3 products by oral gavage for 28 days. Results. Three products significantly (P < 0.05) reduced blood glucose, total cholesterol (TC), triglyceride (TG), and malondialdehyde (MDA) concentrations compared to the DC group, with the maximum reduction seen in the FSM + omega-3 group. Body weight, red blood cells (RBC), haemoglobin (Hb), haematocrit, and superoxide dismutase (SOD) also significantly increased in the FSM + omega-3 group. In the FSM + omega-3 group, MDA level compared with the SM and FSM groups and high sensitivity C-reactive protein (hs-CRP) concentrations compared with the DC and FSM groups were significantly lower (P < 0.05). Conclusion. Fermented soymilk fortified with omega-3 may be beneficial in diabetes. Introduction Diabetes mellitus is a largely occurring endocrine disorder in many countries . In diabetes, due to defects in the production of insulin or its action, blood glucose levels become elevated. Also impaired is the functioning of the macronutrient metabolism, leading to long-term health complications . In addition, free radicals generated during long-term hyperglycaemia impair the body's antioxidant defence system . 
Diabetes treatment is based on pharmacological hypoglycaemic agents and insulin; however, the efficacy of these therapies is limited due to their many side effects. Therefore, finding natural compounds is essential for overcoming these problems . According to epidemiological studies, trends toward the use of soy products are growing, since soy consumption is associated with a decrease in certain diseases, including diabetes and atherosclerosis . Soymilk contains highquality proteins, dietary fibre, small quantities of saturated fatty acids, and no cholesterol and lactose, which make it suitable for people who are lactose intolerant. The antidiabetic and antiobesity effects of soymilk have also been highlighted . However, soy consumption remains limited due to factors such as its taste and oligosaccharides such as raffinose and stachyose, which often leads to bloating and stomach discomfort . Fermentation is a suitable method for improving the properties of isoflavones and peptides in soybean and this process increases the efficacy of these components in the treatment or prevention of type 2 diabetes . In fact, isoflavone glycosides are changed into isoflavone aglycones following fermentation, which seem to have greater physiological effects and are better absorbed . Probiotic food containing bifidobacteria results in the decreased total cholesterol and LDL-C and increased HDL-C . Therefore, fermentation of food products by these probiotic bacteria was increased . In addition, sugars in soymilk are suitable nutrients for bifidobacteria growth . The beneficial effect of fermentation is not only that it increases the bioavailability of isoflavones, but also that it aids in the digestion of protein, the solubility of calcium, and the enhancement of intestinal health and the immune system . The beneficial effects of soymilk fermented with Bifidobacterium on lipid profiles have been indicated in previous studies . 
Essential fatty acid levels in various tissues have been reduced in diabetes through the use of soymilk. This may be due to a reduction in the conversion of linolenic acid to EPA and DHA. Supplementation with omega-3 fatty acids in patients with diabetes has attracted significant attention and may be effective at reducing some of the complications associated with diabetes. However, due to their double bonds, these fatty acids are susceptible to oxidation and may thus increase oxidative stress . Human and animal studies have shown conflicting results regarding the effect of supplementation with fish oil omega-3 fatty acids in terms of oxidant/antioxidant status . Increased consumption of omega-3 fatty acids due to the adverse effects of inadequate intake is recommended. The fortification of food products, without extensive changes to eating habits, is a suitable method for increasing omega-3 content in the diet. Furthermore, this fatty acid can be used for potentiating probiotic effects in the small intestine by changing fatty acid composition . The present study therefore aimed to determine whether combining soymilk, probiotics, and omega-3 had additional effects on blood glucose, lipid profiles, haematological and oxidative stress, and the inflammatory parameters within a diabetic type 2 animal model. Preparation of Products. Soymilk was prepared according to the method described by previous studies . Initially, after soaking soybeans in distilled water overnight, the water was discarded and the soaked soybeans were combined with distilled water 10 times their weight, and the mixture was mixed in a blender for three minutes. Then, the mixture was passed through a filter to produce soymilk. The soymilk was then divided into three equal parts. All samples were sterilized at 121 ∘ C for 15 min and cooled to 37 ∘ C. One part was used as the control sample and 0.1 g/L of Bifidobacterium lactis (Christin Hansen, Denmark) was added to the other two samples. 
Omega-3 (SERVA, Feinbiochemica, Heidelberg, New York, USA) (1 g/L) was also added to one sample. Samples were inoculated at 37 ∘ C until their pH reached 4.7. The samples were then stored in a refrigerator. The fermented soymilk samples were prepared once a week under hygienic conditions. Experimental Animals. At the beginning of the experiment, 65 male Sprague-Dawley rats (weighing 200-300 g each) were purchased from the Laboratory Animals Research Center (Shiraz University of Medical Sciences, Iran). The animals were acclimatized to the laboratory for two weeks prior to starting the experiments and were fed a chow diet (Pars Dam Co., Tehran, Iran) and regular drinking water ad libitum during the study; rats were kept in stainless steel cages in groups of three animals per cage in a temperaturecontrolled (22-25 ∘ C) environment; lighting (12 hr light/dark cycles) and humidity (%50 ± 5) conditions were also controlled. Animal procedures in our study were carried out according to ethics stated in the Guide for the Care and Use of Laboratory Animals . Induction of Diabetes. In the present study, type 2 diabetes was induced intraperitoneally (IP) in the overnightfasted male Sprague-Dawley rats through the injection of freshly prepared streptozotocin (STZ) (65 mg/kg body weight; Sigma, USA), dissolved in a 0.1 mol/L citrate buffer (pH 4.5), 15 min following the IP administration of nicotinamide (NA) (110 mg/kg body weight; Merck, Germany) dissolved in normal saline . A glucometer (Accu-Chek Active, Roche, Germany) was used for the estimation of blood glucose levels. The stable blood glucose concentration seven days after STZ-NA injection was used for the confirmation of diabetes. Blood glucose levels above 150 mg dL were considered as criteria for diabetes. Experimental Design. Diabetic rats were divided randomly into four groups of 13 rats per group. One group was also considered as normal control rats. The treatment period for the study was 28 days. 
Products were administered to rats by oral gavage at a level of 1 mL/day. Group I included normal control (NC) rats given 1 mL of distilled water; Group II included diabetic control (DC) rats given 1 mL of distilled water; Group III included diabetic rats given 1 mL/day of soymilk (SM); Group IV included diabetic rats given 1 mL/day of fermented soymilk (FSM); Group V included diabetic rats given 1 mL/day of fermented soymilk fortified with omega-3 (FSM + omega-3). Determination of Biochemical Parameters. Rats were monitored weekly regarding body weight and blood glucose. On day 29, the rats were fasted for 12 hours and under anaesthesia (50 mg/kg ketamine plus 5 mg/kg diazepam administered intraperitoneally), approximately 5 mL of blood was collected by cardiac puncture, 1 mL collected into a tube containing EDTA for measuring haematological parameters, and the remaining centrifuged at 3500 rpm for 10 min for the separation of serum. Each serum sample was stored in clean sterile microcentrifuge tubes at −80 ∘ C until analysis. Analytical Measurements. The serum levels of total cholesterol (TC), triglyceride (TG), HDL cholesterol (HDL-C), and LDL cholesterol (LDL-C) were assayed for each rat with the aid of specific enzyme kits (Pars Azmoon Co., Tehran, Iran), which were used according to the manufacturer's instructions. A cell counter was used to measure haematological parameters. The malondialdehyde (MDA) level of serum was determined using the thiobarbituric acid (TBARS) colorimetric analysis method. Optical density was determined at 532 nm . A commercial kit (Ransod, Randox Laboratories Ltd.) was used for the measuring of superoxide dismutase (SOD) activities in erythrocyte according to manufacturer's instructions. Serum high-sensitive C-reactive protein (hs-CRP) concentrations were measured using the ELISA kit according to the manufacturer's instructions. Statistical Analysis. The data were represented as mean ± standard deviation (SD). 
The statistical analysis was performed using SPSS (version 19.0). One-way repeated-measures analysis of variance (ANOVA) was used to compare the mean blood glucose and body weight between groups at different measurement times. For other parameters at the end of the experiment, a one-way analysis of variance (ANOVA) procedure was used, followed by post hoc Duncan's multiple range tests. Blood Glucose. Results of ANOVA with repeated measures showed significant changes in the blood glucose levels of diabetic rats from the first week until the end of the experiment; however, the trend of decreasing blood glucose was different among the five groups (Table 1). NC rats (Group I) maintained a normal blood glucose level during the study. The IP administration of STZ-NA to rats significantly increased the level of blood sugar compared to NC rats. Blood glucose level was increased from 114.08 to 162.58 mg/dl seven days following STZ-NA administration. At the end of the experiment, blood glucose in all treated diabetic rats had been significantly reduced (P < 0.05) compared to the DC rats. FSM + omega-3, FSM, and SM exhibited the greatest reduction — 47.2%, 39.3%, and 35.8%, respectively (Table 1). Body Weight. Results of ANOVA with repeated measures showed significant changes in the body weight of diabetic rats from the first week until the end of the experiments (Table 2). There was no significant difference in the initial body weight among the five groups (P > 0.05). Diabetic rats showed a significant decrease in body weight compared to the control group (P < 0.05). Oral administration of three different products (SM, FSM, and FSM + omega-3) for 28 days improved body weight significantly (P < 0.05), with the maximum weight gain seen in the FSM + omega-3 group compared to other groups (Table 2). Serum Lipids. The effects of treatments on serum lipids are shown in Table 3.
TC and TG concentrations of all treated diabetic rats were significantly decreased ( < 0.05) compared to the DC group, with maximum reduction seen in the FSM + omega-3 group (20.8% for TC and 39.3% for TG). The FSM + omega-3 product also had a tendency to produce lower TG concentrations than FSM and SM products (6.2% relative to SM and 11.2% relative to FSM). Although no differences in HDL-C and LDL-C concentrations among the four diabetic groups were observed, when compared with the DC group, the FSM product had a tendency to produce greater HDL-C concentrations compared to the SM and FSM + omega-3 groups (24.8%, 20.4%, and 14.8%, resp.). According to statistical comparisons between treated groups and the NC group, there was no significant difference between SM, FSM, and FSM + omega-3 groups in terms of lowering LDL-C level (Table 3). Haematological Parameters. The effects of treatments on haematological parameters are shown in Table 4. Haemoglobin (Hb), mean corpuscular haemoglobin (MCH), and mean corpuscular haemoglobin concentration (MCHC) in the diabetic control group were significantly decreased compared to the NC group ( < 0.05). Hb, red blood cells (RBC), and haematocrit of the SM, FSM, and FSM + omega-3 groups and mean corpuscular volume (MCV) and MCH for the SM and FSM + omega-3 were significantly increased ( < 0.05) compared with the DC group, with the greatest increase seen in the FSM + omega-3 group (Table 4). Discussion In our study, experimentally induced diabetes significantly increased blood glucose levels by 197% higher than that of the control level. Uptake and utilization of glucose, as well as glucose metabolism, are disturbed in diabetes mellitus . For evaluating the hypoglycaemic effects of different compounds, the STZ-NA model appears to be a better model than its STZ counterpart, because the former manifests only a mild hyperglycaemic state . 
The glucose level (≥150 mg/dL) recorded in rats underscored the real diabetic status of the rats in our study. The treatment of diabetic rats with SM, FSM, and FSM + omega-3 products in our study had after 28 days significantly reduced blood glucose levels by 35.8%, 39.3%, and 47.2%, respectively, compared to the DC group. It appeared that the hypoglycaemic effects of these products observed in our study were due to soy protein and isoflavones present in soymilk, as the hypoglycaemic effects of these components have been reported in previous studies : hs-CRP concentrations within the five groups. NC = normal control, DC = diabetic control, SM = diabetic rats that received soymilk, FSM = diabetic rats that received fermented soymilk, and FSM + omega-3 = diabetic rats that received fermented soymilk fortified with omega-3. Each value is expressed as mean ± SD. Values with different letters are significantly different at < 0.05 as analyzed by Duncan's multiple range test. . Isoflavonoids and protein in soybean are connected to reducing insulin resistance and improving glycaemic control , although these results differ from some other published studies . The antidiabetic actions of isoflavonoids may potentially be exerted via oestrogen receptors and for this reason this activity of soy isoflavones is beneficial for improving glucose metabolism . Diabetic rats that received FSM were found to have reduced plasma glucose levels, more so than SM. The effects of fermented soymilk on glucose status have previously been evaluated . In 2005, Kawakami et al. demonstrated that isoflavonoid glucoside that changed into aglycones following fermentation had better activity than isoflavonoid glycones and that the intake of isoflavones aglycones significantly increased serum isoflavone concentration, compared to isoflavone glycoside . 
Thus, in our study, the additional effect of fermented soymilk for controlling the glucose metabolism may have been due to an increase in isoflavonoid aglycones. We also observed that FSM + omega-3 decreased blood glucose, more so than SM and FSM. In 1989, Linn et al. showed that an increasing intake of omega-3 fatty acids can reduce hyperglycaemia, as well as the risk of diabetes in rats . It was observed that the intake of SM, FSM, and FSM + omega-3 products results in an increase in body weight when compared with the DC group. Soybean isoflavones improve metabolism in the presence of diabetes, resulting not only in suppressing weight loss, but also in a weight increase . In this study, TG and TC concentrations were significantly increased in the DC group compared to concentrations in the NC group. These findings were in agreement with other findings that have shown that a plasma increase in these parameters was frequently observed in diabetes mellitus states. In the present study, we observed that the TC and TG concentrations of the SM, FSM, and FSM + omega-3 groups had been significantly decreased compared to those of the DC group. Our findings regarding the hypocholesterolaemic effects of soymilk and Bifidobacterium soymilk are in agreement with Kikuchi-Hayakawa's results , which showed that fermented soymilk suppresses cholesterol synthesis in rats. Moreover, the significant effect of fermented soymilk with Bifidobacterium on TC in rats has also been demonstrated in other studies . It could be assessed that not only the protein but also isoflavones in soymilk reduce the concentrations of serum lipids and can be seen as having antiatherogenic effects . 
With regard to the lowering of TC and TG concentrations, it can be argued that soymilk may act according to its components, such as isoflavones and soy protein, by (1) decreasing the insulin to glucagon ratio and finally reducing the expression of lipogenic genes; (2) activating PPAR-γ (peroxisome proliferator-activated receptor gamma), with upregulation of adipogenesis; and (3) binding of isoflavone to oestrogen receptors. In addition, amino acids, minerals, and phytic acid, as well as other soy bioactive components in soymilk, are effective in the decrease of TC and TG. With regard to the effects on HDL-C levels, feeding SM, FSM, and FSM + omega-3 products to the DC group resulted in a higher HDL-C level, but this difference was not significant. The FSM product also had a tendency to produce greater HDL-C than SM and FSM + omega-3 products. These findings support the ideas of Rossi et al. (2000), who suggested that fermented soy products cause a decrease in TC and an increase in HDL-C concentrations. These beneficial effects of FSM + omega-3 on suppressing an increase in plasma TG levels were greater than those of SM and FSM, which revealed 6.2% and 11.2% inhibition for TG, respectively. This observation was similar to previous findings that have shown that omega-3 fatty acids decrease plasma triglycerides. This additional effect found in our study was likely due to the presence of omega-3 fatty acids, since one of the most noticeable effects of this fatty acid is a reduction in plasma triglycerides, possibly by decreasing triglyceride synthesis in the liver. In the present study, the haematological parameters Hb, MCH, and MCHC in the DC group were significantly decreased compared to the NC group. Even though other parameters were also decreased, their differences were not significant. This observation agrees with Baskar et al.'s report (2006), which reported the effect of Rubia cordifolia in diabetic rats.
As a result of infections that occurred during diabetes, haematological parameters were reduced . These parameters resulted in anaemic conditions when altered . In diabetes mellitus, increased glycosylation of RBC membrane proteins causes anaemia and it has been reported that lipid peroxides produced in this state lead to haemolysis of RBC . Hb and RBC of the SM, FSM, and FSM + omega-3 groups and MCV and MCH of the SM and FSM + omega-3 groups were significantly increased compared to the DC group. These results were similar to the results of Ishimi et al. (1999), who showed that an intake of isoflavones in ovariectomized rats improved haematological parameters . Soung et al. (2006) also observed that an intake of soy products in postmenopausal women had a beneficial effect on some haematological parameters and that isoflavones may affect immune system functioning as a result of their receptors on lymphocytes . In our study, a significant decrease in SOD activity and an increase in lipid oxidation (MDA level) were observed in diabetic control rats' blood. Elevated levels of lipid peroxides and the reduction of antioxidant enzymes have been reported to occur in the diabetic state . In the hyperglycaemic state, sugars react with lipids and proteins that results in the generation of reactive oxygen species (ROS) . This ROS enhanced lipid peroxidation . In the current study, SM, FSM, and FSM + omega-3 groups showed significantly lower MDA and more SOD concentrations than the DC group. As was shown in our study, the antioxidant activities of soymilk and fermented soymilk have also been observed in other studies . The antioxidative abilities of soymilk are related to soy isoflavones, soy protein, and saponins . In our study, the significant effect of FSM products on the reduction of MDA when compared with the SM product was observed. This result showed that fermented soymilk had greater antioxidant and antimutagenic activities, relative to soymilk. 
An increase in the total antioxidant activity and antiradical effects of soymilk when fermented is also strongly supported by other studies . In the present study, a significant decrease in MDA concentration was observed following administration of the FSM + omega-3 product when compared with SM and FSM products. In addition, with regard to SOD concentration, the FSM + omega-3 product showed higher SOD levels than did the SM and FSM products. These findings are supported by several researchers who have reported a decreased production in MDA concentration in human subjects treated with omega-3 fatty acids . The above findings further support the research of Barbosa et al. (2003), who found that omega-3 fatty acid supplementation may induce antiradical activities . Omega-3 fatty acids can also prevent lipid peroxidation . Our results showed that, compared with the DC group, SM showed lower hs-CRP concentrations, but this reduction was not significant. This may have been related to a low amount of soymilk intake in the current study. Soy components have contradictory results in terms of inflammatory parameters and may be associated with lower levels of inflammatory parameters . In our study, hs-CRP concentrations in the FSM + omega-3 group were significantly decreased compared with those of the DC and FSM groups. It appears that the significant effects observed in our study were related to omega-3. The antiinflammatory activity of omega-3 fatty acids in many studies has previously been highlighted . The modulatory effects of omega-3 fatty acids on inflammation processes have also been found . In summary, the present study showed that soymilk may be beneficial in reducing the risk of the onset of diabetes and in reducing the complications associated with diabetes, including the prevention of weight loss, lower blood glucose, plasma lipids, oxidative stress, and inflammation. 
The components of soymilk including soy protein, fibre, saponins, peptides, and particularly isoflavones are responsible for the effects observed in this research. In addition the efficiency of soy milk is increased with fermentation. The most obvious finding to emerge from this study was that omega-3 fatty acids can strengthen the effects observed. Generally, combining soymilk, probiotics, and omega-3 is effective for reducing complications associated with diabetes. Future studies on the current topic are therefore recommended. Disclosure This paper was extracted from M.S. degree thesis approved by Shiraz University of Medical Sciences (no. 91-6074).
Newsletters worth subscribing to They might seem like a slightly anachronistic way to deliver and consume journalism. However, a newsletter done right still has a lot to offer, or, in fact, not so much, and that’s the point. It is probably no coincidence that newsletters have started to thrive again as our news consumption has become more realtime-oriented, more fragmented with bit-sized news from Twitter and notifications in general. Newsletters are finite, they comfort you by saying: This is all you need to know for now. Here’s a list of newsletters, both for actual news and content discovery beyond news, that I highly recommend. I’m subscribed to all of them. When it comes to actual news-letters, almost all publishers assume subscribers who are eager to learn about the most important news from (and only from) that very publication. Quartz’ Daily Brief is made for subscribers who trust Quartz to deliver them the most important news, irrespective of who has published them. It’s a completely different promise to its readers and positions the newsletter to be the one-stop-solution for anyone interested in a morning (business) news fix. Weekdays | 6am (time-zone adjusted) Subscribe The Guardian’s newsletter is probably the best to give you a broad overview of the latest from all sections. The Guardian Today is nicely designed and gives both the Guardian’s selection and the most popular pieces of the past 24 hours. Tailored for a British audience, alas, it arrives a little late in Central Europe. Weekdays | 9am CET Subscribe Next Draft by Steve Pell is more than just a news-letter. It follows the news agenda, but Pell is also very good at picking the most relevant stories to put the news in context. And if that’s not enough, he has his very own, pointed way to comment on the news. Almost daily | Nighttime CET Subscribe The Morning Memo is all about updating you on the news that broke «while you were sleeping». 
It provides a roundup on technology, science, business and UK news. Weekdays | 9am CET Subscribe Along with its longreads section, Buzzfeed has launched a weekly newsletter called Buzzreads. It delivers the latest longreads from Buzzfeed plus a handful of longreads from elsewhere they recommend. Sundays | Afternoon CET Subscribe Longreads, the service that popularised the curation of longform articles on the web, delivers five hand-picked reads every week. Usually excellent stuff. I often see pieces here for the second time, but only then decide to dedicate my time to them since they now have Longreads' «stamp of approval». This sort of trust in its judgement is the best a newsletter can achieve. Fridays | Nighttime CET Subscribe Medium – a platform for «Everyone's stories and ideas» – has a weekly newsletter of what its editors consider the best couple of pieces published on Medium in the past week. Usually, at least one or two treasures can be found here. Fridays | Evening CET Subscribe Matter, a kickstarted longform publisher recently acquired by Medium, has its own weekly newsletter with interesting longreads from other publications. Fridays | Late Afternoon CET Subscribe Nautilus is such a fantastic magazine that they can send out a newsletter with nothing but their own stories twice a week and still have me click on at least one every time. A keeper. About twice a week | Noon CET Subscribe Five Intriguing Things by The Atlantic's Alexis Madrigal delivers exactly that. Published daily, Madrigal only vaguely follows the news agenda in his selection of links that help to better understand technology and what it changes. He adds a fair amount of words himself to tease and comment on each piece he links to. Weekdays | Evening CET Subscribe The Daily Digg is your best way to catch up with the stories beyond news that get a lot of traction, keeping out the annoyingly viral stuff.
Weekdays | 1pm CET Subscribe Instapaper Weekly delivers the most popular stories of the week from the read-later service. Rather similar to the Longreads Weekly, but since it’s based exclusively on the judgement of the crowd, it’s a good complimentary. Thursdays | Afternoon CET Subscribe Your Sunday Hi is how storytelling-platform Hi reaches your inbox. Every story on Hi starts with a photograph, attached to a place. Reading the Sunday Hi and the stories it links to feels like traveling to foreign places and listening to interesting stories from strangers. Sundays | Nighttime CET Subscribe NEW Austin Kleon (whom you might know as the inventor of the Newspaper Blackout poems) sends out a weekly newsletter with a new poem and ten links to inspirational writing and art projects. Fridays | Afternoon/Evening CET Subscribe Then, finally, my own. The Weekly Filet is a (you guessed it: weekly) compilation of 5 extraordinary pieces found on the web, carefully selected. Fridays | Early Afternoon CET Subscribe Which are your favourite newsletters? Drop me a line @davidbauer or leave a comment. Removed from earlier versions of this article:
import React, {useState, Fragment} from "react"; import {IVariation, IOption} from "types/models"; import { Button, Checkbox, Grid, Dropdown, DropdownItemProps, Input } from "semantic-ui-react"; import "./Variation.css"; interface IVariationProps { variation: IVariation; options: IOption[]; handle: (variation: IVariation, options: IOption[]) => void; cancel: () => void; disableVaryPrice: boolean; } export function Variation({ variation, handle, cancel, options, disableVaryPrice }: IVariationProps) { const [optionsDropdownList, setDropDownList] = useState<DropdownItemProps[]>(options.map(option => ({ key: option.id, value: option.id, text: option.name }))); const [selectedOptions, setSelectedOptions] = useState((variation.options || []).map(({id}) => id)); const [variationName, setVariationName] = useState(variation.variation); const [varyPrice, setVaryPrice] = useState(variation.varyPrice); const [search, setSearchQuery] = useState(''); // const deleteOption = (id: string) => () => { // setSelectedOptions(optionsDropdownList.filter(option => option.key !== id)); // }; const onKeyDown = (event: KeyboardEvent) => { const {value} = event.target as any; setSearchQuery(value); if (event.key === "Enter" && value) { const tempId = `temp_${Date.now()}`; const newOption: DropdownItemProps = { key: tempId, value: tempId, text: value }; setSelectedOptions(selectedOptions.concat(tempId)); setDropDownList(optionsDropdownList.concat(newOption)); setSearchQuery(''); } }; const handleUpdateVariation = () => { const updatedOptions: IOption[] = optionsDropdownList.map(({value, text}) => ({ id: value as string, name: text as string })); const updatedVariationOption: IOption[] = selectedOptions.map(id => updatedOptions.find(option => option.id === id) as IOption); handle({...variation, options: updatedVariationOption, variation: variationName, varyPrice}, updatedOptions); } const checkBoxClicked = () => { if (!disableVaryPrice) { setVaryPrice(!varyPrice); } } console.log(search, 
variation, selectedOptions, optionsDropdownList); return ( <Grid divided={true}> <Grid.Row> <Grid.Column> <Input onChange={(_, {value}) => setVariationName(value)} value={variationName}/> </Grid.Column> </Grid.Row> <Fragment> <Grid.Row> <Grid.Column> <Checkbox label={`Prices can vary for each ${variation.variation}`} checked={varyPrice} disabled={disableVaryPrice} onClick={checkBoxClicked} /> </Grid.Column> </Grid.Row> <Grid.Row> <Grid.Column> <Dropdown clearable={true} multiple={true} selection={true} search={true} searchQuery={search} // onAddItem={console.log} onChange={(_, {value}) => setSelectedOptions(value as string[])} onSearchChange={(_, {searchQuery}) => setSearchQuery(searchQuery)} className="input-fluid" options={optionsDropdownList} value={selectedOptions} onKeyDown={onKeyDown} onKeyUp={onKeyDown} /> </Grid.Column> </Grid.Row> <Grid.Row> <Grid.Column> <Button color="blue" onClick={handleUpdateVariation} > Update Variation </Button> <Button color="red" onClick={cancel} > Cancel </Button> </Grid.Column> </Grid.Row> </Fragment> </Grid> ); }
<gh_stars>1-10 package io.github.jspinak.brobot.actions.actionExecution; import io.github.jspinak.brobot.actions.actionOptions.ActionOptions; import io.github.jspinak.brobot.datatypes.primitives.match.Matches; import io.github.jspinak.brobot.datatypes.state.ObjectCollection; /** * Actions that are run from the Action class need to follow this interface. */ public interface ActionInterface { Matches perform( ActionOptions actionOptions, ObjectCollection... objectCollections); }
Don’t Focus On Your Situation, Focus On Your Trajectory All of us have goals. There’s something we want to do, someplace we want to go, someone we want to be. It appears that we don’t intentionally plan to be stagnant. But are you progressing towards your intended destination? Or are you drifting away unknowingly? What does your life trajectory look like? Most of us are terrible at assessing our current progress. That’s okay because in reality, not everything is measurable. Objective metrics are difficult to come across for every situation and often inconvenient. The problem is that we consciously choose not to take action despite being fully in control of our situation. We take the path of least resistance because we’re programmed that way. We avoid pain and maximise pleasure, to our detriment. Fog Of War One of the reasons why we don’t do what’s best for us is because we don’t immediately feel the consequences of our actions. It’s a common occurrence: pleasure in the present becomes pain in the future. Consider how most people gain weight. Ice cream in the present seems fine because you’re not overweight. It makes sense then, that you have allowance for the occasional treat. But over time, this additional treat becomes a caloric surplus that leads to weight gain. That’s why fitness experts recommend calorie tracking when the aim is to lose or maintain weight. But life doesn’t work like that. It’s not possible to always measure the impacts of your actions. Once you’ve set into motion a series of events, it becomes impossible to immediately stop the effects from coming into play. You can mitigate it and prevent that from happening in the future, but you can’t retract what you’ve done. We judge ourselves and base our actions based on the present, but we should really be looking at the future. At any time, our life is on a trajectory that is trending either upwards or downwards — you are the one who decides where that goes. 
Rome Wasn’t Built In A Day One small problem will always become a large one over time. I learnt this the hard way during my time in the army. Navigating through thick vegetation, my team walked in the general direction of our objective. That general direction was just a few degrees off the actual coordinates of our intended checkpoint, but it landed us a few kilometres away. That cost us a good number of hours and squandered away our effort. The same thing will happen to you if you cannot say with certain where you’re headed. Being off course by just a few degrees will cost you dearly in the long run. Trying to figure out things on the fly sounds like a great idea, but often doesn’t pan out. There is no equivalent of the GPS when you’re trying to assess how you’re doing in life. “This is a fundamental irony of most people’s lives. They don’t quite know what they want to do with their lives. Yet they are very active.” — Ryan Holiday Contrast this method of mindless living with the alternative of discipline and purpose. While it takes infinitely more effort, it certainly pays off in the long run. General George Marshall kept a little black book for most of his career. In it, he would write down the names of officers who impressed him or seemed to show talent. He knew that one day he would need to call on them. That day came during World War II, where he promoted and advocated for generals such as Omar Bradley and Dwight Eisenhower. The outcome of that war could have been different without Marshall’s foresight. Control Your Life Trajectory You can decide where you end up a year from now. Are your values aligned with who you want to be? Are your habits aligned with what you want to do? Are your actions aligned with where you want to go? These are the things that we have direct control over. At any point in time, we have the power to alter our life trajectory by making small adjustments to ourselves. 
Unfortunately, we often aren’t thinking far ahead enough for ourselves. Every move we make has a residual impact on where we’re going to be. The most important thing you can do for yourself today is to invest in yourself. That is an investment that will pay for itself multiple times over; it is by far the most profitable business to be in. Make sure that you are becoming the strongest version of yourself. It doesn’t have to be a fixed five-year plan. Tim Ferris toys with short-term experiments, which gives him the opportunity to explore new ideas and chase his passions. But in the end, these experiments are aligned with his mission of chasing peak performance. He’s living up to the label he’s given himself: that of the world’s human guinea pig. Whatever you do, hold yourself accountable to it. Self-justification is a portrayal of the brain that, despite its stated goals or desires, is not interested in truth, but rather self-preservation. It’ll distort the number of mistakes you’ve made in order to protect the narrative about yourself. Don’t give in. Live Mindfully As Mae West said, “You only live once, but if you do it right, once is enough”. Make sure you know what you’re after and actively chase it. Don’t go through life simply reacting to your circumstances. If you don’t know where you’re going then you have to stop. Anyone that’s been lost before knows this — finding your bearings doesn’t just work itself out. So what’s your life trajectory like? Are you trending upwards or downwards?
/** * Removes the converter for the parsed java type * * @param type * the java type * @return the removed converter or <code>null</code> if none was registered for the parsed type. */ @SuppressWarnings("unchecked") public <T> TypeConverter<T> removeConverter(Class<T> type) { if (type == null) { return null; } TypeConverter<T> converter; if (type.isInterface()) { if (javaInterfaceConverters.containsKey(type)) { Map<Class<?>,TypeConverter<?>> javaInterfaceConverterMap = new HashMap<Class<?>,TypeConverter<?>>( this.javaInterfaceConverters); converter = (TypeConverter<T>) javaInterfaceConverterMap.remove(type); this.javaInterfaceConverters = javaInterfaceConverterMap; } else { converter = null; } } else { converter = (TypeConverter<T>) javaClassConverters.remove(type); } return converter; }
<filename>src/kt84/openmesh/append_quad_strip.hh #pragma once #include <vector> #include <utility> #include <functional> #include <OpenMesh/Core/Mesh/PolyMesh_ArrayKernelT.hh> #include <OpenMesh/Core/Mesh/Handles.hh> namespace kt84 { template <class MeshTrait> inline bool append_quad_strip( // Return: true if successful OpenMesh::PolyMesh_ArrayKernelT<MeshTrait>& mesh, // Target mesh OpenMesh::HalfedgeHandle & boundary_h_front, // Halfedge where the first quad is attached to. Updated to the corresponding newly generated halfedge. OpenMesh::HalfedgeHandle & boundary_h_back) // Halfedge where the last quad is attached to. Updated to the corresponding newly generated halfedge. { typedef OpenMesh::PolyMesh_ArrayKernelT<MeshTrait> Mesh; if (!mesh.is_boundary(boundary_h_front) || !mesh.is_boundary(boundary_h_back)) return false; OpenMesh::VertexHandle v_prev, v_next; for (auto h = boundary_h_front; ; ) { auto h_next = mesh.next_halfedge_handle(h); auto v_from = mesh.from_vertex_handle(h); auto v_to = mesh.to_vertex_handle (h); if (h == boundary_h_front) v_prev = mesh.add_vertex(typename Mesh::Point()); v_next = mesh.add_vertex(typename Mesh::Point()); mesh.add_face(v_from, v_to, v_next, v_prev); if (h == boundary_h_back) break; v_prev = v_next; h = h_next; } boundary_h_front = mesh.opposite_halfedge_handle(mesh.next_halfedge_handle(mesh.next_halfedge_handle(boundary_h_front))); boundary_h_back = mesh.opposite_halfedge_handle(mesh.next_halfedge_handle(mesh.next_halfedge_handle(boundary_h_back ))); return true; } }
China denounced what it called a “dangerous and provocative” act Tuesday after an American warship sailed within 12 nautical miles of a Chinese-built artificial island at the center of a regional dispute over maritime territory and sea routes. The incident reflects rising tensions between the United States and China over Beijing’s aggressive program of land reclamation and construction on rocks and reefs in the Spratly archipelago in the South China Sea, whose shores include Vietnam, Taiwan and the Philippines. The U.S. naval action was intended to uphold the principle of freedom of navigation in international waters, American officials said, and underscores that Washington does not accept China’s claim to territorial waters around the man-made islands. [U.S. warship sails within 12 miles of Chinese-built island] Analysts said it also was aimed at reassuring nervous American allies that Washington will not allow Beijing to throw its weight around in the region unchallenged. But there is a risk it could have military consequences. A U.S. guided-missile destroyer is challenging Beijing's territorial claims in the South China Sea. China's foreign minister, Wang Yi, warned the United States to "think again and not to act blindly or make trouble from nothing," but more patrols are reportedly planned for the coming weeks. (Reuters) China said it viewed the move as an infringement on its sovereignty and claimed it would damage regional peace and stability. The Foreign Ministry warned that Beijing might respond by speeding up its construction program. More ominously, the Chinese navy said further U.S. missions of this sort could “trigger eventualities” but did not elaborate. China said it had followed the USS Lassen as it passed close to Subi Reef, sending out a missile destroyer and a patrol boat. But a U.S. 
defense official said the mission had been completed “without incident.” The Lassen, a guided-missile destroyer, was accompanied by Navy surveillance planes, the official said, speaking on the condition of anonymity to discuss details of a sensitive military operation. The decision to send the U.S. ship followed months of debate in Washington over how to find a balance between standing up to China and provoking a spiral of confrontation and regional militarization. [Navy told China to expect ship] Last month, Beijing warned that it would “never allow any country” to violate what it considers its territorial waters and airspace around the islands. Foreign Ministry spokesman Lu Kang said the U.S. vessel entered Chinese waters “illegally,” adding that Chinese authorities had tracked it and warned it as it passed. “The action by the U.S. warship has threatened China’s sovereignty and security interests, endangered the safety of personnel and facilities on the islands and damaged regional peace and stability,” he said, urging the United States not to take further “dangerous and provocative actions.” Referring to the United States, the spokesman said at a news conference, “If certain parties continue to stir up trouble and create tension, then China may be forced to come to the conclusion that it is indeed necessary for us to speed up and strengthen relevant capacity building” on the islands. Hours later, China’s vice foreign minister, Zhang Yesui, summoned U.S. Ambassador Max Baucus to deliver a protest. China claims almost all of the South China Sea as its territory, including the main islands and reefs. The Philippines, Vietnam, Malaysia, Brunei and Taiwan also have overlapping claims, and several of them occupy different islands, reefs and rocks. China has carried out a massive program of land reclamation and construction on several islands since 2014, upsetting ties with the United States and several of those rival claimants. 
[As island expands, so do diplomatic complications] This week’s naval mission is partly intended to test a pledge made by President Xi Jinping during his visit to Washington last month that Beijing would not militarize the islands. Subi Reef, which lies close to the Philippines, used to be submerged at high tide before China began a dredging project to turn it into an island. It is now big enough to potentially host an airstrip. Satellite images show what looks like a surveillance tower and multiple satellite antennas on Subi Reef, according to the Asia Maritime Transparency Initiative, part of the Center for Strategic and International Studies (CSIS). Under the international law of the sea, turning such reefs into artificial islands does not imply any rights to territorial waters around them — although countries can claim a “safety zone” of 500 meters, or about 1,500 feet, around previously submerged reefs. A Chinese airstrip is under construction at Fiery Cross Reef, and experts say another could soon be built at Mischief Reef. China says the construction work is primarily designed for civilian use and will not affect freedom of navigation in the South China Sea. Four other countries already have airstrips in the Spratlys. [Photos: China’s rapid island-building strategy continues] Bonnie Glaser, an expert on the Chinese military at CSIS, said there were good reasons for the U.S. move. “Some parts of the administration believed this would make things even more difficult, that China would become even more obstreperous, more difficult to deal with,” she said, “and others thought this wasn’t something we should do before Xi Jinping came to Washington.” But in Beijing, retired Rear Adm. Yang Yi, a researcher at the Institute of National Strategic Studies at People’s Liberation Army National Defense University, said it would damage U.S-China relations and encourage China to speed up construction on the islands, and possibly militarize them. 
“The act is reckless, dangerous and irresponsible,” he said. “It shows the United States has the mentality of the big brother but the temper of a child. If it becomes a regular thing, military conflict in the region is inevitable and the U.S. would be the one who started it.” [Chinese activity in South China Sea poses complications for Obama] At September’s summit, President Obama told Xi that the United States would fly or sail anywhere that international law allows. Additional patrols will follow in coming weeks and also could be conducted around islands that have been built up by Vietnam and the Philippines in the Spratlys, a U.S. defense official told the Reuters news agency. “This is something that will be a regular occurrence, not a one-off event,” said the official, also speaking on the condition of anonymity. “It’s not something that’s unique to China.” Yanmei Xie, senior China researcher with the International Crisis Group in Beijing, said that there was a risk of miscalculation and accidental clashes in such actions but added that doing nothing also would have consequences. “Inaction would amount to acquiescing to the practice of taking unilateral actions to change the status quo in disregard of the international law of the sea,” she said. “It would undermine the credibility of the U.S. as an underwriter of regional security. It would sow doubt among American allies and friends about the U.S. commitment and the durability of its presence in the region.” Xu Yangjingjing in Beijing and Craig Whitlock and Steven Mufson in Washington contributed to this report. Read more: U.S. Navy alarmed at Beijing’s ‘Great Wall of Sand’ in South China Sea Tougher U.S. stance in the South China Sea could make it a crisis zone Today’s coverage from Post correspondents around the world
<reponame>sholaj/kubeform<gh_stars>1-10 /* Copyright The Kubeform Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by lister-gen. DO NOT EDIT. package v1alpha1 // AdvancedThreatProtectionListerExpansion allows custom methods to be added to // AdvancedThreatProtectionLister. type AdvancedThreatProtectionListerExpansion interface{} // AdvancedThreatProtectionNamespaceListerExpansion allows custom methods to be added to // AdvancedThreatProtectionNamespaceLister. type AdvancedThreatProtectionNamespaceListerExpansion interface{} // AnalysisServicesServerListerExpansion allows custom methods to be added to // AnalysisServicesServerLister. type AnalysisServicesServerListerExpansion interface{} // AnalysisServicesServerNamespaceListerExpansion allows custom methods to be added to // AnalysisServicesServerNamespaceLister. type AnalysisServicesServerNamespaceListerExpansion interface{} // ApiManagementListerExpansion allows custom methods to be added to // ApiManagementLister. type ApiManagementListerExpansion interface{} // ApiManagementNamespaceListerExpansion allows custom methods to be added to // ApiManagementNamespaceLister. type ApiManagementNamespaceListerExpansion interface{} // ApiManagementAPIListerExpansion allows custom methods to be added to // ApiManagementAPILister. type ApiManagementAPIListerExpansion interface{} // ApiManagementAPINamespaceListerExpansion allows custom methods to be added to // ApiManagementAPINamespaceLister. 
type ApiManagementAPINamespaceListerExpansion interface{} // ApiManagementAPIOperationListerExpansion allows custom methods to be added to // ApiManagementAPIOperationLister. type ApiManagementAPIOperationListerExpansion interface{} // ApiManagementAPIOperationNamespaceListerExpansion allows custom methods to be added to // ApiManagementAPIOperationNamespaceLister. type ApiManagementAPIOperationNamespaceListerExpansion interface{} // ApiManagementAPIOperationPolicyListerExpansion allows custom methods to be added to // ApiManagementAPIOperationPolicyLister. type ApiManagementAPIOperationPolicyListerExpansion interface{} // ApiManagementAPIOperationPolicyNamespaceListerExpansion allows custom methods to be added to // ApiManagementAPIOperationPolicyNamespaceLister. type ApiManagementAPIOperationPolicyNamespaceListerExpansion interface{} // ApiManagementAPIPolicyListerExpansion allows custom methods to be added to // ApiManagementAPIPolicyLister. type ApiManagementAPIPolicyListerExpansion interface{} // ApiManagementAPIPolicyNamespaceListerExpansion allows custom methods to be added to // ApiManagementAPIPolicyNamespaceLister. type ApiManagementAPIPolicyNamespaceListerExpansion interface{} // ApiManagementAPISchemaListerExpansion allows custom methods to be added to // ApiManagementAPISchemaLister. type ApiManagementAPISchemaListerExpansion interface{} // ApiManagementAPISchemaNamespaceListerExpansion allows custom methods to be added to // ApiManagementAPISchemaNamespaceLister. type ApiManagementAPISchemaNamespaceListerExpansion interface{} // ApiManagementAPIVersionSetListerExpansion allows custom methods to be added to // ApiManagementAPIVersionSetLister. type ApiManagementAPIVersionSetListerExpansion interface{} // ApiManagementAPIVersionSetNamespaceListerExpansion allows custom methods to be added to // ApiManagementAPIVersionSetNamespaceLister. 
type ApiManagementAPIVersionSetNamespaceListerExpansion interface{} // ApiManagementAuthorizationServerListerExpansion allows custom methods to be added to // ApiManagementAuthorizationServerLister. type ApiManagementAuthorizationServerListerExpansion interface{} // ApiManagementAuthorizationServerNamespaceListerExpansion allows custom methods to be added to // ApiManagementAuthorizationServerNamespaceLister. type ApiManagementAuthorizationServerNamespaceListerExpansion interface{} // ApiManagementBackendListerExpansion allows custom methods to be added to // ApiManagementBackendLister. type ApiManagementBackendListerExpansion interface{} // ApiManagementBackendNamespaceListerExpansion allows custom methods to be added to // ApiManagementBackendNamespaceLister. type ApiManagementBackendNamespaceListerExpansion interface{} // ApiManagementCertificateListerExpansion allows custom methods to be added to // ApiManagementCertificateLister. type ApiManagementCertificateListerExpansion interface{} // ApiManagementCertificateNamespaceListerExpansion allows custom methods to be added to // ApiManagementCertificateNamespaceLister. type ApiManagementCertificateNamespaceListerExpansion interface{} // ApiManagementDiagnosticListerExpansion allows custom methods to be added to // ApiManagementDiagnosticLister. type ApiManagementDiagnosticListerExpansion interface{} // ApiManagementDiagnosticNamespaceListerExpansion allows custom methods to be added to // ApiManagementDiagnosticNamespaceLister. type ApiManagementDiagnosticNamespaceListerExpansion interface{} // ApiManagementGroupListerExpansion allows custom methods to be added to // ApiManagementGroupLister. type ApiManagementGroupListerExpansion interface{} // ApiManagementGroupNamespaceListerExpansion allows custom methods to be added to // ApiManagementGroupNamespaceLister. 
type ApiManagementGroupNamespaceListerExpansion interface{} // ApiManagementGroupUserListerExpansion allows custom methods to be added to // ApiManagementGroupUserLister. type ApiManagementGroupUserListerExpansion interface{} // ApiManagementGroupUserNamespaceListerExpansion allows custom methods to be added to // ApiManagementGroupUserNamespaceLister. type ApiManagementGroupUserNamespaceListerExpansion interface{} // ApiManagementIdentityProviderAadListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderAadLister. type ApiManagementIdentityProviderAadListerExpansion interface{} // ApiManagementIdentityProviderAadNamespaceListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderAadNamespaceLister. type ApiManagementIdentityProviderAadNamespaceListerExpansion interface{} // ApiManagementIdentityProviderFacebookListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderFacebookLister. type ApiManagementIdentityProviderFacebookListerExpansion interface{} // ApiManagementIdentityProviderFacebookNamespaceListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderFacebookNamespaceLister. type ApiManagementIdentityProviderFacebookNamespaceListerExpansion interface{} // ApiManagementIdentityProviderGoogleListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderGoogleLister. type ApiManagementIdentityProviderGoogleListerExpansion interface{} // ApiManagementIdentityProviderGoogleNamespaceListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderGoogleNamespaceLister. type ApiManagementIdentityProviderGoogleNamespaceListerExpansion interface{} // ApiManagementIdentityProviderMicrosoftListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderMicrosoftLister. 
type ApiManagementIdentityProviderMicrosoftListerExpansion interface{} // ApiManagementIdentityProviderMicrosoftNamespaceListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderMicrosoftNamespaceLister. type ApiManagementIdentityProviderMicrosoftNamespaceListerExpansion interface{} // ApiManagementIdentityProviderTwitterListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderTwitterLister. type ApiManagementIdentityProviderTwitterListerExpansion interface{} // ApiManagementIdentityProviderTwitterNamespaceListerExpansion allows custom methods to be added to // ApiManagementIdentityProviderTwitterNamespaceLister. type ApiManagementIdentityProviderTwitterNamespaceListerExpansion interface{} // ApiManagementLoggerListerExpansion allows custom methods to be added to // ApiManagementLoggerLister. type ApiManagementLoggerListerExpansion interface{} // ApiManagementLoggerNamespaceListerExpansion allows custom methods to be added to // ApiManagementLoggerNamespaceLister. type ApiManagementLoggerNamespaceListerExpansion interface{} // ApiManagementOpenidConnectProviderListerExpansion allows custom methods to be added to // ApiManagementOpenidConnectProviderLister. type ApiManagementOpenidConnectProviderListerExpansion interface{} // ApiManagementOpenidConnectProviderNamespaceListerExpansion allows custom methods to be added to // ApiManagementOpenidConnectProviderNamespaceLister. type ApiManagementOpenidConnectProviderNamespaceListerExpansion interface{} // ApiManagementProductListerExpansion allows custom methods to be added to // ApiManagementProductLister. type ApiManagementProductListerExpansion interface{} // ApiManagementProductNamespaceListerExpansion allows custom methods to be added to // ApiManagementProductNamespaceLister. type ApiManagementProductNamespaceListerExpansion interface{} // ApiManagementProductAPIListerExpansion allows custom methods to be added to // ApiManagementProductAPILister. 
type ApiManagementProductAPIListerExpansion interface{} // ApiManagementProductAPINamespaceListerExpansion allows custom methods to be added to // ApiManagementProductAPINamespaceLister. type ApiManagementProductAPINamespaceListerExpansion interface{} // ApiManagementProductGroupListerExpansion allows custom methods to be added to // ApiManagementProductGroupLister. type ApiManagementProductGroupListerExpansion interface{} // ApiManagementProductGroupNamespaceListerExpansion allows custom methods to be added to // ApiManagementProductGroupNamespaceLister. type ApiManagementProductGroupNamespaceListerExpansion interface{} // ApiManagementProductPolicyListerExpansion allows custom methods to be added to // ApiManagementProductPolicyLister. type ApiManagementProductPolicyListerExpansion interface{} // ApiManagementProductPolicyNamespaceListerExpansion allows custom methods to be added to // ApiManagementProductPolicyNamespaceLister. type ApiManagementProductPolicyNamespaceListerExpansion interface{} // ApiManagementPropertyListerExpansion allows custom methods to be added to // ApiManagementPropertyLister. type ApiManagementPropertyListerExpansion interface{} // ApiManagementPropertyNamespaceListerExpansion allows custom methods to be added to // ApiManagementPropertyNamespaceLister. type ApiManagementPropertyNamespaceListerExpansion interface{} // ApiManagementSubscriptionListerExpansion allows custom methods to be added to // ApiManagementSubscriptionLister. type ApiManagementSubscriptionListerExpansion interface{} // ApiManagementSubscriptionNamespaceListerExpansion allows custom methods to be added to // ApiManagementSubscriptionNamespaceLister. type ApiManagementSubscriptionNamespaceListerExpansion interface{} // ApiManagementUserListerExpansion allows custom methods to be added to // ApiManagementUserLister. 
type ApiManagementUserListerExpansion interface{} // ApiManagementUserNamespaceListerExpansion allows custom methods to be added to // ApiManagementUserNamespaceLister. type ApiManagementUserNamespaceListerExpansion interface{} // AppConfigurationListerExpansion allows custom methods to be added to // AppConfigurationLister. type AppConfigurationListerExpansion interface{} // AppConfigurationNamespaceListerExpansion allows custom methods to be added to // AppConfigurationNamespaceLister. type AppConfigurationNamespaceListerExpansion interface{} // AppServiceListerExpansion allows custom methods to be added to // AppServiceLister. type AppServiceListerExpansion interface{} // AppServiceNamespaceListerExpansion allows custom methods to be added to // AppServiceNamespaceLister. type AppServiceNamespaceListerExpansion interface{} // AppServiceActiveSlotListerExpansion allows custom methods to be added to // AppServiceActiveSlotLister. type AppServiceActiveSlotListerExpansion interface{} // AppServiceActiveSlotNamespaceListerExpansion allows custom methods to be added to // AppServiceActiveSlotNamespaceLister. type AppServiceActiveSlotNamespaceListerExpansion interface{} // AppServiceCertificateListerExpansion allows custom methods to be added to // AppServiceCertificateLister. type AppServiceCertificateListerExpansion interface{} // AppServiceCertificateNamespaceListerExpansion allows custom methods to be added to // AppServiceCertificateNamespaceLister. type AppServiceCertificateNamespaceListerExpansion interface{} // AppServiceCertificateOrderListerExpansion allows custom methods to be added to // AppServiceCertificateOrderLister. type AppServiceCertificateOrderListerExpansion interface{} // AppServiceCertificateOrderNamespaceListerExpansion allows custom methods to be added to // AppServiceCertificateOrderNamespaceLister. 
// Expansion interfaces for the remaining App Service listers. All are empty
// by default; custom lister methods may be declared here.
type (
	// AppServiceCertificateOrderNamespaceListerExpansion allows custom methods to be added to AppServiceCertificateOrderNamespaceLister.
	AppServiceCertificateOrderNamespaceListerExpansion interface{}
	// AppServiceCustomHostnameBindingListerExpansion allows custom methods to be added to AppServiceCustomHostnameBindingLister.
	AppServiceCustomHostnameBindingListerExpansion interface{}
	// AppServiceCustomHostnameBindingNamespaceListerExpansion allows custom methods to be added to AppServiceCustomHostnameBindingNamespaceLister.
	AppServiceCustomHostnameBindingNamespaceListerExpansion interface{}
	// AppServicePlanListerExpansion allows custom methods to be added to AppServicePlanLister.
	AppServicePlanListerExpansion interface{}
	// AppServicePlanNamespaceListerExpansion allows custom methods to be added to AppServicePlanNamespaceLister.
	AppServicePlanNamespaceListerExpansion interface{}
	// AppServiceSlotListerExpansion allows custom methods to be added to AppServiceSlotLister.
	AppServiceSlotListerExpansion interface{}
	// AppServiceSlotNamespaceListerExpansion allows custom methods to be added to AppServiceSlotNamespaceLister.
	AppServiceSlotNamespaceListerExpansion interface{}
	// AppServiceSourceControlTokenListerExpansion allows custom methods to be added to AppServiceSourceControlTokenLister.
	AppServiceSourceControlTokenListerExpansion interface{}
	// AppServiceSourceControlTokenNamespaceListerExpansion allows custom methods to be added to AppServiceSourceControlTokenNamespaceLister.
	AppServiceSourceControlTokenNamespaceListerExpansion interface{}
	// AppServiceVirtualNetworkSwiftConnectionListerExpansion allows custom methods to be added to AppServiceVirtualNetworkSwiftConnectionLister.
	AppServiceVirtualNetworkSwiftConnectionListerExpansion interface{}
)

// AppServiceVirtualNetworkSwiftConnectionNamespaceListerExpansion allows custom methods to be added to
// AppServiceVirtualNetworkSwiftConnectionNamespaceLister.
// Expansion interfaces for the Application Gateway and Application Insights
// listers. All are empty by default; custom lister methods may be declared here.
type (
	// AppServiceVirtualNetworkSwiftConnectionNamespaceListerExpansion allows custom methods to be added to AppServiceVirtualNetworkSwiftConnectionNamespaceLister.
	AppServiceVirtualNetworkSwiftConnectionNamespaceListerExpansion interface{}
	// ApplicationGatewayListerExpansion allows custom methods to be added to ApplicationGatewayLister.
	ApplicationGatewayListerExpansion interface{}
	// ApplicationGatewayNamespaceListerExpansion allows custom methods to be added to ApplicationGatewayNamespaceLister.
	ApplicationGatewayNamespaceListerExpansion interface{}
	// ApplicationInsightsListerExpansion allows custom methods to be added to ApplicationInsightsLister.
	ApplicationInsightsListerExpansion interface{}
	// ApplicationInsightsNamespaceListerExpansion allows custom methods to be added to ApplicationInsightsNamespaceLister.
	ApplicationInsightsNamespaceListerExpansion interface{}
	// ApplicationInsightsAPIKeyListerExpansion allows custom methods to be added to ApplicationInsightsAPIKeyLister.
	ApplicationInsightsAPIKeyListerExpansion interface{}
	// ApplicationInsightsAPIKeyNamespaceListerExpansion allows custom methods to be added to ApplicationInsightsAPIKeyNamespaceLister.
	ApplicationInsightsAPIKeyNamespaceListerExpansion interface{}
	// ApplicationInsightsAnalyticsItemListerExpansion allows custom methods to be added to ApplicationInsightsAnalyticsItemLister.
	ApplicationInsightsAnalyticsItemListerExpansion interface{}
	// ApplicationInsightsAnalyticsItemNamespaceListerExpansion allows custom methods to be added to ApplicationInsightsAnalyticsItemNamespaceLister.
	ApplicationInsightsAnalyticsItemNamespaceListerExpansion interface{}
	// ApplicationInsightsWebTestListerExpansion allows custom methods to be added to ApplicationInsightsWebTestLister.
	ApplicationInsightsWebTestListerExpansion interface{}
)

// ApplicationInsightsWebTestNamespaceListerExpansion allows custom methods to be added to
// ApplicationInsightsWebTestNamespaceLister.
// Expansion interfaces for the Application Security Group and Automation
// listers. All are empty by default; custom lister methods may be declared here.
type (
	// ApplicationInsightsWebTestNamespaceListerExpansion allows custom methods to be added to ApplicationInsightsWebTestNamespaceLister.
	ApplicationInsightsWebTestNamespaceListerExpansion interface{}
	// ApplicationSecurityGroupListerExpansion allows custom methods to be added to ApplicationSecurityGroupLister.
	ApplicationSecurityGroupListerExpansion interface{}
	// ApplicationSecurityGroupNamespaceListerExpansion allows custom methods to be added to ApplicationSecurityGroupNamespaceLister.
	ApplicationSecurityGroupNamespaceListerExpansion interface{}
	// AutomationAccountListerExpansion allows custom methods to be added to AutomationAccountLister.
	AutomationAccountListerExpansion interface{}
	// AutomationAccountNamespaceListerExpansion allows custom methods to be added to AutomationAccountNamespaceLister.
	AutomationAccountNamespaceListerExpansion interface{}
	// AutomationCertificateListerExpansion allows custom methods to be added to AutomationCertificateLister.
	AutomationCertificateListerExpansion interface{}
	// AutomationCertificateNamespaceListerExpansion allows custom methods to be added to AutomationCertificateNamespaceLister.
	AutomationCertificateNamespaceListerExpansion interface{}
	// AutomationCredentialListerExpansion allows custom methods to be added to AutomationCredentialLister.
	AutomationCredentialListerExpansion interface{}
	// AutomationCredentialNamespaceListerExpansion allows custom methods to be added to AutomationCredentialNamespaceLister.
	AutomationCredentialNamespaceListerExpansion interface{}
	// AutomationDscConfigurationListerExpansion allows custom methods to be added to AutomationDscConfigurationLister.
	AutomationDscConfigurationListerExpansion interface{}
	// AutomationDscConfigurationNamespaceListerExpansion allows custom methods to be added to AutomationDscConfigurationNamespaceLister.
	AutomationDscConfigurationNamespaceListerExpansion interface{}
)

// AutomationDscNodeconfigurationListerExpansion allows custom methods to be added to
// AutomationDscNodeconfigurationLister.
// Expansion interfaces for the remaining Automation listers. All are empty
// by default; custom lister methods may be declared here.
type (
	// AutomationDscNodeconfigurationListerExpansion allows custom methods to be added to AutomationDscNodeconfigurationLister.
	AutomationDscNodeconfigurationListerExpansion interface{}
	// AutomationDscNodeconfigurationNamespaceListerExpansion allows custom methods to be added to AutomationDscNodeconfigurationNamespaceLister.
	AutomationDscNodeconfigurationNamespaceListerExpansion interface{}
	// AutomationJobScheduleListerExpansion allows custom methods to be added to AutomationJobScheduleLister.
	AutomationJobScheduleListerExpansion interface{}
	// AutomationJobScheduleNamespaceListerExpansion allows custom methods to be added to AutomationJobScheduleNamespaceLister.
	AutomationJobScheduleNamespaceListerExpansion interface{}
	// AutomationModuleListerExpansion allows custom methods to be added to AutomationModuleLister.
	AutomationModuleListerExpansion interface{}
	// AutomationModuleNamespaceListerExpansion allows custom methods to be added to AutomationModuleNamespaceLister.
	AutomationModuleNamespaceListerExpansion interface{}
	// AutomationRunbookListerExpansion allows custom methods to be added to AutomationRunbookLister.
	AutomationRunbookListerExpansion interface{}
	// AutomationRunbookNamespaceListerExpansion allows custom methods to be added to AutomationRunbookNamespaceLister.
	AutomationRunbookNamespaceListerExpansion interface{}
	// AutomationScheduleListerExpansion allows custom methods to be added to AutomationScheduleLister.
	AutomationScheduleListerExpansion interface{}
	// AutomationScheduleNamespaceListerExpansion allows custom methods to be added to AutomationScheduleNamespaceLister.
	AutomationScheduleNamespaceListerExpansion interface{}
	// AutomationVariableBoolListerExpansion allows custom methods to be added to AutomationVariableBoolLister.
	AutomationVariableBoolListerExpansion interface{}
)

// AutomationVariableBoolNamespaceListerExpansion allows custom methods to be added to
// AutomationVariableBoolNamespaceLister.
// Expansion interfaces for the Automation variable, Autoscale Setting and
// Availability Set listers. All are empty by default; custom lister methods
// may be declared here.
type (
	// AutomationVariableBoolNamespaceListerExpansion allows custom methods to be added to AutomationVariableBoolNamespaceLister.
	AutomationVariableBoolNamespaceListerExpansion interface{}
	// AutomationVariableDatetimeListerExpansion allows custom methods to be added to AutomationVariableDatetimeLister.
	AutomationVariableDatetimeListerExpansion interface{}
	// AutomationVariableDatetimeNamespaceListerExpansion allows custom methods to be added to AutomationVariableDatetimeNamespaceLister.
	AutomationVariableDatetimeNamespaceListerExpansion interface{}
	// AutomationVariableIntListerExpansion allows custom methods to be added to AutomationVariableIntLister.
	AutomationVariableIntListerExpansion interface{}
	// AutomationVariableIntNamespaceListerExpansion allows custom methods to be added to AutomationVariableIntNamespaceLister.
	AutomationVariableIntNamespaceListerExpansion interface{}
	// AutomationVariableStringListerExpansion allows custom methods to be added to AutomationVariableStringLister.
	AutomationVariableStringListerExpansion interface{}
	// AutomationVariableStringNamespaceListerExpansion allows custom methods to be added to AutomationVariableStringNamespaceLister.
	AutomationVariableStringNamespaceListerExpansion interface{}
	// AutoscaleSettingListerExpansion allows custom methods to be added to AutoscaleSettingLister.
	AutoscaleSettingListerExpansion interface{}
	// AutoscaleSettingNamespaceListerExpansion allows custom methods to be added to AutoscaleSettingNamespaceLister.
	AutoscaleSettingNamespaceListerExpansion interface{}
	// AvailabilitySetListerExpansion allows custom methods to be added to AvailabilitySetLister.
	AvailabilitySetListerExpansion interface{}
	// AvailabilitySetNamespaceListerExpansion allows custom methods to be added to AvailabilitySetNamespaceLister.
	AvailabilitySetNamespaceListerExpansion interface{}
)

// AzureadApplicationListerExpansion allows custom methods to be added to
// AzureadApplicationLister.
// Expansion interfaces for the Azure AD and Backup listers. All are empty
// by default; custom lister methods may be declared here.
type (
	// AzureadApplicationListerExpansion allows custom methods to be added to AzureadApplicationLister.
	AzureadApplicationListerExpansion interface{}
	// AzureadApplicationNamespaceListerExpansion allows custom methods to be added to AzureadApplicationNamespaceLister.
	AzureadApplicationNamespaceListerExpansion interface{}
	// AzureadServicePrincipalListerExpansion allows custom methods to be added to AzureadServicePrincipalLister.
	AzureadServicePrincipalListerExpansion interface{}
	// AzureadServicePrincipalNamespaceListerExpansion allows custom methods to be added to AzureadServicePrincipalNamespaceLister.
	AzureadServicePrincipalNamespaceListerExpansion interface{}
	// AzureadServicePrincipalPasswordListerExpansion allows custom methods to be added to AzureadServicePrincipalPasswordLister.
	AzureadServicePrincipalPasswordListerExpansion interface{}
	// AzureadServicePrincipalPasswordNamespaceListerExpansion allows custom methods to be added to AzureadServicePrincipalPasswordNamespaceLister.
	AzureadServicePrincipalPasswordNamespaceListerExpansion interface{}
	// BackupContainerStorageAccountListerExpansion allows custom methods to be added to BackupContainerStorageAccountLister.
	BackupContainerStorageAccountListerExpansion interface{}
	// BackupContainerStorageAccountNamespaceListerExpansion allows custom methods to be added to BackupContainerStorageAccountNamespaceLister.
	BackupContainerStorageAccountNamespaceListerExpansion interface{}
	// BackupPolicyFileShareListerExpansion allows custom methods to be added to BackupPolicyFileShareLister.
	BackupPolicyFileShareListerExpansion interface{}
	// BackupPolicyFileShareNamespaceListerExpansion allows custom methods to be added to BackupPolicyFileShareNamespaceLister.
	BackupPolicyFileShareNamespaceListerExpansion interface{}
)

// BackupPolicyVmListerExpansion allows custom methods to be added to
// BackupPolicyVmLister.
// Expansion interfaces for the Backup VM, Bastion Host and Batch listers.
// All are empty by default; custom lister methods may be declared here.
type (
	// BackupPolicyVmListerExpansion allows custom methods to be added to BackupPolicyVmLister.
	BackupPolicyVmListerExpansion interface{}
	// BackupPolicyVmNamespaceListerExpansion allows custom methods to be added to BackupPolicyVmNamespaceLister.
	BackupPolicyVmNamespaceListerExpansion interface{}
	// BackupProtectedFileShareListerExpansion allows custom methods to be added to BackupProtectedFileShareLister.
	BackupProtectedFileShareListerExpansion interface{}
	// BackupProtectedFileShareNamespaceListerExpansion allows custom methods to be added to BackupProtectedFileShareNamespaceLister.
	BackupProtectedFileShareNamespaceListerExpansion interface{}
	// BackupProtectedVmListerExpansion allows custom methods to be added to BackupProtectedVmLister.
	BackupProtectedVmListerExpansion interface{}
	// BackupProtectedVmNamespaceListerExpansion allows custom methods to be added to BackupProtectedVmNamespaceLister.
	BackupProtectedVmNamespaceListerExpansion interface{}
	// BastionHostListerExpansion allows custom methods to be added to BastionHostLister.
	BastionHostListerExpansion interface{}
	// BastionHostNamespaceListerExpansion allows custom methods to be added to BastionHostNamespaceLister.
	BastionHostNamespaceListerExpansion interface{}
	// BatchAccountListerExpansion allows custom methods to be added to BatchAccountLister.
	BatchAccountListerExpansion interface{}
	// BatchAccountNamespaceListerExpansion allows custom methods to be added to BatchAccountNamespaceLister.
	BatchAccountNamespaceListerExpansion interface{}
	// BatchApplicationListerExpansion allows custom methods to be added to BatchApplicationLister.
	BatchApplicationListerExpansion interface{}
	// BatchApplicationNamespaceListerExpansion allows custom methods to be added to BatchApplicationNamespaceLister.
	BatchApplicationNamespaceListerExpansion interface{}
)

// BatchCertificateListerExpansion allows custom methods to be added to
// BatchCertificateLister.
// Expansion interfaces for the Batch certificate/pool and Bot channel
// listers. All are empty by default; custom lister methods may be declared here.
type (
	// BatchCertificateListerExpansion allows custom methods to be added to BatchCertificateLister.
	BatchCertificateListerExpansion interface{}
	// BatchCertificateNamespaceListerExpansion allows custom methods to be added to BatchCertificateNamespaceLister.
	BatchCertificateNamespaceListerExpansion interface{}
	// BatchPoolListerExpansion allows custom methods to be added to BatchPoolLister.
	BatchPoolListerExpansion interface{}
	// BatchPoolNamespaceListerExpansion allows custom methods to be added to BatchPoolNamespaceLister.
	BatchPoolNamespaceListerExpansion interface{}
	// BotChannelEmailListerExpansion allows custom methods to be added to BotChannelEmailLister.
	BotChannelEmailListerExpansion interface{}
	// BotChannelEmailNamespaceListerExpansion allows custom methods to be added to BotChannelEmailNamespaceLister.
	BotChannelEmailNamespaceListerExpansion interface{}
	// BotChannelMsTeamsListerExpansion allows custom methods to be added to BotChannelMsTeamsLister.
	BotChannelMsTeamsListerExpansion interface{}
	// BotChannelMsTeamsNamespaceListerExpansion allows custom methods to be added to BotChannelMsTeamsNamespaceLister.
	BotChannelMsTeamsNamespaceListerExpansion interface{}
	// BotChannelSlackListerExpansion allows custom methods to be added to BotChannelSlackLister.
	BotChannelSlackListerExpansion interface{}
	// BotChannelSlackNamespaceListerExpansion allows custom methods to be added to BotChannelSlackNamespaceLister.
	BotChannelSlackNamespaceListerExpansion interface{}
	// BotChannelsRegistrationListerExpansion allows custom methods to be added to BotChannelsRegistrationLister.
	BotChannelsRegistrationListerExpansion interface{}
	// BotChannelsRegistrationNamespaceListerExpansion allows custom methods to be added to BotChannelsRegistrationNamespaceLister.
	BotChannelsRegistrationNamespaceListerExpansion interface{}
)

// BotConnectionListerExpansion allows custom methods to be added to
// BotConnectionLister.
// Expansion interfaces for the Bot, CDN, Cognitive Account, Connection
// Monitor and Container Group listers. All are empty by default; custom
// lister methods may be declared here.
type (
	// BotConnectionListerExpansion allows custom methods to be added to BotConnectionLister.
	BotConnectionListerExpansion interface{}
	// BotConnectionNamespaceListerExpansion allows custom methods to be added to BotConnectionNamespaceLister.
	BotConnectionNamespaceListerExpansion interface{}
	// BotWebAppListerExpansion allows custom methods to be added to BotWebAppLister.
	BotWebAppListerExpansion interface{}
	// BotWebAppNamespaceListerExpansion allows custom methods to be added to BotWebAppNamespaceLister.
	BotWebAppNamespaceListerExpansion interface{}
	// CdnEndpointListerExpansion allows custom methods to be added to CdnEndpointLister.
	CdnEndpointListerExpansion interface{}
	// CdnEndpointNamespaceListerExpansion allows custom methods to be added to CdnEndpointNamespaceLister.
	CdnEndpointNamespaceListerExpansion interface{}
	// CdnProfileListerExpansion allows custom methods to be added to CdnProfileLister.
	CdnProfileListerExpansion interface{}
	// CdnProfileNamespaceListerExpansion allows custom methods to be added to CdnProfileNamespaceLister.
	CdnProfileNamespaceListerExpansion interface{}
	// CognitiveAccountListerExpansion allows custom methods to be added to CognitiveAccountLister.
	CognitiveAccountListerExpansion interface{}
	// CognitiveAccountNamespaceListerExpansion allows custom methods to be added to CognitiveAccountNamespaceLister.
	CognitiveAccountNamespaceListerExpansion interface{}
	// ConnectionMonitorListerExpansion allows custom methods to be added to ConnectionMonitorLister.
	ConnectionMonitorListerExpansion interface{}
	// ConnectionMonitorNamespaceListerExpansion allows custom methods to be added to ConnectionMonitorNamespaceLister.
	ConnectionMonitorNamespaceListerExpansion interface{}
	// ContainerGroupListerExpansion allows custom methods to be added to ContainerGroupLister.
	ContainerGroupListerExpansion interface{}
)

// ContainerGroupNamespaceListerExpansion allows custom methods to be added to
// ContainerGroupNamespaceLister.
// Expansion interfaces for the Container Registry/Service and Cosmos DB
// account listers. All are empty by default; custom lister methods may be
// declared here.
type (
	// ContainerGroupNamespaceListerExpansion allows custom methods to be added to ContainerGroupNamespaceLister.
	ContainerGroupNamespaceListerExpansion interface{}
	// ContainerRegistryListerExpansion allows custom methods to be added to ContainerRegistryLister.
	ContainerRegistryListerExpansion interface{}
	// ContainerRegistryNamespaceListerExpansion allows custom methods to be added to ContainerRegistryNamespaceLister.
	ContainerRegistryNamespaceListerExpansion interface{}
	// ContainerRegistryWebhookListerExpansion allows custom methods to be added to ContainerRegistryWebhookLister.
	ContainerRegistryWebhookListerExpansion interface{}
	// ContainerRegistryWebhookNamespaceListerExpansion allows custom methods to be added to ContainerRegistryWebhookNamespaceLister.
	ContainerRegistryWebhookNamespaceListerExpansion interface{}
	// ContainerServiceListerExpansion allows custom methods to be added to ContainerServiceLister.
	ContainerServiceListerExpansion interface{}
	// ContainerServiceNamespaceListerExpansion allows custom methods to be added to ContainerServiceNamespaceLister.
	ContainerServiceNamespaceListerExpansion interface{}
	// CosmosdbAccountListerExpansion allows custom methods to be added to CosmosdbAccountLister.
	CosmosdbAccountListerExpansion interface{}
	// CosmosdbAccountNamespaceListerExpansion allows custom methods to be added to CosmosdbAccountNamespaceLister.
	CosmosdbAccountNamespaceListerExpansion interface{}
	// CosmosdbCassandraKeyspaceListerExpansion allows custom methods to be added to CosmosdbCassandraKeyspaceLister.
	CosmosdbCassandraKeyspaceListerExpansion interface{}
	// CosmosdbCassandraKeyspaceNamespaceListerExpansion allows custom methods to be added to CosmosdbCassandraKeyspaceNamespaceLister.
	CosmosdbCassandraKeyspaceNamespaceListerExpansion interface{}
)

// CosmosdbGremlinDatabaseListerExpansion allows custom methods to be added to
// CosmosdbGremlinDatabaseLister.
// Expansion interfaces for the Cosmos DB Gremlin, Mongo and SQL listers.
// All are empty by default; custom lister methods may be declared here.
type (
	// CosmosdbGremlinDatabaseListerExpansion allows custom methods to be added to CosmosdbGremlinDatabaseLister.
	CosmosdbGremlinDatabaseListerExpansion interface{}
	// CosmosdbGremlinDatabaseNamespaceListerExpansion allows custom methods to be added to CosmosdbGremlinDatabaseNamespaceLister.
	CosmosdbGremlinDatabaseNamespaceListerExpansion interface{}
	// CosmosdbGremlinGraphListerExpansion allows custom methods to be added to CosmosdbGremlinGraphLister.
	CosmosdbGremlinGraphListerExpansion interface{}
	// CosmosdbGremlinGraphNamespaceListerExpansion allows custom methods to be added to CosmosdbGremlinGraphNamespaceLister.
	CosmosdbGremlinGraphNamespaceListerExpansion interface{}
	// CosmosdbMongoCollectionListerExpansion allows custom methods to be added to CosmosdbMongoCollectionLister.
	CosmosdbMongoCollectionListerExpansion interface{}
	// CosmosdbMongoCollectionNamespaceListerExpansion allows custom methods to be added to CosmosdbMongoCollectionNamespaceLister.
	CosmosdbMongoCollectionNamespaceListerExpansion interface{}
	// CosmosdbMongoDatabaseListerExpansion allows custom methods to be added to CosmosdbMongoDatabaseLister.
	CosmosdbMongoDatabaseListerExpansion interface{}
	// CosmosdbMongoDatabaseNamespaceListerExpansion allows custom methods to be added to CosmosdbMongoDatabaseNamespaceLister.
	CosmosdbMongoDatabaseNamespaceListerExpansion interface{}
	// CosmosdbSQLContainerListerExpansion allows custom methods to be added to CosmosdbSQLContainerLister.
	CosmosdbSQLContainerListerExpansion interface{}
	// CosmosdbSQLContainerNamespaceListerExpansion allows custom methods to be added to CosmosdbSQLContainerNamespaceLister.
	CosmosdbSQLContainerNamespaceListerExpansion interface{}
	// CosmosdbSQLDatabaseListerExpansion allows custom methods to be added to CosmosdbSQLDatabaseLister.
	CosmosdbSQLDatabaseListerExpansion interface{}
)

// CosmosdbSQLDatabaseNamespaceListerExpansion allows custom methods to be added to
// CosmosdbSQLDatabaseNamespaceLister.
// Expansion interfaces for the Cosmos DB table, Dashboard and Data Factory
// listers. All are empty by default; custom lister methods may be declared here.
type (
	// CosmosdbSQLDatabaseNamespaceListerExpansion allows custom methods to be added to CosmosdbSQLDatabaseNamespaceLister.
	CosmosdbSQLDatabaseNamespaceListerExpansion interface{}
	// CosmosdbTableListerExpansion allows custom methods to be added to CosmosdbTableLister.
	CosmosdbTableListerExpansion interface{}
	// CosmosdbTableNamespaceListerExpansion allows custom methods to be added to CosmosdbTableNamespaceLister.
	CosmosdbTableNamespaceListerExpansion interface{}
	// DashboardListerExpansion allows custom methods to be added to DashboardLister.
	DashboardListerExpansion interface{}
	// DashboardNamespaceListerExpansion allows custom methods to be added to DashboardNamespaceLister.
	DashboardNamespaceListerExpansion interface{}
	// DataFactoryListerExpansion allows custom methods to be added to DataFactoryLister.
	DataFactoryListerExpansion interface{}
	// DataFactoryNamespaceListerExpansion allows custom methods to be added to DataFactoryNamespaceLister.
	DataFactoryNamespaceListerExpansion interface{}
	// DataFactoryDatasetMysqlListerExpansion allows custom methods to be added to DataFactoryDatasetMysqlLister.
	DataFactoryDatasetMysqlListerExpansion interface{}
	// DataFactoryDatasetMysqlNamespaceListerExpansion allows custom methods to be added to DataFactoryDatasetMysqlNamespaceLister.
	DataFactoryDatasetMysqlNamespaceListerExpansion interface{}
	// DataFactoryDatasetPostgresqlListerExpansion allows custom methods to be added to DataFactoryDatasetPostgresqlLister.
	DataFactoryDatasetPostgresqlListerExpansion interface{}
	// DataFactoryDatasetPostgresqlNamespaceListerExpansion allows custom methods to be added to DataFactoryDatasetPostgresqlNamespaceLister.
	DataFactoryDatasetPostgresqlNamespaceListerExpansion interface{}
)

// DataFactoryDatasetSQLServerTableListerExpansion allows custom methods to be added to
// DataFactoryDatasetSQLServerTableLister.
// Expansion interfaces for the Data Factory dataset, integration runtime
// and linked service listers. All are empty by default; custom lister
// methods may be declared here.
type (
	// DataFactoryDatasetSQLServerTableListerExpansion allows custom methods to be added to DataFactoryDatasetSQLServerTableLister.
	DataFactoryDatasetSQLServerTableListerExpansion interface{}
	// DataFactoryDatasetSQLServerTableNamespaceListerExpansion allows custom methods to be added to DataFactoryDatasetSQLServerTableNamespaceLister.
	DataFactoryDatasetSQLServerTableNamespaceListerExpansion interface{}
	// DataFactoryIntegrationRuntimeManagedListerExpansion allows custom methods to be added to DataFactoryIntegrationRuntimeManagedLister.
	DataFactoryIntegrationRuntimeManagedListerExpansion interface{}
	// DataFactoryIntegrationRuntimeManagedNamespaceListerExpansion allows custom methods to be added to DataFactoryIntegrationRuntimeManagedNamespaceLister.
	DataFactoryIntegrationRuntimeManagedNamespaceListerExpansion interface{}
	// DataFactoryLinkedServiceDataLakeStorageGen2ListerExpansion allows custom methods to be added to DataFactoryLinkedServiceDataLakeStorageGen2Lister.
	DataFactoryLinkedServiceDataLakeStorageGen2ListerExpansion interface{}
	// DataFactoryLinkedServiceDataLakeStorageGen2NamespaceListerExpansion allows custom methods to be added to DataFactoryLinkedServiceDataLakeStorageGen2NamespaceLister.
	DataFactoryLinkedServiceDataLakeStorageGen2NamespaceListerExpansion interface{}
	// DataFactoryLinkedServiceMysqlListerExpansion allows custom methods to be added to DataFactoryLinkedServiceMysqlLister.
	DataFactoryLinkedServiceMysqlListerExpansion interface{}
	// DataFactoryLinkedServiceMysqlNamespaceListerExpansion allows custom methods to be added to DataFactoryLinkedServiceMysqlNamespaceLister.
	DataFactoryLinkedServiceMysqlNamespaceListerExpansion interface{}
	// DataFactoryLinkedServicePostgresqlListerExpansion allows custom methods to be added to DataFactoryLinkedServicePostgresqlLister.
	DataFactoryLinkedServicePostgresqlListerExpansion interface{}
)

// DataFactoryLinkedServicePostgresqlNamespaceListerExpansion allows custom methods to be added to
// DataFactoryLinkedServicePostgresqlNamespaceLister.
// Expansion interfaces for the remaining Data Factory and the Data Lake
// Analytics listers. All are empty by default; custom lister methods may be
// declared here.
type (
	// DataFactoryLinkedServicePostgresqlNamespaceListerExpansion allows custom methods to be added to DataFactoryLinkedServicePostgresqlNamespaceLister.
	DataFactoryLinkedServicePostgresqlNamespaceListerExpansion interface{}
	// DataFactoryLinkedServiceSQLServerListerExpansion allows custom methods to be added to DataFactoryLinkedServiceSQLServerLister.
	DataFactoryLinkedServiceSQLServerListerExpansion interface{}
	// DataFactoryLinkedServiceSQLServerNamespaceListerExpansion allows custom methods to be added to DataFactoryLinkedServiceSQLServerNamespaceLister.
	DataFactoryLinkedServiceSQLServerNamespaceListerExpansion interface{}
	// DataFactoryPipelineListerExpansion allows custom methods to be added to DataFactoryPipelineLister.
	DataFactoryPipelineListerExpansion interface{}
	// DataFactoryPipelineNamespaceListerExpansion allows custom methods to be added to DataFactoryPipelineNamespaceLister.
	DataFactoryPipelineNamespaceListerExpansion interface{}
	// DataFactoryTriggerScheduleListerExpansion allows custom methods to be added to DataFactoryTriggerScheduleLister.
	DataFactoryTriggerScheduleListerExpansion interface{}
	// DataFactoryTriggerScheduleNamespaceListerExpansion allows custom methods to be added to DataFactoryTriggerScheduleNamespaceLister.
	DataFactoryTriggerScheduleNamespaceListerExpansion interface{}
	// DataLakeAnalyticsAccountListerExpansion allows custom methods to be added to DataLakeAnalyticsAccountLister.
	DataLakeAnalyticsAccountListerExpansion interface{}
	// DataLakeAnalyticsAccountNamespaceListerExpansion allows custom methods to be added to DataLakeAnalyticsAccountNamespaceLister.
	DataLakeAnalyticsAccountNamespaceListerExpansion interface{}
	// DataLakeAnalyticsFirewallRuleListerExpansion allows custom methods to be added to DataLakeAnalyticsFirewallRuleLister.
	DataLakeAnalyticsFirewallRuleListerExpansion interface{}
)

// DataLakeAnalyticsFirewallRuleNamespaceListerExpansion allows custom methods to be added to
// DataLakeAnalyticsFirewallRuleNamespaceLister.
// Expansion interfaces for the Data Lake Store, Databricks and DDoS
// Protection Plan listers. All are empty by default; custom lister methods
// may be declared here.
type (
	// DataLakeAnalyticsFirewallRuleNamespaceListerExpansion allows custom methods to be added to DataLakeAnalyticsFirewallRuleNamespaceLister.
	DataLakeAnalyticsFirewallRuleNamespaceListerExpansion interface{}
	// DataLakeStoreListerExpansion allows custom methods to be added to DataLakeStoreLister.
	DataLakeStoreListerExpansion interface{}
	// DataLakeStoreNamespaceListerExpansion allows custom methods to be added to DataLakeStoreNamespaceLister.
	DataLakeStoreNamespaceListerExpansion interface{}
	// DataLakeStoreFileListerExpansion allows custom methods to be added to DataLakeStoreFileLister.
	DataLakeStoreFileListerExpansion interface{}
	// DataLakeStoreFileNamespaceListerExpansion allows custom methods to be added to DataLakeStoreFileNamespaceLister.
	DataLakeStoreFileNamespaceListerExpansion interface{}
	// DataLakeStoreFirewallRuleListerExpansion allows custom methods to be added to DataLakeStoreFirewallRuleLister.
	DataLakeStoreFirewallRuleListerExpansion interface{}
	// DataLakeStoreFirewallRuleNamespaceListerExpansion allows custom methods to be added to DataLakeStoreFirewallRuleNamespaceLister.
	DataLakeStoreFirewallRuleNamespaceListerExpansion interface{}
	// DatabricksWorkspaceListerExpansion allows custom methods to be added to DatabricksWorkspaceLister.
	DatabricksWorkspaceListerExpansion interface{}
	// DatabricksWorkspaceNamespaceListerExpansion allows custom methods to be added to DatabricksWorkspaceNamespaceLister.
	DatabricksWorkspaceNamespaceListerExpansion interface{}
	// DdosProtectionPlanListerExpansion allows custom methods to be added to DdosProtectionPlanLister.
	DdosProtectionPlanListerExpansion interface{}
	// DdosProtectionPlanNamespaceListerExpansion allows custom methods to be added to DdosProtectionPlanNamespaceLister.
	DdosProtectionPlanNamespaceListerExpansion interface{}
)

// DedicatedHostListerExpansion allows custom methods to be added to
// DedicatedHostLister.
// Expansion interfaces for the Dedicated Host and Dev Test listers. All are
// empty by default; custom lister methods may be declared here.
type (
	// DedicatedHostListerExpansion allows custom methods to be added to DedicatedHostLister.
	DedicatedHostListerExpansion interface{}
	// DedicatedHostNamespaceListerExpansion allows custom methods to be added to DedicatedHostNamespaceLister.
	DedicatedHostNamespaceListerExpansion interface{}
	// DedicatedHostGroupListerExpansion allows custom methods to be added to DedicatedHostGroupLister.
	DedicatedHostGroupListerExpansion interface{}
	// DedicatedHostGroupNamespaceListerExpansion allows custom methods to be added to DedicatedHostGroupNamespaceLister.
	DedicatedHostGroupNamespaceListerExpansion interface{}
	// DevTestLabListerExpansion allows custom methods to be added to DevTestLabLister.
	DevTestLabListerExpansion interface{}
	// DevTestLabNamespaceListerExpansion allows custom methods to be added to DevTestLabNamespaceLister.
	DevTestLabNamespaceListerExpansion interface{}
	// DevTestLinuxVirtualMachineListerExpansion allows custom methods to be added to DevTestLinuxVirtualMachineLister.
	DevTestLinuxVirtualMachineListerExpansion interface{}
	// DevTestLinuxVirtualMachineNamespaceListerExpansion allows custom methods to be added to DevTestLinuxVirtualMachineNamespaceLister.
	DevTestLinuxVirtualMachineNamespaceListerExpansion interface{}
	// DevTestPolicyListerExpansion allows custom methods to be added to DevTestPolicyLister.
	DevTestPolicyListerExpansion interface{}
	// DevTestPolicyNamespaceListerExpansion allows custom methods to be added to DevTestPolicyNamespaceLister.
	DevTestPolicyNamespaceListerExpansion interface{}
	// DevTestScheduleListerExpansion allows custom methods to be added to DevTestScheduleLister.
	DevTestScheduleListerExpansion interface{}
	// DevTestScheduleNamespaceListerExpansion allows custom methods to be added to DevTestScheduleNamespaceLister.
	DevTestScheduleNamespaceListerExpansion interface{}
)

// DevTestVirtualNetworkListerExpansion allows custom methods to be added to
// DevTestVirtualNetworkLister.
// Expansion interfaces for the remaining Dev Test, Devspace, Disk
// Encryption Set and first DNS record listers. All are empty by default;
// custom lister methods may be declared here.
type (
	// DevTestVirtualNetworkListerExpansion allows custom methods to be added to DevTestVirtualNetworkLister.
	DevTestVirtualNetworkListerExpansion interface{}
	// DevTestVirtualNetworkNamespaceListerExpansion allows custom methods to be added to DevTestVirtualNetworkNamespaceLister.
	DevTestVirtualNetworkNamespaceListerExpansion interface{}
	// DevTestWindowsVirtualMachineListerExpansion allows custom methods to be added to DevTestWindowsVirtualMachineLister.
	DevTestWindowsVirtualMachineListerExpansion interface{}
	// DevTestWindowsVirtualMachineNamespaceListerExpansion allows custom methods to be added to DevTestWindowsVirtualMachineNamespaceLister.
	DevTestWindowsVirtualMachineNamespaceListerExpansion interface{}
	// DevspaceControllerListerExpansion allows custom methods to be added to DevspaceControllerLister.
	DevspaceControllerListerExpansion interface{}
	// DevspaceControllerNamespaceListerExpansion allows custom methods to be added to DevspaceControllerNamespaceLister.
	DevspaceControllerNamespaceListerExpansion interface{}
	// DiskEncryptionSetListerExpansion allows custom methods to be added to DiskEncryptionSetLister.
	DiskEncryptionSetListerExpansion interface{}
	// DiskEncryptionSetNamespaceListerExpansion allows custom methods to be added to DiskEncryptionSetNamespaceLister.
	DiskEncryptionSetNamespaceListerExpansion interface{}
	// DnsARecordListerExpansion allows custom methods to be added to DnsARecordLister.
	DnsARecordListerExpansion interface{}
	// DnsARecordNamespaceListerExpansion allows custom methods to be added to DnsARecordNamespaceLister.
	DnsARecordNamespaceListerExpansion interface{}
	// DnsAaaaRecordListerExpansion allows custom methods to be added to DnsAaaaRecordLister.
	DnsAaaaRecordListerExpansion interface{}
	// DnsAaaaRecordNamespaceListerExpansion allows custom methods to be added to DnsAaaaRecordNamespaceLister.
	DnsAaaaRecordNamespaceListerExpansion interface{}
)

// DnsCaaRecordListerExpansion allows custom methods to be added to
// DnsCaaRecordLister.
type DnsCaaRecordListerExpansion interface{} // DnsCaaRecordNamespaceListerExpansion allows custom methods to be added to // DnsCaaRecordNamespaceLister. type DnsCaaRecordNamespaceListerExpansion interface{} // DnsCnameRecordListerExpansion allows custom methods to be added to // DnsCnameRecordLister. type DnsCnameRecordListerExpansion interface{} // DnsCnameRecordNamespaceListerExpansion allows custom methods to be added to // DnsCnameRecordNamespaceLister. type DnsCnameRecordNamespaceListerExpansion interface{} // DnsMxRecordListerExpansion allows custom methods to be added to // DnsMxRecordLister. type DnsMxRecordListerExpansion interface{} // DnsMxRecordNamespaceListerExpansion allows custom methods to be added to // DnsMxRecordNamespaceLister. type DnsMxRecordNamespaceListerExpansion interface{} // DnsNsRecordListerExpansion allows custom methods to be added to // DnsNsRecordLister. type DnsNsRecordListerExpansion interface{} // DnsNsRecordNamespaceListerExpansion allows custom methods to be added to // DnsNsRecordNamespaceLister. type DnsNsRecordNamespaceListerExpansion interface{} // DnsPtrRecordListerExpansion allows custom methods to be added to // DnsPtrRecordLister. type DnsPtrRecordListerExpansion interface{} // DnsPtrRecordNamespaceListerExpansion allows custom methods to be added to // DnsPtrRecordNamespaceLister. type DnsPtrRecordNamespaceListerExpansion interface{} // DnsSrvRecordListerExpansion allows custom methods to be added to // DnsSrvRecordLister. type DnsSrvRecordListerExpansion interface{} // DnsSrvRecordNamespaceListerExpansion allows custom methods to be added to // DnsSrvRecordNamespaceLister. type DnsSrvRecordNamespaceListerExpansion interface{} // DnsTxtRecordListerExpansion allows custom methods to be added to // DnsTxtRecordLister. type DnsTxtRecordListerExpansion interface{} // DnsTxtRecordNamespaceListerExpansion allows custom methods to be added to // DnsTxtRecordNamespaceLister. 
type DnsTxtRecordNamespaceListerExpansion interface{} // DnsZoneListerExpansion allows custom methods to be added to // DnsZoneLister. type DnsZoneListerExpansion interface{} // DnsZoneNamespaceListerExpansion allows custom methods to be added to // DnsZoneNamespaceLister. type DnsZoneNamespaceListerExpansion interface{} // EventgridDomainListerExpansion allows custom methods to be added to // EventgridDomainLister. type EventgridDomainListerExpansion interface{} // EventgridDomainNamespaceListerExpansion allows custom methods to be added to // EventgridDomainNamespaceLister. type EventgridDomainNamespaceListerExpansion interface{} // EventgridEventSubscriptionListerExpansion allows custom methods to be added to // EventgridEventSubscriptionLister. type EventgridEventSubscriptionListerExpansion interface{} // EventgridEventSubscriptionNamespaceListerExpansion allows custom methods to be added to // EventgridEventSubscriptionNamespaceLister. type EventgridEventSubscriptionNamespaceListerExpansion interface{} // EventgridTopicListerExpansion allows custom methods to be added to // EventgridTopicLister. type EventgridTopicListerExpansion interface{} // EventgridTopicNamespaceListerExpansion allows custom methods to be added to // EventgridTopicNamespaceLister. type EventgridTopicNamespaceListerExpansion interface{} // EventhubListerExpansion allows custom methods to be added to // EventhubLister. type EventhubListerExpansion interface{} // EventhubNamespaceListerExpansion allows custom methods to be added to // EventhubNamespaceLister. type EventhubNamespaceListerExpansion interface{} // EventhubAuthorizationRuleListerExpansion allows custom methods to be added to // EventhubAuthorizationRuleLister. type EventhubAuthorizationRuleListerExpansion interface{} // EventhubAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to // EventhubAuthorizationRuleNamespaceLister. 
type EventhubAuthorizationRuleNamespaceListerExpansion interface{} // EventhubConsumerGroupListerExpansion allows custom methods to be added to // EventhubConsumerGroupLister. type EventhubConsumerGroupListerExpansion interface{} // EventhubConsumerGroupNamespaceListerExpansion allows custom methods to be added to // EventhubConsumerGroupNamespaceLister. type EventhubConsumerGroupNamespaceListerExpansion interface{} // EventhubNamespaceAuthorizationRuleListerExpansion allows custom methods to be added to // EventhubNamespaceAuthorizationRuleLister. type EventhubNamespaceAuthorizationRuleListerExpansion interface{} // EventhubNamespaceAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to // EventhubNamespaceAuthorizationRuleNamespaceLister. type EventhubNamespaceAuthorizationRuleNamespaceListerExpansion interface{} // EventhubNamespaceDisasterRecoveryConfigListerExpansion allows custom methods to be added to // EventhubNamespaceDisasterRecoveryConfigLister. type EventhubNamespaceDisasterRecoveryConfigListerExpansion interface{} // EventhubNamespaceDisasterRecoveryConfigNamespaceListerExpansion allows custom methods to be added to // EventhubNamespaceDisasterRecoveryConfigNamespaceLister. type EventhubNamespaceDisasterRecoveryConfigNamespaceListerExpansion interface{} // EventhubNamespace_ListerExpansion allows custom methods to be added to // EventhubNamespace_Lister. type EventhubNamespace_ListerExpansion interface{} // EventhubNamespace_NamespaceListerExpansion allows custom methods to be added to // EventhubNamespace_NamespaceLister. type EventhubNamespace_NamespaceListerExpansion interface{} // ExpressRouteCircuitListerExpansion allows custom methods to be added to // ExpressRouteCircuitLister. type ExpressRouteCircuitListerExpansion interface{} // ExpressRouteCircuitNamespaceListerExpansion allows custom methods to be added to // ExpressRouteCircuitNamespaceLister. 
type ExpressRouteCircuitNamespaceListerExpansion interface{} // ExpressRouteCircuitAuthorizationListerExpansion allows custom methods to be added to // ExpressRouteCircuitAuthorizationLister. type ExpressRouteCircuitAuthorizationListerExpansion interface{} // ExpressRouteCircuitAuthorizationNamespaceListerExpansion allows custom methods to be added to // ExpressRouteCircuitAuthorizationNamespaceLister. type ExpressRouteCircuitAuthorizationNamespaceListerExpansion interface{} // ExpressRouteCircuitPeeringListerExpansion allows custom methods to be added to // ExpressRouteCircuitPeeringLister. type ExpressRouteCircuitPeeringListerExpansion interface{} // ExpressRouteCircuitPeeringNamespaceListerExpansion allows custom methods to be added to // ExpressRouteCircuitPeeringNamespaceLister. type ExpressRouteCircuitPeeringNamespaceListerExpansion interface{} // FirewallListerExpansion allows custom methods to be added to // FirewallLister. type FirewallListerExpansion interface{} // FirewallNamespaceListerExpansion allows custom methods to be added to // FirewallNamespaceLister. type FirewallNamespaceListerExpansion interface{} // FirewallApplicationRuleCollectionListerExpansion allows custom methods to be added to // FirewallApplicationRuleCollectionLister. type FirewallApplicationRuleCollectionListerExpansion interface{} // FirewallApplicationRuleCollectionNamespaceListerExpansion allows custom methods to be added to // FirewallApplicationRuleCollectionNamespaceLister. type FirewallApplicationRuleCollectionNamespaceListerExpansion interface{} // FirewallNATRuleCollectionListerExpansion allows custom methods to be added to // FirewallNATRuleCollectionLister. type FirewallNATRuleCollectionListerExpansion interface{} // FirewallNATRuleCollectionNamespaceListerExpansion allows custom methods to be added to // FirewallNATRuleCollectionNamespaceLister. 
type FirewallNATRuleCollectionNamespaceListerExpansion interface{} // FirewallNetworkRuleCollectionListerExpansion allows custom methods to be added to // FirewallNetworkRuleCollectionLister. type FirewallNetworkRuleCollectionListerExpansion interface{} // FirewallNetworkRuleCollectionNamespaceListerExpansion allows custom methods to be added to // FirewallNetworkRuleCollectionNamespaceLister. type FirewallNetworkRuleCollectionNamespaceListerExpansion interface{} // FrontdoorListerExpansion allows custom methods to be added to // FrontdoorLister. type FrontdoorListerExpansion interface{} // FrontdoorNamespaceListerExpansion allows custom methods to be added to // FrontdoorNamespaceLister. type FrontdoorNamespaceListerExpansion interface{} // FrontdoorFirewallPolicyListerExpansion allows custom methods to be added to // FrontdoorFirewallPolicyLister. type FrontdoorFirewallPolicyListerExpansion interface{} // FrontdoorFirewallPolicyNamespaceListerExpansion allows custom methods to be added to // FrontdoorFirewallPolicyNamespaceLister. type FrontdoorFirewallPolicyNamespaceListerExpansion interface{} // FunctionAppListerExpansion allows custom methods to be added to // FunctionAppLister. type FunctionAppListerExpansion interface{} // FunctionAppNamespaceListerExpansion allows custom methods to be added to // FunctionAppNamespaceLister. type FunctionAppNamespaceListerExpansion interface{} // HdinsightHadoopClusterListerExpansion allows custom methods to be added to // HdinsightHadoopClusterLister. type HdinsightHadoopClusterListerExpansion interface{} // HdinsightHadoopClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightHadoopClusterNamespaceLister. type HdinsightHadoopClusterNamespaceListerExpansion interface{} // HdinsightHbaseClusterListerExpansion allows custom methods to be added to // HdinsightHbaseClusterLister. 
type HdinsightHbaseClusterListerExpansion interface{} // HdinsightHbaseClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightHbaseClusterNamespaceLister. type HdinsightHbaseClusterNamespaceListerExpansion interface{} // HdinsightInteractiveQueryClusterListerExpansion allows custom methods to be added to // HdinsightInteractiveQueryClusterLister. type HdinsightInteractiveQueryClusterListerExpansion interface{} // HdinsightInteractiveQueryClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightInteractiveQueryClusterNamespaceLister. type HdinsightInteractiveQueryClusterNamespaceListerExpansion interface{} // HdinsightKafkaClusterListerExpansion allows custom methods to be added to // HdinsightKafkaClusterLister. type HdinsightKafkaClusterListerExpansion interface{} // HdinsightKafkaClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightKafkaClusterNamespaceLister. type HdinsightKafkaClusterNamespaceListerExpansion interface{} // HdinsightMlServicesClusterListerExpansion allows custom methods to be added to // HdinsightMlServicesClusterLister. type HdinsightMlServicesClusterListerExpansion interface{} // HdinsightMlServicesClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightMlServicesClusterNamespaceLister. type HdinsightMlServicesClusterNamespaceListerExpansion interface{} // HdinsightRserverClusterListerExpansion allows custom methods to be added to // HdinsightRserverClusterLister. type HdinsightRserverClusterListerExpansion interface{} // HdinsightRserverClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightRserverClusterNamespaceLister. type HdinsightRserverClusterNamespaceListerExpansion interface{} // HdinsightSparkClusterListerExpansion allows custom methods to be added to // HdinsightSparkClusterLister. 
type HdinsightSparkClusterListerExpansion interface{} // HdinsightSparkClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightSparkClusterNamespaceLister. type HdinsightSparkClusterNamespaceListerExpansion interface{} // HdinsightStormClusterListerExpansion allows custom methods to be added to // HdinsightStormClusterLister. type HdinsightStormClusterListerExpansion interface{} // HdinsightStormClusterNamespaceListerExpansion allows custom methods to be added to // HdinsightStormClusterNamespaceLister. type HdinsightStormClusterNamespaceListerExpansion interface{} // HealthcareServiceListerExpansion allows custom methods to be added to // HealthcareServiceLister. type HealthcareServiceListerExpansion interface{} // HealthcareServiceNamespaceListerExpansion allows custom methods to be added to // HealthcareServiceNamespaceLister. type HealthcareServiceNamespaceListerExpansion interface{} // ImageListerExpansion allows custom methods to be added to // ImageLister. type ImageListerExpansion interface{} // ImageNamespaceListerExpansion allows custom methods to be added to // ImageNamespaceLister. type ImageNamespaceListerExpansion interface{} // IotDpsListerExpansion allows custom methods to be added to // IotDpsLister. type IotDpsListerExpansion interface{} // IotDpsNamespaceListerExpansion allows custom methods to be added to // IotDpsNamespaceLister. type IotDpsNamespaceListerExpansion interface{} // IotDpsCertificateListerExpansion allows custom methods to be added to // IotDpsCertificateLister. type IotDpsCertificateListerExpansion interface{} // IotDpsCertificateNamespaceListerExpansion allows custom methods to be added to // IotDpsCertificateNamespaceLister. type IotDpsCertificateNamespaceListerExpansion interface{} // IothubListerExpansion allows custom methods to be added to // IothubLister. type IothubListerExpansion interface{} // IothubNamespaceListerExpansion allows custom methods to be added to // IothubNamespaceLister. 
type IothubNamespaceListerExpansion interface{} // IothubConsumerGroupListerExpansion allows custom methods to be added to // IothubConsumerGroupLister. type IothubConsumerGroupListerExpansion interface{} // IothubConsumerGroupNamespaceListerExpansion allows custom methods to be added to // IothubConsumerGroupNamespaceLister. type IothubConsumerGroupNamespaceListerExpansion interface{} // IothubDpsListerExpansion allows custom methods to be added to // IothubDpsLister. type IothubDpsListerExpansion interface{} // IothubDpsNamespaceListerExpansion allows custom methods to be added to // IothubDpsNamespaceLister. type IothubDpsNamespaceListerExpansion interface{} // IothubDpsCertificateListerExpansion allows custom methods to be added to // IothubDpsCertificateLister. type IothubDpsCertificateListerExpansion interface{} // IothubDpsCertificateNamespaceListerExpansion allows custom methods to be added to // IothubDpsCertificateNamespaceLister. type IothubDpsCertificateNamespaceListerExpansion interface{} // IothubDpsSharedAccessPolicyListerExpansion allows custom methods to be added to // IothubDpsSharedAccessPolicyLister. type IothubDpsSharedAccessPolicyListerExpansion interface{} // IothubDpsSharedAccessPolicyNamespaceListerExpansion allows custom methods to be added to // IothubDpsSharedAccessPolicyNamespaceLister. type IothubDpsSharedAccessPolicyNamespaceListerExpansion interface{} // IothubEndpointEventhubListerExpansion allows custom methods to be added to // IothubEndpointEventhubLister. type IothubEndpointEventhubListerExpansion interface{} // IothubEndpointEventhubNamespaceListerExpansion allows custom methods to be added to // IothubEndpointEventhubNamespaceLister. type IothubEndpointEventhubNamespaceListerExpansion interface{} // IothubEndpointServicebusQueueListerExpansion allows custom methods to be added to // IothubEndpointServicebusQueueLister. 
type IothubEndpointServicebusQueueListerExpansion interface{} // IothubEndpointServicebusQueueNamespaceListerExpansion allows custom methods to be added to // IothubEndpointServicebusQueueNamespaceLister. type IothubEndpointServicebusQueueNamespaceListerExpansion interface{} // IothubEndpointServicebusTopicListerExpansion allows custom methods to be added to // IothubEndpointServicebusTopicLister. type IothubEndpointServicebusTopicListerExpansion interface{} // IothubEndpointServicebusTopicNamespaceListerExpansion allows custom methods to be added to // IothubEndpointServicebusTopicNamespaceLister. type IothubEndpointServicebusTopicNamespaceListerExpansion interface{} // IothubEndpointStorageContainerListerExpansion allows custom methods to be added to // IothubEndpointStorageContainerLister. type IothubEndpointStorageContainerListerExpansion interface{} // IothubEndpointStorageContainerNamespaceListerExpansion allows custom methods to be added to // IothubEndpointStorageContainerNamespaceLister. type IothubEndpointStorageContainerNamespaceListerExpansion interface{} // IothubFallbackRouteListerExpansion allows custom methods to be added to // IothubFallbackRouteLister. type IothubFallbackRouteListerExpansion interface{} // IothubFallbackRouteNamespaceListerExpansion allows custom methods to be added to // IothubFallbackRouteNamespaceLister. type IothubFallbackRouteNamespaceListerExpansion interface{} // IothubRouteListerExpansion allows custom methods to be added to // IothubRouteLister. type IothubRouteListerExpansion interface{} // IothubRouteNamespaceListerExpansion allows custom methods to be added to // IothubRouteNamespaceLister. type IothubRouteNamespaceListerExpansion interface{} // IothubSharedAccessPolicyListerExpansion allows custom methods to be added to // IothubSharedAccessPolicyLister. 
type IothubSharedAccessPolicyListerExpansion interface{} // IothubSharedAccessPolicyNamespaceListerExpansion allows custom methods to be added to // IothubSharedAccessPolicyNamespaceLister. type IothubSharedAccessPolicyNamespaceListerExpansion interface{} // KeyVaultListerExpansion allows custom methods to be added to // KeyVaultLister. type KeyVaultListerExpansion interface{} // KeyVaultNamespaceListerExpansion allows custom methods to be added to // KeyVaultNamespaceLister. type KeyVaultNamespaceListerExpansion interface{} // KeyVaultAccessPolicyListerExpansion allows custom methods to be added to // KeyVaultAccessPolicyLister. type KeyVaultAccessPolicyListerExpansion interface{} // KeyVaultAccessPolicyNamespaceListerExpansion allows custom methods to be added to // KeyVaultAccessPolicyNamespaceLister. type KeyVaultAccessPolicyNamespaceListerExpansion interface{} // KeyVaultCertificateListerExpansion allows custom methods to be added to // KeyVaultCertificateLister. type KeyVaultCertificateListerExpansion interface{} // KeyVaultCertificateNamespaceListerExpansion allows custom methods to be added to // KeyVaultCertificateNamespaceLister. type KeyVaultCertificateNamespaceListerExpansion interface{} // KeyVaultKeyListerExpansion allows custom methods to be added to // KeyVaultKeyLister. type KeyVaultKeyListerExpansion interface{} // KeyVaultKeyNamespaceListerExpansion allows custom methods to be added to // KeyVaultKeyNamespaceLister. type KeyVaultKeyNamespaceListerExpansion interface{} // KeyVaultSecretListerExpansion allows custom methods to be added to // KeyVaultSecretLister. type KeyVaultSecretListerExpansion interface{} // KeyVaultSecretNamespaceListerExpansion allows custom methods to be added to // KeyVaultSecretNamespaceLister. type KeyVaultSecretNamespaceListerExpansion interface{} // KubernetesClusterListerExpansion allows custom methods to be added to // KubernetesClusterLister. 
type KubernetesClusterListerExpansion interface{} // KubernetesClusterNamespaceListerExpansion allows custom methods to be added to // KubernetesClusterNamespaceLister. type KubernetesClusterNamespaceListerExpansion interface{} // KubernetesClusterNodePoolListerExpansion allows custom methods to be added to // KubernetesClusterNodePoolLister. type KubernetesClusterNodePoolListerExpansion interface{} // KubernetesClusterNodePoolNamespaceListerExpansion allows custom methods to be added to // KubernetesClusterNodePoolNamespaceLister. type KubernetesClusterNodePoolNamespaceListerExpansion interface{} // KustoClusterListerExpansion allows custom methods to be added to // KustoClusterLister. type KustoClusterListerExpansion interface{} // KustoClusterNamespaceListerExpansion allows custom methods to be added to // KustoClusterNamespaceLister. type KustoClusterNamespaceListerExpansion interface{} // KustoDatabaseListerExpansion allows custom methods to be added to // KustoDatabaseLister. type KustoDatabaseListerExpansion interface{} // KustoDatabaseNamespaceListerExpansion allows custom methods to be added to // KustoDatabaseNamespaceLister. type KustoDatabaseNamespaceListerExpansion interface{} // KustoDatabasePrincipalListerExpansion allows custom methods to be added to // KustoDatabasePrincipalLister. type KustoDatabasePrincipalListerExpansion interface{} // KustoDatabasePrincipalNamespaceListerExpansion allows custom methods to be added to // KustoDatabasePrincipalNamespaceLister. type KustoDatabasePrincipalNamespaceListerExpansion interface{} // KustoEventhubDataConnectionListerExpansion allows custom methods to be added to // KustoEventhubDataConnectionLister. type KustoEventhubDataConnectionListerExpansion interface{} // KustoEventhubDataConnectionNamespaceListerExpansion allows custom methods to be added to // KustoEventhubDataConnectionNamespaceLister. 
type KustoEventhubDataConnectionNamespaceListerExpansion interface{} // LbListerExpansion allows custom methods to be added to // LbLister. type LbListerExpansion interface{} // LbNamespaceListerExpansion allows custom methods to be added to // LbNamespaceLister. type LbNamespaceListerExpansion interface{} // LbBackendAddressPoolListerExpansion allows custom methods to be added to // LbBackendAddressPoolLister. type LbBackendAddressPoolListerExpansion interface{} // LbBackendAddressPoolNamespaceListerExpansion allows custom methods to be added to // LbBackendAddressPoolNamespaceLister. type LbBackendAddressPoolNamespaceListerExpansion interface{} // LbNATPoolListerExpansion allows custom methods to be added to // LbNATPoolLister. type LbNATPoolListerExpansion interface{} // LbNATPoolNamespaceListerExpansion allows custom methods to be added to // LbNATPoolNamespaceLister. type LbNATPoolNamespaceListerExpansion interface{} // LbNATRuleListerExpansion allows custom methods to be added to // LbNATRuleLister. type LbNATRuleListerExpansion interface{} // LbNATRuleNamespaceListerExpansion allows custom methods to be added to // LbNATRuleNamespaceLister. type LbNATRuleNamespaceListerExpansion interface{} // LbOutboundRuleListerExpansion allows custom methods to be added to // LbOutboundRuleLister. type LbOutboundRuleListerExpansion interface{} // LbOutboundRuleNamespaceListerExpansion allows custom methods to be added to // LbOutboundRuleNamespaceLister. type LbOutboundRuleNamespaceListerExpansion interface{} // LbProbeListerExpansion allows custom methods to be added to // LbProbeLister. type LbProbeListerExpansion interface{} // LbProbeNamespaceListerExpansion allows custom methods to be added to // LbProbeNamespaceLister. type LbProbeNamespaceListerExpansion interface{} // LbRuleListerExpansion allows custom methods to be added to // LbRuleLister. 
type LbRuleListerExpansion interface{} // LbRuleNamespaceListerExpansion allows custom methods to be added to // LbRuleNamespaceLister. type LbRuleNamespaceListerExpansion interface{} // LocalNetworkGatewayListerExpansion allows custom methods to be added to // LocalNetworkGatewayLister. type LocalNetworkGatewayListerExpansion interface{} // LocalNetworkGatewayNamespaceListerExpansion allows custom methods to be added to // LocalNetworkGatewayNamespaceLister. type LocalNetworkGatewayNamespaceListerExpansion interface{} // LogAnalyticsLinkedServiceListerExpansion allows custom methods to be added to // LogAnalyticsLinkedServiceLister. type LogAnalyticsLinkedServiceListerExpansion interface{} // LogAnalyticsLinkedServiceNamespaceListerExpansion allows custom methods to be added to // LogAnalyticsLinkedServiceNamespaceLister. type LogAnalyticsLinkedServiceNamespaceListerExpansion interface{} // LogAnalyticsSolutionListerExpansion allows custom methods to be added to // LogAnalyticsSolutionLister. type LogAnalyticsSolutionListerExpansion interface{} // LogAnalyticsSolutionNamespaceListerExpansion allows custom methods to be added to // LogAnalyticsSolutionNamespaceLister. type LogAnalyticsSolutionNamespaceListerExpansion interface{} // LogAnalyticsWorkspaceListerExpansion allows custom methods to be added to // LogAnalyticsWorkspaceLister. type LogAnalyticsWorkspaceListerExpansion interface{} // LogAnalyticsWorkspaceNamespaceListerExpansion allows custom methods to be added to // LogAnalyticsWorkspaceNamespaceLister. type LogAnalyticsWorkspaceNamespaceListerExpansion interface{} // LogAnalyticsWorkspaceLinkedServiceListerExpansion allows custom methods to be added to // LogAnalyticsWorkspaceLinkedServiceLister. type LogAnalyticsWorkspaceLinkedServiceListerExpansion interface{} // LogAnalyticsWorkspaceLinkedServiceNamespaceListerExpansion allows custom methods to be added to // LogAnalyticsWorkspaceLinkedServiceNamespaceLister. 
type LogAnalyticsWorkspaceLinkedServiceNamespaceListerExpansion interface{} // LogicAppActionCustomListerExpansion allows custom methods to be added to // LogicAppActionCustomLister. type LogicAppActionCustomListerExpansion interface{} // LogicAppActionCustomNamespaceListerExpansion allows custom methods to be added to // LogicAppActionCustomNamespaceLister. type LogicAppActionCustomNamespaceListerExpansion interface{} // LogicAppActionHTTPListerExpansion allows custom methods to be added to // LogicAppActionHTTPLister. type LogicAppActionHTTPListerExpansion interface{} // LogicAppActionHTTPNamespaceListerExpansion allows custom methods to be added to // LogicAppActionHTTPNamespaceLister. type LogicAppActionHTTPNamespaceListerExpansion interface{} // LogicAppTriggerCustomListerExpansion allows custom methods to be added to // LogicAppTriggerCustomLister. type LogicAppTriggerCustomListerExpansion interface{} // LogicAppTriggerCustomNamespaceListerExpansion allows custom methods to be added to // LogicAppTriggerCustomNamespaceLister. type LogicAppTriggerCustomNamespaceListerExpansion interface{} // LogicAppTriggerHTTPRequestListerExpansion allows custom methods to be added to // LogicAppTriggerHTTPRequestLister. type LogicAppTriggerHTTPRequestListerExpansion interface{} // LogicAppTriggerHTTPRequestNamespaceListerExpansion allows custom methods to be added to // LogicAppTriggerHTTPRequestNamespaceLister. type LogicAppTriggerHTTPRequestNamespaceListerExpansion interface{} // LogicAppTriggerRecurrenceListerExpansion allows custom methods to be added to // LogicAppTriggerRecurrenceLister. type LogicAppTriggerRecurrenceListerExpansion interface{} // LogicAppTriggerRecurrenceNamespaceListerExpansion allows custom methods to be added to // LogicAppTriggerRecurrenceNamespaceLister. type LogicAppTriggerRecurrenceNamespaceListerExpansion interface{} // LogicAppWorkflowListerExpansion allows custom methods to be added to // LogicAppWorkflowLister. 
type LogicAppWorkflowListerExpansion interface{} // LogicAppWorkflowNamespaceListerExpansion allows custom methods to be added to // LogicAppWorkflowNamespaceLister. type LogicAppWorkflowNamespaceListerExpansion interface{} // ManagedDiskListerExpansion allows custom methods to be added to // ManagedDiskLister. type ManagedDiskListerExpansion interface{} // ManagedDiskNamespaceListerExpansion allows custom methods to be added to // ManagedDiskNamespaceLister. type ManagedDiskNamespaceListerExpansion interface{} // ManagementGroupListerExpansion allows custom methods to be added to // ManagementGroupLister. type ManagementGroupListerExpansion interface{} // ManagementGroupNamespaceListerExpansion allows custom methods to be added to // ManagementGroupNamespaceLister. type ManagementGroupNamespaceListerExpansion interface{} // ManagementLockListerExpansion allows custom methods to be added to // ManagementLockLister. type ManagementLockListerExpansion interface{} // ManagementLockNamespaceListerExpansion allows custom methods to be added to // ManagementLockNamespaceLister. type ManagementLockNamespaceListerExpansion interface{} // MapsAccountListerExpansion allows custom methods to be added to // MapsAccountLister. type MapsAccountListerExpansion interface{} // MapsAccountNamespaceListerExpansion allows custom methods to be added to // MapsAccountNamespaceLister. type MapsAccountNamespaceListerExpansion interface{} // MariadbConfigurationListerExpansion allows custom methods to be added to // MariadbConfigurationLister. type MariadbConfigurationListerExpansion interface{} // MariadbConfigurationNamespaceListerExpansion allows custom methods to be added to // MariadbConfigurationNamespaceLister. type MariadbConfigurationNamespaceListerExpansion interface{} // MariadbDatabaseListerExpansion allows custom methods to be added to // MariadbDatabaseLister. 
type MariadbDatabaseListerExpansion interface{}

// MariadbDatabaseNamespaceListerExpansion allows custom methods to be added to
// MariadbDatabaseNamespaceLister.
type MariadbDatabaseNamespaceListerExpansion interface{}

// MariadbFirewallRuleListerExpansion allows custom methods to be added to
// MariadbFirewallRuleLister.
type MariadbFirewallRuleListerExpansion interface{}

// MariadbFirewallRuleNamespaceListerExpansion allows custom methods to be added to
// MariadbFirewallRuleNamespaceLister.
type MariadbFirewallRuleNamespaceListerExpansion interface{}

// MariadbServerListerExpansion allows custom methods to be added to
// MariadbServerLister.
type MariadbServerListerExpansion interface{}

// MariadbServerNamespaceListerExpansion allows custom methods to be added to
// MariadbServerNamespaceLister.
type MariadbServerNamespaceListerExpansion interface{}

// MariadbVirtualNetworkRuleListerExpansion allows custom methods to be added to
// MariadbVirtualNetworkRuleLister.
type MariadbVirtualNetworkRuleListerExpansion interface{}

// MariadbVirtualNetworkRuleNamespaceListerExpansion allows custom methods to be added to
// MariadbVirtualNetworkRuleNamespaceLister.
type MariadbVirtualNetworkRuleNamespaceListerExpansion interface{}

// MarketplaceAgreementListerExpansion allows custom methods to be added to
// MarketplaceAgreementLister.
type MarketplaceAgreementListerExpansion interface{}

// MarketplaceAgreementNamespaceListerExpansion allows custom methods to be added to
// MarketplaceAgreementNamespaceLister.
type MarketplaceAgreementNamespaceListerExpansion interface{}

// MediaServicesAccountListerExpansion allows custom methods to be added to
// MediaServicesAccountLister.
type MediaServicesAccountListerExpansion interface{}

// MediaServicesAccountNamespaceListerExpansion allows custom methods to be added to
// MediaServicesAccountNamespaceLister.
type MediaServicesAccountNamespaceListerExpansion interface{}

// MetricAlertruleListerExpansion allows custom methods to be added to
// MetricAlertruleLister.
type MetricAlertruleListerExpansion interface{}

// MetricAlertruleNamespaceListerExpansion allows custom methods to be added to
// MetricAlertruleNamespaceLister.
type MetricAlertruleNamespaceListerExpansion interface{}

// MonitorActionGroupListerExpansion allows custom methods to be added to
// MonitorActionGroupLister.
type MonitorActionGroupListerExpansion interface{}

// MonitorActionGroupNamespaceListerExpansion allows custom methods to be added to
// MonitorActionGroupNamespaceLister.
type MonitorActionGroupNamespaceListerExpansion interface{}

// MonitorActivityLogAlertListerExpansion allows custom methods to be added to
// MonitorActivityLogAlertLister.
type MonitorActivityLogAlertListerExpansion interface{}

// MonitorActivityLogAlertNamespaceListerExpansion allows custom methods to be added to
// MonitorActivityLogAlertNamespaceLister.
type MonitorActivityLogAlertNamespaceListerExpansion interface{}

// MonitorAutoscaleSettingListerExpansion allows custom methods to be added to
// MonitorAutoscaleSettingLister.
type MonitorAutoscaleSettingListerExpansion interface{}

// MonitorAutoscaleSettingNamespaceListerExpansion allows custom methods to be added to
// MonitorAutoscaleSettingNamespaceLister.
type MonitorAutoscaleSettingNamespaceListerExpansion interface{}

// MonitorDiagnosticSettingListerExpansion allows custom methods to be added to
// MonitorDiagnosticSettingLister.
type MonitorDiagnosticSettingListerExpansion interface{}

// MonitorDiagnosticSettingNamespaceListerExpansion allows custom methods to be added to
// MonitorDiagnosticSettingNamespaceLister.
type MonitorDiagnosticSettingNamespaceListerExpansion interface{}

// MonitorLogProfileListerExpansion allows custom methods to be added to
// MonitorLogProfileLister.
type MonitorLogProfileListerExpansion interface{}

// MonitorLogProfileNamespaceListerExpansion allows custom methods to be added to
// MonitorLogProfileNamespaceLister.
type MonitorLogProfileNamespaceListerExpansion interface{}

// MonitorMetricAlertListerExpansion allows custom methods to be added to
// MonitorMetricAlertLister.
type MonitorMetricAlertListerExpansion interface{}

// MonitorMetricAlertNamespaceListerExpansion allows custom methods to be added to
// MonitorMetricAlertNamespaceLister.
type MonitorMetricAlertNamespaceListerExpansion interface{}

// MonitorMetricAlertruleListerExpansion allows custom methods to be added to
// MonitorMetricAlertruleLister.
type MonitorMetricAlertruleListerExpansion interface{}

// MonitorMetricAlertruleNamespaceListerExpansion allows custom methods to be added to
// MonitorMetricAlertruleNamespaceLister.
type MonitorMetricAlertruleNamespaceListerExpansion interface{}

// MssqlDatabaseVulnerabilityAssessmentRuleBaselineListerExpansion allows custom methods to be added to
// MssqlDatabaseVulnerabilityAssessmentRuleBaselineLister.
type MssqlDatabaseVulnerabilityAssessmentRuleBaselineListerExpansion interface{}

// MssqlDatabaseVulnerabilityAssessmentRuleBaselineNamespaceListerExpansion allows custom methods to be added to
// MssqlDatabaseVulnerabilityAssessmentRuleBaselineNamespaceLister.
type MssqlDatabaseVulnerabilityAssessmentRuleBaselineNamespaceListerExpansion interface{}

// MssqlElasticpoolListerExpansion allows custom methods to be added to
// MssqlElasticpoolLister.
type MssqlElasticpoolListerExpansion interface{}

// MssqlElasticpoolNamespaceListerExpansion allows custom methods to be added to
// MssqlElasticpoolNamespaceLister.
type MssqlElasticpoolNamespaceListerExpansion interface{}

// MssqlServerSecurityAlertPolicyListerExpansion allows custom methods to be added to
// MssqlServerSecurityAlertPolicyLister.
type MssqlServerSecurityAlertPolicyListerExpansion interface{}

// MssqlServerSecurityAlertPolicyNamespaceListerExpansion allows custom methods to be added to
// MssqlServerSecurityAlertPolicyNamespaceLister.
type MssqlServerSecurityAlertPolicyNamespaceListerExpansion interface{}

// MssqlServerVulnerabilityAssessmentListerExpansion allows custom methods to be added to
// MssqlServerVulnerabilityAssessmentLister.
type MssqlServerVulnerabilityAssessmentListerExpansion interface{}

// MssqlServerVulnerabilityAssessmentNamespaceListerExpansion allows custom methods to be added to
// MssqlServerVulnerabilityAssessmentNamespaceLister.
type MssqlServerVulnerabilityAssessmentNamespaceListerExpansion interface{}

// MysqlConfigurationListerExpansion allows custom methods to be added to
// MysqlConfigurationLister.
type MysqlConfigurationListerExpansion interface{}

// MysqlConfigurationNamespaceListerExpansion allows custom methods to be added to
// MysqlConfigurationNamespaceLister.
type MysqlConfigurationNamespaceListerExpansion interface{}

// MysqlDatabaseListerExpansion allows custom methods to be added to
// MysqlDatabaseLister.
type MysqlDatabaseListerExpansion interface{}

// MysqlDatabaseNamespaceListerExpansion allows custom methods to be added to
// MysqlDatabaseNamespaceLister.
type MysqlDatabaseNamespaceListerExpansion interface{}

// MysqlFirewallRuleListerExpansion allows custom methods to be added to
// MysqlFirewallRuleLister.
type MysqlFirewallRuleListerExpansion interface{}

// MysqlFirewallRuleNamespaceListerExpansion allows custom methods to be added to
// MysqlFirewallRuleNamespaceLister.
type MysqlFirewallRuleNamespaceListerExpansion interface{}

// MysqlServerListerExpansion allows custom methods to be added to
// MysqlServerLister.
type MysqlServerListerExpansion interface{}

// MysqlServerNamespaceListerExpansion allows custom methods to be added to
// MysqlServerNamespaceLister.
type MysqlServerNamespaceListerExpansion interface{}

// MysqlVirtualNetworkRuleListerExpansion allows custom methods to be added to
// MysqlVirtualNetworkRuleLister.
type MysqlVirtualNetworkRuleListerExpansion interface{}

// MysqlVirtualNetworkRuleNamespaceListerExpansion allows custom methods to be added to
// MysqlVirtualNetworkRuleNamespaceLister.
type MysqlVirtualNetworkRuleNamespaceListerExpansion interface{}

// NatGatewayListerExpansion allows custom methods to be added to
// NatGatewayLister.
type NatGatewayListerExpansion interface{}

// NatGatewayNamespaceListerExpansion allows custom methods to be added to
// NatGatewayNamespaceLister.
type NatGatewayNamespaceListerExpansion interface{}

// NetappAccountListerExpansion allows custom methods to be added to
// NetappAccountLister.
type NetappAccountListerExpansion interface{}

// NetappAccountNamespaceListerExpansion allows custom methods to be added to
// NetappAccountNamespaceLister.
type NetappAccountNamespaceListerExpansion interface{}

// NetappPoolListerExpansion allows custom methods to be added to
// NetappPoolLister.
type NetappPoolListerExpansion interface{}

// NetappPoolNamespaceListerExpansion allows custom methods to be added to
// NetappPoolNamespaceLister.
type NetappPoolNamespaceListerExpansion interface{}

// NetappSnapshotListerExpansion allows custom methods to be added to
// NetappSnapshotLister.
type NetappSnapshotListerExpansion interface{}

// NetappSnapshotNamespaceListerExpansion allows custom methods to be added to
// NetappSnapshotNamespaceLister.
type NetappSnapshotNamespaceListerExpansion interface{}

// NetappVolumeListerExpansion allows custom methods to be added to
// NetappVolumeLister.
type NetappVolumeListerExpansion interface{}

// NetappVolumeNamespaceListerExpansion allows custom methods to be added to
// NetappVolumeNamespaceLister.
type NetappVolumeNamespaceListerExpansion interface{}

// NetworkConnectionMonitorListerExpansion allows custom methods to be added to
// NetworkConnectionMonitorLister.
type NetworkConnectionMonitorListerExpansion interface{}

// NetworkConnectionMonitorNamespaceListerExpansion allows custom methods to be added to
// NetworkConnectionMonitorNamespaceLister.
type NetworkConnectionMonitorNamespaceListerExpansion interface{}

// NetworkDdosProtectionPlanListerExpansion allows custom methods to be added to
// NetworkDdosProtectionPlanLister.
type NetworkDdosProtectionPlanListerExpansion interface{}

// NetworkDdosProtectionPlanNamespaceListerExpansion allows custom methods to be added to
// NetworkDdosProtectionPlanNamespaceLister.
type NetworkDdosProtectionPlanNamespaceListerExpansion interface{}

// NetworkInterfaceListerExpansion allows custom methods to be added to
// NetworkInterfaceLister.
type NetworkInterfaceListerExpansion interface{}

// NetworkInterfaceNamespaceListerExpansion allows custom methods to be added to
// NetworkInterfaceNamespaceLister.
type NetworkInterfaceNamespaceListerExpansion interface{}

// NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationListerExpansion allows custom methods to be added to
// NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationLister.
type NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationListerExpansion interface{}

// NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationNamespaceListerExpansion allows custom methods to be added to
// NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationNamespaceLister.
type NetworkInterfaceApplicationGatewayBackendAddressPoolAssociationNamespaceListerExpansion interface{}

// NetworkInterfaceApplicationSecurityGroupAssociationListerExpansion allows custom methods to be added to
// NetworkInterfaceApplicationSecurityGroupAssociationLister.
type NetworkInterfaceApplicationSecurityGroupAssociationListerExpansion interface{}

// NetworkInterfaceApplicationSecurityGroupAssociationNamespaceListerExpansion allows custom methods to be added to
// NetworkInterfaceApplicationSecurityGroupAssociationNamespaceLister.
type NetworkInterfaceApplicationSecurityGroupAssociationNamespaceListerExpansion interface{}

// NetworkInterfaceBackendAddressPoolAssociationListerExpansion allows custom methods to be added to
// NetworkInterfaceBackendAddressPoolAssociationLister.
type NetworkInterfaceBackendAddressPoolAssociationListerExpansion interface{}

// NetworkInterfaceBackendAddressPoolAssociationNamespaceListerExpansion allows custom methods to be added to
// NetworkInterfaceBackendAddressPoolAssociationNamespaceLister.
type NetworkInterfaceBackendAddressPoolAssociationNamespaceListerExpansion interface{}

// NetworkInterfaceNATRuleAssociationListerExpansion allows custom methods to be added to
// NetworkInterfaceNATRuleAssociationLister.
type NetworkInterfaceNATRuleAssociationListerExpansion interface{}

// NetworkInterfaceNATRuleAssociationNamespaceListerExpansion allows custom methods to be added to
// NetworkInterfaceNATRuleAssociationNamespaceLister.
type NetworkInterfaceNATRuleAssociationNamespaceListerExpansion interface{}

// NetworkPacketCaptureListerExpansion allows custom methods to be added to
// NetworkPacketCaptureLister.
type NetworkPacketCaptureListerExpansion interface{}

// NetworkPacketCaptureNamespaceListerExpansion allows custom methods to be added to
// NetworkPacketCaptureNamespaceLister.
type NetworkPacketCaptureNamespaceListerExpansion interface{}

// NetworkProfileListerExpansion allows custom methods to be added to
// NetworkProfileLister.
type NetworkProfileListerExpansion interface{}

// NetworkProfileNamespaceListerExpansion allows custom methods to be added to
// NetworkProfileNamespaceLister.
type NetworkProfileNamespaceListerExpansion interface{}

// NetworkSecurityGroupListerExpansion allows custom methods to be added to
// NetworkSecurityGroupLister.
type NetworkSecurityGroupListerExpansion interface{}

// NetworkSecurityGroupNamespaceListerExpansion allows custom methods to be added to
// NetworkSecurityGroupNamespaceLister.
type NetworkSecurityGroupNamespaceListerExpansion interface{}

// NetworkSecurityRuleListerExpansion allows custom methods to be added to
// NetworkSecurityRuleLister.
type NetworkSecurityRuleListerExpansion interface{}

// NetworkSecurityRuleNamespaceListerExpansion allows custom methods to be added to
// NetworkSecurityRuleNamespaceLister.
type NetworkSecurityRuleNamespaceListerExpansion interface{}

// NetworkWatcherListerExpansion allows custom methods to be added to
// NetworkWatcherLister.
type NetworkWatcherListerExpansion interface{}

// NetworkWatcherNamespaceListerExpansion allows custom methods to be added to
// NetworkWatcherNamespaceLister.
type NetworkWatcherNamespaceListerExpansion interface{}

// NetworkWatcherFlowLogListerExpansion allows custom methods to be added to
// NetworkWatcherFlowLogLister.
type NetworkWatcherFlowLogListerExpansion interface{}

// NetworkWatcherFlowLogNamespaceListerExpansion allows custom methods to be added to
// NetworkWatcherFlowLogNamespaceLister.
type NetworkWatcherFlowLogNamespaceListerExpansion interface{}

// NotificationHubListerExpansion allows custom methods to be added to
// NotificationHubLister.
type NotificationHubListerExpansion interface{}

// NotificationHubNamespaceListerExpansion allows custom methods to be added to
// NotificationHubNamespaceLister.
type NotificationHubNamespaceListerExpansion interface{}

// NotificationHubAuthorizationRuleListerExpansion allows custom methods to be added to
// NotificationHubAuthorizationRuleLister.
type NotificationHubAuthorizationRuleListerExpansion interface{}

// NotificationHubAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// NotificationHubAuthorizationRuleNamespaceLister.
type NotificationHubAuthorizationRuleNamespaceListerExpansion interface{}

// NotificationHubNamespace_ListerExpansion allows custom methods to be added to
// NotificationHubNamespace_Lister.
type NotificationHubNamespace_ListerExpansion interface{}

// NotificationHubNamespace_NamespaceListerExpansion allows custom methods to be added to
// NotificationHubNamespace_NamespaceLister.
type NotificationHubNamespace_NamespaceListerExpansion interface{}

// PacketCaptureListerExpansion allows custom methods to be added to
// PacketCaptureLister.
type PacketCaptureListerExpansion interface{}

// PacketCaptureNamespaceListerExpansion allows custom methods to be added to
// PacketCaptureNamespaceLister.
type PacketCaptureNamespaceListerExpansion interface{}

// PointToSiteVPNGatewayListerExpansion allows custom methods to be added to
// PointToSiteVPNGatewayLister.
type PointToSiteVPNGatewayListerExpansion interface{}

// PointToSiteVPNGatewayNamespaceListerExpansion allows custom methods to be added to
// PointToSiteVPNGatewayNamespaceLister.
type PointToSiteVPNGatewayNamespaceListerExpansion interface{}

// PolicyAssignmentListerExpansion allows custom methods to be added to
// PolicyAssignmentLister.
type PolicyAssignmentListerExpansion interface{}

// PolicyAssignmentNamespaceListerExpansion allows custom methods to be added to
// PolicyAssignmentNamespaceLister.
type PolicyAssignmentNamespaceListerExpansion interface{}

// PolicyDefinitionListerExpansion allows custom methods to be added to
// PolicyDefinitionLister.
type PolicyDefinitionListerExpansion interface{}

// PolicyDefinitionNamespaceListerExpansion allows custom methods to be added to
// PolicyDefinitionNamespaceLister.
type PolicyDefinitionNamespaceListerExpansion interface{}

// PolicySetDefinitionListerExpansion allows custom methods to be added to
// PolicySetDefinitionLister.
type PolicySetDefinitionListerExpansion interface{}

// PolicySetDefinitionNamespaceListerExpansion allows custom methods to be added to
// PolicySetDefinitionNamespaceLister.
type PolicySetDefinitionNamespaceListerExpansion interface{}

// PostgresqlConfigurationListerExpansion allows custom methods to be added to
// PostgresqlConfigurationLister.
type PostgresqlConfigurationListerExpansion interface{}

// PostgresqlConfigurationNamespaceListerExpansion allows custom methods to be added to
// PostgresqlConfigurationNamespaceLister.
type PostgresqlConfigurationNamespaceListerExpansion interface{}

// PostgresqlDatabaseListerExpansion allows custom methods to be added to
// PostgresqlDatabaseLister.
type PostgresqlDatabaseListerExpansion interface{}

// PostgresqlDatabaseNamespaceListerExpansion allows custom methods to be added to
// PostgresqlDatabaseNamespaceLister.
type PostgresqlDatabaseNamespaceListerExpansion interface{}

// PostgresqlFirewallRuleListerExpansion allows custom methods to be added to
// PostgresqlFirewallRuleLister.
type PostgresqlFirewallRuleListerExpansion interface{}

// PostgresqlFirewallRuleNamespaceListerExpansion allows custom methods to be added to
// PostgresqlFirewallRuleNamespaceLister.
type PostgresqlFirewallRuleNamespaceListerExpansion interface{}

// PostgresqlServerListerExpansion allows custom methods to be added to
// PostgresqlServerLister.
type PostgresqlServerListerExpansion interface{}

// PostgresqlServerNamespaceListerExpansion allows custom methods to be added to
// PostgresqlServerNamespaceLister.
type PostgresqlServerNamespaceListerExpansion interface{}

// PostgresqlVirtualNetworkRuleListerExpansion allows custom methods to be added to
// PostgresqlVirtualNetworkRuleLister.
type PostgresqlVirtualNetworkRuleListerExpansion interface{}

// PostgresqlVirtualNetworkRuleNamespaceListerExpansion allows custom methods to be added to
// PostgresqlVirtualNetworkRuleNamespaceLister.
type PostgresqlVirtualNetworkRuleNamespaceListerExpansion interface{}

// PrivateDNSARecordListerExpansion allows custom methods to be added to
// PrivateDNSARecordLister.
type PrivateDNSARecordListerExpansion interface{}

// PrivateDNSARecordNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSARecordNamespaceLister.
type PrivateDNSARecordNamespaceListerExpansion interface{}

// PrivateDNSAaaaRecordListerExpansion allows custom methods to be added to
// PrivateDNSAaaaRecordLister.
type PrivateDNSAaaaRecordListerExpansion interface{}

// PrivateDNSAaaaRecordNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSAaaaRecordNamespaceLister.
type PrivateDNSAaaaRecordNamespaceListerExpansion interface{}

// PrivateDNSCnameRecordListerExpansion allows custom methods to be added to
// PrivateDNSCnameRecordLister.
type PrivateDNSCnameRecordListerExpansion interface{}

// PrivateDNSCnameRecordNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSCnameRecordNamespaceLister.
type PrivateDNSCnameRecordNamespaceListerExpansion interface{}

// PrivateDNSMxRecordListerExpansion allows custom methods to be added to
// PrivateDNSMxRecordLister.
type PrivateDNSMxRecordListerExpansion interface{}

// PrivateDNSMxRecordNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSMxRecordNamespaceLister.
type PrivateDNSMxRecordNamespaceListerExpansion interface{}

// PrivateDNSPtrRecordListerExpansion allows custom methods to be added to
// PrivateDNSPtrRecordLister.
type PrivateDNSPtrRecordListerExpansion interface{}

// PrivateDNSPtrRecordNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSPtrRecordNamespaceLister.
type PrivateDNSPtrRecordNamespaceListerExpansion interface{}

// PrivateDNSSrvRecordListerExpansion allows custom methods to be added to
// PrivateDNSSrvRecordLister.
type PrivateDNSSrvRecordListerExpansion interface{}

// PrivateDNSSrvRecordNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSSrvRecordNamespaceLister.
type PrivateDNSSrvRecordNamespaceListerExpansion interface{}

// PrivateDNSZoneListerExpansion allows custom methods to be added to
// PrivateDNSZoneLister.
type PrivateDNSZoneListerExpansion interface{}

// PrivateDNSZoneNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSZoneNamespaceLister.
type PrivateDNSZoneNamespaceListerExpansion interface{}

// PrivateDNSZoneVirtualNetworkLinkListerExpansion allows custom methods to be added to
// PrivateDNSZoneVirtualNetworkLinkLister.
type PrivateDNSZoneVirtualNetworkLinkListerExpansion interface{}

// PrivateDNSZoneVirtualNetworkLinkNamespaceListerExpansion allows custom methods to be added to
// PrivateDNSZoneVirtualNetworkLinkNamespaceLister.
type PrivateDNSZoneVirtualNetworkLinkNamespaceListerExpansion interface{}

// PrivateEndpointListerExpansion allows custom methods to be added to
// PrivateEndpointLister.
type PrivateEndpointListerExpansion interface{}

// PrivateEndpointNamespaceListerExpansion allows custom methods to be added to
// PrivateEndpointNamespaceLister.
type PrivateEndpointNamespaceListerExpansion interface{}

// PrivateLinkEndpointListerExpansion allows custom methods to be added to
// PrivateLinkEndpointLister.
type PrivateLinkEndpointListerExpansion interface{}

// PrivateLinkEndpointNamespaceListerExpansion allows custom methods to be added to
// PrivateLinkEndpointNamespaceLister.
type PrivateLinkEndpointNamespaceListerExpansion interface{}

// PrivateLinkServiceListerExpansion allows custom methods to be added to
// PrivateLinkServiceLister.
type PrivateLinkServiceListerExpansion interface{}

// PrivateLinkServiceNamespaceListerExpansion allows custom methods to be added to
// PrivateLinkServiceNamespaceLister.
type PrivateLinkServiceNamespaceListerExpansion interface{}

// ProximityPlacementGroupListerExpansion allows custom methods to be added to
// ProximityPlacementGroupLister.
type ProximityPlacementGroupListerExpansion interface{}

// ProximityPlacementGroupNamespaceListerExpansion allows custom methods to be added to
// ProximityPlacementGroupNamespaceLister.
type ProximityPlacementGroupNamespaceListerExpansion interface{}

// PublicIPListerExpansion allows custom methods to be added to
// PublicIPLister.
type PublicIPListerExpansion interface{}

// PublicIPNamespaceListerExpansion allows custom methods to be added to
// PublicIPNamespaceLister.
type PublicIPNamespaceListerExpansion interface{}

// PublicIPPrefixListerExpansion allows custom methods to be added to
// PublicIPPrefixLister.
type PublicIPPrefixListerExpansion interface{}

// PublicIPPrefixNamespaceListerExpansion allows custom methods to be added to
// PublicIPPrefixNamespaceLister.
type PublicIPPrefixNamespaceListerExpansion interface{}

// RecoveryNetworkMappingListerExpansion allows custom methods to be added to
// RecoveryNetworkMappingLister.
type RecoveryNetworkMappingListerExpansion interface{}

// RecoveryNetworkMappingNamespaceListerExpansion allows custom methods to be added to
// RecoveryNetworkMappingNamespaceLister.
type RecoveryNetworkMappingNamespaceListerExpansion interface{}

// RecoveryReplicatedVmListerExpansion allows custom methods to be added to
// RecoveryReplicatedVmLister.
type RecoveryReplicatedVmListerExpansion interface{}

// RecoveryReplicatedVmNamespaceListerExpansion allows custom methods to be added to
// RecoveryReplicatedVmNamespaceLister.
type RecoveryReplicatedVmNamespaceListerExpansion interface{}

// RecoveryServicesFabricListerExpansion allows custom methods to be added to
// RecoveryServicesFabricLister.
type RecoveryServicesFabricListerExpansion interface{}

// RecoveryServicesFabricNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesFabricNamespaceLister.
type RecoveryServicesFabricNamespaceListerExpansion interface{}

// RecoveryServicesProtectedVmListerExpansion allows custom methods to be added to
// RecoveryServicesProtectedVmLister.
type RecoveryServicesProtectedVmListerExpansion interface{}

// RecoveryServicesProtectedVmNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesProtectedVmNamespaceLister.
type RecoveryServicesProtectedVmNamespaceListerExpansion interface{}

// RecoveryServicesProtectionContainerListerExpansion allows custom methods to be added to
// RecoveryServicesProtectionContainerLister.
type RecoveryServicesProtectionContainerListerExpansion interface{}

// RecoveryServicesProtectionContainerNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesProtectionContainerNamespaceLister.
type RecoveryServicesProtectionContainerNamespaceListerExpansion interface{}

// RecoveryServicesProtectionContainerMappingListerExpansion allows custom methods to be added to
// RecoveryServicesProtectionContainerMappingLister.
type RecoveryServicesProtectionContainerMappingListerExpansion interface{}

// RecoveryServicesProtectionContainerMappingNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesProtectionContainerMappingNamespaceLister.
type RecoveryServicesProtectionContainerMappingNamespaceListerExpansion interface{}

// RecoveryServicesProtectionPolicyVmListerExpansion allows custom methods to be added to
// RecoveryServicesProtectionPolicyVmLister.
type RecoveryServicesProtectionPolicyVmListerExpansion interface{}

// RecoveryServicesProtectionPolicyVmNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesProtectionPolicyVmNamespaceLister.
type RecoveryServicesProtectionPolicyVmNamespaceListerExpansion interface{}

// RecoveryServicesReplicationPolicyListerExpansion allows custom methods to be added to
// RecoveryServicesReplicationPolicyLister.
type RecoveryServicesReplicationPolicyListerExpansion interface{}

// RecoveryServicesReplicationPolicyNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesReplicationPolicyNamespaceLister.
type RecoveryServicesReplicationPolicyNamespaceListerExpansion interface{}

// RecoveryServicesVaultListerExpansion allows custom methods to be added to
// RecoveryServicesVaultLister.
type RecoveryServicesVaultListerExpansion interface{}

// RecoveryServicesVaultNamespaceListerExpansion allows custom methods to be added to
// RecoveryServicesVaultNamespaceLister.
type RecoveryServicesVaultNamespaceListerExpansion interface{}

// RedisCacheListerExpansion allows custom methods to be added to
// RedisCacheLister.
type RedisCacheListerExpansion interface{}

// RedisCacheNamespaceListerExpansion allows custom methods to be added to
// RedisCacheNamespaceLister.
type RedisCacheNamespaceListerExpansion interface{}

// RedisFirewallRuleListerExpansion allows custom methods to be added to
// RedisFirewallRuleLister.
type RedisFirewallRuleListerExpansion interface{}

// RedisFirewallRuleNamespaceListerExpansion allows custom methods to be added to
// RedisFirewallRuleNamespaceLister.
type RedisFirewallRuleNamespaceListerExpansion interface{}

// RelayHybridConnectionListerExpansion allows custom methods to be added to
// RelayHybridConnectionLister.
type RelayHybridConnectionListerExpansion interface{}

// RelayHybridConnectionNamespaceListerExpansion allows custom methods to be added to
// RelayHybridConnectionNamespaceLister.
type RelayHybridConnectionNamespaceListerExpansion interface{}

// RelayNamespaceListerExpansion allows custom methods to be added to
// RelayNamespaceLister.
type RelayNamespaceListerExpansion interface{}

// RelayNamespaceNamespaceListerExpansion allows custom methods to be added to
// RelayNamespaceNamespaceLister.
type RelayNamespaceNamespaceListerExpansion interface{}

// ResourceGroupListerExpansion allows custom methods to be added to
// ResourceGroupLister.
type ResourceGroupListerExpansion interface{}

// ResourceGroupNamespaceListerExpansion allows custom methods to be added to
// ResourceGroupNamespaceLister.
type ResourceGroupNamespaceListerExpansion interface{}

// RoleAssignmentListerExpansion allows custom methods to be added to
// RoleAssignmentLister.
type RoleAssignmentListerExpansion interface{}

// RoleAssignmentNamespaceListerExpansion allows custom methods to be added to
// RoleAssignmentNamespaceLister.
type RoleAssignmentNamespaceListerExpansion interface{}

// RoleDefinitionListerExpansion allows custom methods to be added to
// RoleDefinitionLister.
type RoleDefinitionListerExpansion interface{}

// RoleDefinitionNamespaceListerExpansion allows custom methods to be added to
// RoleDefinitionNamespaceLister.
type RoleDefinitionNamespaceListerExpansion interface{}

// RouteListerExpansion allows custom methods to be added to
// RouteLister.
type RouteListerExpansion interface{}

// RouteNamespaceListerExpansion allows custom methods to be added to
// RouteNamespaceLister.
type RouteNamespaceListerExpansion interface{}

// RouteTableListerExpansion allows custom methods to be added to
// RouteTableLister.
type RouteTableListerExpansion interface{}

// RouteTableNamespaceListerExpansion allows custom methods to be added to
// RouteTableNamespaceLister.
type RouteTableNamespaceListerExpansion interface{}

// SchedulerJobListerExpansion allows custom methods to be added to
// SchedulerJobLister.
type SchedulerJobListerExpansion interface{}

// SchedulerJobNamespaceListerExpansion allows custom methods to be added to
// SchedulerJobNamespaceLister.
type SchedulerJobNamespaceListerExpansion interface{}

// SchedulerJobCollectionListerExpansion allows custom methods to be added to
// SchedulerJobCollectionLister.
type SchedulerJobCollectionListerExpansion interface{}

// SchedulerJobCollectionNamespaceListerExpansion allows custom methods to be added to
// SchedulerJobCollectionNamespaceLister.
type SchedulerJobCollectionNamespaceListerExpansion interface{}

// SearchServiceListerExpansion allows custom methods to be added to
// SearchServiceLister.
type SearchServiceListerExpansion interface{}

// SearchServiceNamespaceListerExpansion allows custom methods to be added to
// SearchServiceNamespaceLister.
type SearchServiceNamespaceListerExpansion interface{}

// SecurityCenterContactListerExpansion allows custom methods to be added to
// SecurityCenterContactLister.
type SecurityCenterContactListerExpansion interface{}

// SecurityCenterContactNamespaceListerExpansion allows custom methods to be added to
// SecurityCenterContactNamespaceLister.
type SecurityCenterContactNamespaceListerExpansion interface{}

// SecurityCenterSubscriptionPricingListerExpansion allows custom methods to be added to
// SecurityCenterSubscriptionPricingLister.
type SecurityCenterSubscriptionPricingListerExpansion interface{}

// SecurityCenterSubscriptionPricingNamespaceListerExpansion allows custom methods to be added to
// SecurityCenterSubscriptionPricingNamespaceLister.
type SecurityCenterSubscriptionPricingNamespaceListerExpansion interface{}

// SecurityCenterWorkspaceListerExpansion allows custom methods to be added to
// SecurityCenterWorkspaceLister.
type SecurityCenterWorkspaceListerExpansion interface{}

// SecurityCenterWorkspaceNamespaceListerExpansion allows custom methods to be added to
// SecurityCenterWorkspaceNamespaceLister.
type SecurityCenterWorkspaceNamespaceListerExpansion interface{}

// ServiceFabricClusterListerExpansion allows custom methods to be added to
// ServiceFabricClusterLister.
type ServiceFabricClusterListerExpansion interface{}

// ServiceFabricClusterNamespaceListerExpansion allows custom methods to be added to
// ServiceFabricClusterNamespaceLister.
type ServiceFabricClusterNamespaceListerExpansion interface{}

// ServicebusNamespaceListerExpansion allows custom methods to be added to
// ServicebusNamespaceLister.
type ServicebusNamespaceListerExpansion interface{}

// ServicebusNamespaceNamespaceListerExpansion allows custom methods to be added to
// ServicebusNamespaceNamespaceLister.
type ServicebusNamespaceNamespaceListerExpansion interface{}

// ServicebusNamespaceAuthorizationRuleListerExpansion allows custom methods to be added to
// ServicebusNamespaceAuthorizationRuleLister.
type ServicebusNamespaceAuthorizationRuleListerExpansion interface{}

// ServicebusNamespaceAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// ServicebusNamespaceAuthorizationRuleNamespaceLister.
type ServicebusNamespaceAuthorizationRuleNamespaceListerExpansion interface{}

// ServicebusQueueListerExpansion allows custom methods to be added to
// ServicebusQueueLister.
type ServicebusQueueListerExpansion interface{}

// ServicebusQueueNamespaceListerExpansion allows custom methods to be added to
// ServicebusQueueNamespaceLister.
type ServicebusQueueNamespaceListerExpansion interface{}

// ServicebusQueueAuthorizationRuleListerExpansion allows custom methods to be added to
// ServicebusQueueAuthorizationRuleLister.
type ServicebusQueueAuthorizationRuleListerExpansion interface{}

// ServicebusQueueAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// ServicebusQueueAuthorizationRuleNamespaceLister.
type ServicebusQueueAuthorizationRuleNamespaceListerExpansion interface{}

// ServicebusSubscriptionListerExpansion allows custom methods to be added to
// ServicebusSubscriptionLister.
type ServicebusSubscriptionListerExpansion interface{}

// ServicebusSubscriptionNamespaceListerExpansion allows custom methods to be added to
// ServicebusSubscriptionNamespaceLister.
type ServicebusSubscriptionNamespaceListerExpansion interface{}

// ServicebusSubscriptionRuleListerExpansion allows custom methods to be added to
// ServicebusSubscriptionRuleLister.
type ServicebusSubscriptionRuleListerExpansion interface{}

// ServicebusSubscriptionRuleNamespaceListerExpansion allows custom methods to be added to
// ServicebusSubscriptionRuleNamespaceLister.
type ServicebusSubscriptionRuleNamespaceListerExpansion interface{}

// ServicebusTopicListerExpansion allows custom methods to be added to
// ServicebusTopicLister.
type ServicebusTopicListerExpansion interface{}

// ServicebusTopicNamespaceListerExpansion allows custom methods to be added to
// ServicebusTopicNamespaceLister.
type ServicebusTopicNamespaceListerExpansion interface{}

// ServicebusTopicAuthorizationRuleListerExpansion allows custom methods to be added to
// ServicebusTopicAuthorizationRuleLister.
type ServicebusTopicAuthorizationRuleListerExpansion interface{}

// ServicebusTopicAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// ServicebusTopicAuthorizationRuleNamespaceLister.
type ServicebusTopicAuthorizationRuleNamespaceListerExpansion interface{}

// SharedImageListerExpansion allows custom methods to be added to
// SharedImageLister.
type SharedImageListerExpansion interface{}

// SharedImageNamespaceListerExpansion allows custom methods to be added to
// SharedImageNamespaceLister.
type SharedImageNamespaceListerExpansion interface{}

// SharedImageGalleryListerExpansion allows custom methods to be added to
// SharedImageGalleryLister.
type SharedImageGalleryListerExpansion interface{}

// SharedImageGalleryNamespaceListerExpansion allows custom methods to be added to
// SharedImageGalleryNamespaceLister.
type SharedImageGalleryNamespaceListerExpansion interface{}

// SharedImageVersionListerExpansion allows custom methods to be added to
// SharedImageVersionLister.
type SharedImageVersionListerExpansion interface{}

// SharedImageVersionNamespaceListerExpansion allows custom methods to be added to
// SharedImageVersionNamespaceLister.
type SharedImageVersionNamespaceListerExpansion interface{}

// SignalrServiceListerExpansion allows custom methods to be added to
// SignalrServiceLister.
type SignalrServiceListerExpansion interface{}

// SignalrServiceNamespaceListerExpansion allows custom methods to be added to
// SignalrServiceNamespaceLister.
type SignalrServiceNamespaceListerExpansion interface{}

// SiteRecoveryFabricListerExpansion allows custom methods to be added to
// SiteRecoveryFabricLister.
type SiteRecoveryFabricListerExpansion interface{}

// SiteRecoveryFabricNamespaceListerExpansion allows custom methods to be added to
// SiteRecoveryFabricNamespaceLister.
type SiteRecoveryFabricNamespaceListerExpansion interface{}

// SiteRecoveryNetworkMappingListerExpansion allows custom methods to be added to
// SiteRecoveryNetworkMappingLister.
type SiteRecoveryNetworkMappingListerExpansion interface{}

// SiteRecoveryNetworkMappingNamespaceListerExpansion allows custom methods to be added to
// SiteRecoveryNetworkMappingNamespaceLister.
type SiteRecoveryNetworkMappingNamespaceListerExpansion interface{}

// SiteRecoveryProtectionContainerListerExpansion allows custom methods to be added to
// SiteRecoveryProtectionContainerLister.
type SiteRecoveryProtectionContainerListerExpansion interface{}

// SiteRecoveryProtectionContainerNamespaceListerExpansion allows custom methods to be added to
// SiteRecoveryProtectionContainerNamespaceLister.
type SiteRecoveryProtectionContainerNamespaceListerExpansion interface{}

// SiteRecoveryProtectionContainerMappingListerExpansion allows custom methods to be added to
// SiteRecoveryProtectionContainerMappingLister.
type SiteRecoveryProtectionContainerMappingListerExpansion interface{}

// SiteRecoveryProtectionContainerMappingNamespaceListerExpansion allows custom methods to be added to
// SiteRecoveryProtectionContainerMappingNamespaceLister.
type SiteRecoveryProtectionContainerMappingNamespaceListerExpansion interface{}

// SiteRecoveryReplicatedVmListerExpansion allows custom methods to be added to
// SiteRecoveryReplicatedVmLister.
type SiteRecoveryReplicatedVmListerExpansion interface{}

// SiteRecoveryReplicatedVmNamespaceListerExpansion allows custom methods to be added to
// SiteRecoveryReplicatedVmNamespaceLister.
type SiteRecoveryReplicatedVmNamespaceListerExpansion interface{}

// SiteRecoveryReplicationPolicyListerExpansion allows custom methods to be added to
// SiteRecoveryReplicationPolicyLister.
type SiteRecoveryReplicationPolicyListerExpansion interface{}

// SiteRecoveryReplicationPolicyNamespaceListerExpansion allows custom methods to be added to
// SiteRecoveryReplicationPolicyNamespaceLister.
type SiteRecoveryReplicationPolicyNamespaceListerExpansion interface{}

// SnapshotListerExpansion allows custom methods to be added to
// SnapshotLister.
type SnapshotListerExpansion interface{}

// SnapshotNamespaceListerExpansion allows custom methods to be added to
// SnapshotNamespaceLister.
type SnapshotNamespaceListerExpansion interface{}

// SqlActiveDirectoryAdministratorListerExpansion allows custom methods to be added to
// SqlActiveDirectoryAdministratorLister.
type SqlActiveDirectoryAdministratorListerExpansion interface{}

// SqlActiveDirectoryAdministratorNamespaceListerExpansion allows custom methods to be added to
// SqlActiveDirectoryAdministratorNamespaceLister.
type SqlActiveDirectoryAdministratorNamespaceListerExpansion interface{}

// SqlDatabaseListerExpansion allows custom methods to be added to
// SqlDatabaseLister.
type SqlDatabaseListerExpansion interface{} // SqlDatabaseNamespaceListerExpansion allows custom methods to be added to // SqlDatabaseNamespaceLister. type SqlDatabaseNamespaceListerExpansion interface{} // SqlElasticpoolListerExpansion allows custom methods to be added to // SqlElasticpoolLister. type SqlElasticpoolListerExpansion interface{} // SqlElasticpoolNamespaceListerExpansion allows custom methods to be added to // SqlElasticpoolNamespaceLister. type SqlElasticpoolNamespaceListerExpansion interface{} // SqlFailoverGroupListerExpansion allows custom methods to be added to // SqlFailoverGroupLister. type SqlFailoverGroupListerExpansion interface{} // SqlFailoverGroupNamespaceListerExpansion allows custom methods to be added to // SqlFailoverGroupNamespaceLister. type SqlFailoverGroupNamespaceListerExpansion interface{} // SqlFirewallRuleListerExpansion allows custom methods to be added to // SqlFirewallRuleLister. type SqlFirewallRuleListerExpansion interface{} // SqlFirewallRuleNamespaceListerExpansion allows custom methods to be added to // SqlFirewallRuleNamespaceLister. type SqlFirewallRuleNamespaceListerExpansion interface{} // SqlServerListerExpansion allows custom methods to be added to // SqlServerLister. type SqlServerListerExpansion interface{} // SqlServerNamespaceListerExpansion allows custom methods to be added to // SqlServerNamespaceLister. type SqlServerNamespaceListerExpansion interface{} // SqlVirtualNetworkRuleListerExpansion allows custom methods to be added to // SqlVirtualNetworkRuleLister. type SqlVirtualNetworkRuleListerExpansion interface{} // SqlVirtualNetworkRuleNamespaceListerExpansion allows custom methods to be added to // SqlVirtualNetworkRuleNamespaceLister. type SqlVirtualNetworkRuleNamespaceListerExpansion interface{} // StorageAccountListerExpansion allows custom methods to be added to // StorageAccountLister. 
type StorageAccountListerExpansion interface{} // StorageAccountNamespaceListerExpansion allows custom methods to be added to // StorageAccountNamespaceLister. type StorageAccountNamespaceListerExpansion interface{} // StorageAccountNetworkRulesListerExpansion allows custom methods to be added to // StorageAccountNetworkRulesLister. type StorageAccountNetworkRulesListerExpansion interface{} // StorageAccountNetworkRulesNamespaceListerExpansion allows custom methods to be added to // StorageAccountNetworkRulesNamespaceLister. type StorageAccountNetworkRulesNamespaceListerExpansion interface{} // StorageBlobListerExpansion allows custom methods to be added to // StorageBlobLister. type StorageBlobListerExpansion interface{} // StorageBlobNamespaceListerExpansion allows custom methods to be added to // StorageBlobNamespaceLister. type StorageBlobNamespaceListerExpansion interface{} // StorageContainerListerExpansion allows custom methods to be added to // StorageContainerLister. type StorageContainerListerExpansion interface{} // StorageContainerNamespaceListerExpansion allows custom methods to be added to // StorageContainerNamespaceLister. type StorageContainerNamespaceListerExpansion interface{} // StorageDataLakeGen2FilesystemListerExpansion allows custom methods to be added to // StorageDataLakeGen2FilesystemLister. type StorageDataLakeGen2FilesystemListerExpansion interface{} // StorageDataLakeGen2FilesystemNamespaceListerExpansion allows custom methods to be added to // StorageDataLakeGen2FilesystemNamespaceLister. type StorageDataLakeGen2FilesystemNamespaceListerExpansion interface{} // StorageManagementPolicyListerExpansion allows custom methods to be added to // StorageManagementPolicyLister. type StorageManagementPolicyListerExpansion interface{} // StorageManagementPolicyNamespaceListerExpansion allows custom methods to be added to // StorageManagementPolicyNamespaceLister. 
type StorageManagementPolicyNamespaceListerExpansion interface{} // StorageQueueListerExpansion allows custom methods to be added to // StorageQueueLister. type StorageQueueListerExpansion interface{} // StorageQueueNamespaceListerExpansion allows custom methods to be added to // StorageQueueNamespaceLister. type StorageQueueNamespaceListerExpansion interface{} // StorageShareListerExpansion allows custom methods to be added to // StorageShareLister. type StorageShareListerExpansion interface{} // StorageShareNamespaceListerExpansion allows custom methods to be added to // StorageShareNamespaceLister. type StorageShareNamespaceListerExpansion interface{} // StorageShareDirectoryListerExpansion allows custom methods to be added to // StorageShareDirectoryLister. type StorageShareDirectoryListerExpansion interface{} // StorageShareDirectoryNamespaceListerExpansion allows custom methods to be added to // StorageShareDirectoryNamespaceLister. type StorageShareDirectoryNamespaceListerExpansion interface{} // StorageTableListerExpansion allows custom methods to be added to // StorageTableLister. type StorageTableListerExpansion interface{} // StorageTableNamespaceListerExpansion allows custom methods to be added to // StorageTableNamespaceLister. type StorageTableNamespaceListerExpansion interface{} // StorageTableEntityListerExpansion allows custom methods to be added to // StorageTableEntityLister. type StorageTableEntityListerExpansion interface{} // StorageTableEntityNamespaceListerExpansion allows custom methods to be added to // StorageTableEntityNamespaceLister. type StorageTableEntityNamespaceListerExpansion interface{} // StreamAnalyticsFunctionJavascriptUdfListerExpansion allows custom methods to be added to // StreamAnalyticsFunctionJavascriptUdfLister. 
type StreamAnalyticsFunctionJavascriptUdfListerExpansion interface{} // StreamAnalyticsFunctionJavascriptUdfNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsFunctionJavascriptUdfNamespaceLister. type StreamAnalyticsFunctionJavascriptUdfNamespaceListerExpansion interface{} // StreamAnalyticsJobListerExpansion allows custom methods to be added to // StreamAnalyticsJobLister. type StreamAnalyticsJobListerExpansion interface{} // StreamAnalyticsJobNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsJobNamespaceLister. type StreamAnalyticsJobNamespaceListerExpansion interface{} // StreamAnalyticsOutputBlobListerExpansion allows custom methods to be added to // StreamAnalyticsOutputBlobLister. type StreamAnalyticsOutputBlobListerExpansion interface{} // StreamAnalyticsOutputBlobNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsOutputBlobNamespaceLister. type StreamAnalyticsOutputBlobNamespaceListerExpansion interface{} // StreamAnalyticsOutputEventhubListerExpansion allows custom methods to be added to // StreamAnalyticsOutputEventhubLister. type StreamAnalyticsOutputEventhubListerExpansion interface{} // StreamAnalyticsOutputEventhubNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsOutputEventhubNamespaceLister. type StreamAnalyticsOutputEventhubNamespaceListerExpansion interface{} // StreamAnalyticsOutputMssqlListerExpansion allows custom methods to be added to // StreamAnalyticsOutputMssqlLister. type StreamAnalyticsOutputMssqlListerExpansion interface{} // StreamAnalyticsOutputMssqlNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsOutputMssqlNamespaceLister. type StreamAnalyticsOutputMssqlNamespaceListerExpansion interface{} // StreamAnalyticsOutputServicebusQueueListerExpansion allows custom methods to be added to // StreamAnalyticsOutputServicebusQueueLister. 
type StreamAnalyticsOutputServicebusQueueListerExpansion interface{} // StreamAnalyticsOutputServicebusQueueNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsOutputServicebusQueueNamespaceLister. type StreamAnalyticsOutputServicebusQueueNamespaceListerExpansion interface{} // StreamAnalyticsOutputServicebusTopicListerExpansion allows custom methods to be added to // StreamAnalyticsOutputServicebusTopicLister. type StreamAnalyticsOutputServicebusTopicListerExpansion interface{} // StreamAnalyticsOutputServicebusTopicNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsOutputServicebusTopicNamespaceLister. type StreamAnalyticsOutputServicebusTopicNamespaceListerExpansion interface{} // StreamAnalyticsReferenceInputBlobListerExpansion allows custom methods to be added to // StreamAnalyticsReferenceInputBlobLister. type StreamAnalyticsReferenceInputBlobListerExpansion interface{} // StreamAnalyticsReferenceInputBlobNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsReferenceInputBlobNamespaceLister. type StreamAnalyticsReferenceInputBlobNamespaceListerExpansion interface{} // StreamAnalyticsStreamInputBlobListerExpansion allows custom methods to be added to // StreamAnalyticsStreamInputBlobLister. type StreamAnalyticsStreamInputBlobListerExpansion interface{} // StreamAnalyticsStreamInputBlobNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsStreamInputBlobNamespaceLister. type StreamAnalyticsStreamInputBlobNamespaceListerExpansion interface{} // StreamAnalyticsStreamInputEventhubListerExpansion allows custom methods to be added to // StreamAnalyticsStreamInputEventhubLister. type StreamAnalyticsStreamInputEventhubListerExpansion interface{} // StreamAnalyticsStreamInputEventhubNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsStreamInputEventhubNamespaceLister. 
type StreamAnalyticsStreamInputEventhubNamespaceListerExpansion interface{} // StreamAnalyticsStreamInputIothubListerExpansion allows custom methods to be added to // StreamAnalyticsStreamInputIothubLister. type StreamAnalyticsStreamInputIothubListerExpansion interface{} // StreamAnalyticsStreamInputIothubNamespaceListerExpansion allows custom methods to be added to // StreamAnalyticsStreamInputIothubNamespaceLister. type StreamAnalyticsStreamInputIothubNamespaceListerExpansion interface{} // SubnetListerExpansion allows custom methods to be added to // SubnetLister. type SubnetListerExpansion interface{} // SubnetNamespaceListerExpansion allows custom methods to be added to // SubnetNamespaceLister. type SubnetNamespaceListerExpansion interface{} // SubnetNATGatewayAssociationListerExpansion allows custom methods to be added to // SubnetNATGatewayAssociationLister. type SubnetNATGatewayAssociationListerExpansion interface{} // SubnetNATGatewayAssociationNamespaceListerExpansion allows custom methods to be added to // SubnetNATGatewayAssociationNamespaceLister. type SubnetNATGatewayAssociationNamespaceListerExpansion interface{} // SubnetNetworkSecurityGroupAssociationListerExpansion allows custom methods to be added to // SubnetNetworkSecurityGroupAssociationLister. type SubnetNetworkSecurityGroupAssociationListerExpansion interface{} // SubnetNetworkSecurityGroupAssociationNamespaceListerExpansion allows custom methods to be added to // SubnetNetworkSecurityGroupAssociationNamespaceLister. type SubnetNetworkSecurityGroupAssociationNamespaceListerExpansion interface{} // SubnetRouteTableAssociationListerExpansion allows custom methods to be added to // SubnetRouteTableAssociationLister. type SubnetRouteTableAssociationListerExpansion interface{} // SubnetRouteTableAssociationNamespaceListerExpansion allows custom methods to be added to // SubnetRouteTableAssociationNamespaceLister. 
type SubnetRouteTableAssociationNamespaceListerExpansion interface{} // TemplateDeploymentListerExpansion allows custom methods to be added to // TemplateDeploymentLister. type TemplateDeploymentListerExpansion interface{} // TemplateDeploymentNamespaceListerExpansion allows custom methods to be added to // TemplateDeploymentNamespaceLister. type TemplateDeploymentNamespaceListerExpansion interface{} // TrafficManagerEndpointListerExpansion allows custom methods to be added to // TrafficManagerEndpointLister. type TrafficManagerEndpointListerExpansion interface{} // TrafficManagerEndpointNamespaceListerExpansion allows custom methods to be added to // TrafficManagerEndpointNamespaceLister. type TrafficManagerEndpointNamespaceListerExpansion interface{} // TrafficManagerProfileListerExpansion allows custom methods to be added to // TrafficManagerProfileLister. type TrafficManagerProfileListerExpansion interface{} // TrafficManagerProfileNamespaceListerExpansion allows custom methods to be added to // TrafficManagerProfileNamespaceLister. type TrafficManagerProfileNamespaceListerExpansion interface{} // UserAssignedIdentityListerExpansion allows custom methods to be added to // UserAssignedIdentityLister. type UserAssignedIdentityListerExpansion interface{} // UserAssignedIdentityNamespaceListerExpansion allows custom methods to be added to // UserAssignedIdentityNamespaceLister. type UserAssignedIdentityNamespaceListerExpansion interface{} // VirtualHubListerExpansion allows custom methods to be added to // VirtualHubLister. type VirtualHubListerExpansion interface{} // VirtualHubNamespaceListerExpansion allows custom methods to be added to // VirtualHubNamespaceLister. type VirtualHubNamespaceListerExpansion interface{} // VirtualMachineListerExpansion allows custom methods to be added to // VirtualMachineLister. 
type VirtualMachineListerExpansion interface{} // VirtualMachineNamespaceListerExpansion allows custom methods to be added to // VirtualMachineNamespaceLister. type VirtualMachineNamespaceListerExpansion interface{} // VirtualMachineDataDiskAttachmentListerExpansion allows custom methods to be added to // VirtualMachineDataDiskAttachmentLister. type VirtualMachineDataDiskAttachmentListerExpansion interface{} // VirtualMachineDataDiskAttachmentNamespaceListerExpansion allows custom methods to be added to // VirtualMachineDataDiskAttachmentNamespaceLister. type VirtualMachineDataDiskAttachmentNamespaceListerExpansion interface{} // VirtualMachineExtensionListerExpansion allows custom methods to be added to // VirtualMachineExtensionLister. type VirtualMachineExtensionListerExpansion interface{} // VirtualMachineExtensionNamespaceListerExpansion allows custom methods to be added to // VirtualMachineExtensionNamespaceLister. type VirtualMachineExtensionNamespaceListerExpansion interface{} // VirtualMachineScaleSetListerExpansion allows custom methods to be added to // VirtualMachineScaleSetLister. type VirtualMachineScaleSetListerExpansion interface{} // VirtualMachineScaleSetNamespaceListerExpansion allows custom methods to be added to // VirtualMachineScaleSetNamespaceLister. type VirtualMachineScaleSetNamespaceListerExpansion interface{} // VirtualNetworkListerExpansion allows custom methods to be added to // VirtualNetworkLister. type VirtualNetworkListerExpansion interface{} // VirtualNetworkNamespaceListerExpansion allows custom methods to be added to // VirtualNetworkNamespaceLister. type VirtualNetworkNamespaceListerExpansion interface{} // VirtualNetworkGatewayListerExpansion allows custom methods to be added to // VirtualNetworkGatewayLister. type VirtualNetworkGatewayListerExpansion interface{} // VirtualNetworkGatewayNamespaceListerExpansion allows custom methods to be added to // VirtualNetworkGatewayNamespaceLister. 
type VirtualNetworkGatewayNamespaceListerExpansion interface{} // VirtualNetworkGatewayConnectionListerExpansion allows custom methods to be added to // VirtualNetworkGatewayConnectionLister. type VirtualNetworkGatewayConnectionListerExpansion interface{} // VirtualNetworkGatewayConnectionNamespaceListerExpansion allows custom methods to be added to // VirtualNetworkGatewayConnectionNamespaceLister. type VirtualNetworkGatewayConnectionNamespaceListerExpansion interface{} // VirtualNetworkPeeringListerExpansion allows custom methods to be added to // VirtualNetworkPeeringLister. type VirtualNetworkPeeringListerExpansion interface{} // VirtualNetworkPeeringNamespaceListerExpansion allows custom methods to be added to // VirtualNetworkPeeringNamespaceLister. type VirtualNetworkPeeringNamespaceListerExpansion interface{} // VirtualWANListerExpansion allows custom methods to be added to // VirtualWANLister. type VirtualWANListerExpansion interface{} // VirtualWANNamespaceListerExpansion allows custom methods to be added to // VirtualWANNamespaceLister. type VirtualWANNamespaceListerExpansion interface{} // VpnGatewayListerExpansion allows custom methods to be added to // VpnGatewayLister. type VpnGatewayListerExpansion interface{} // VpnGatewayNamespaceListerExpansion allows custom methods to be added to // VpnGatewayNamespaceLister. type VpnGatewayNamespaceListerExpansion interface{} // VpnServerConfigurationListerExpansion allows custom methods to be added to // VpnServerConfigurationLister. type VpnServerConfigurationListerExpansion interface{} // VpnServerConfigurationNamespaceListerExpansion allows custom methods to be added to // VpnServerConfigurationNamespaceLister. type VpnServerConfigurationNamespaceListerExpansion interface{} // WebApplicationFirewallPolicyListerExpansion allows custom methods to be added to // WebApplicationFirewallPolicyLister. 
type WebApplicationFirewallPolicyListerExpansion interface{} // WebApplicationFirewallPolicyNamespaceListerExpansion allows custom methods to be added to // WebApplicationFirewallPolicyNamespaceLister. type WebApplicationFirewallPolicyNamespaceListerExpansion interface{}
<filename>poodle/src/components/Info/RowType/UserImg.tsx import React, { FC, useCallback, useEffect, useState } from 'react'; import { useDispatch, useSelector } from 'react-redux'; import { InfoPicture } from '../../../styles/Info'; import { modalOn, REDERRORMODAL } from '@/core/redux/actions/Modal'; import { setPictureCall, setPictureUrl } from '@/core/redux/actions/Info'; import { useReGenerateTokenAndDoCallback } from '@/lib/utils/function'; import { RootState } from '@/core/redux/reducer'; interface Props { valueChangeHandler: (value: string) => void; img: string; } const ACCEPT_FILE_TYPE = '.jpg,.png,.jpeg,.jpeg2000,.JPG,.PNG,.JPEG'; const UserImg: FC<Props> = () => { const [file, fileChange] = useState<File>(new File([], 'dummy.txt')); const dispatch = useDispatch(); const { setImgError, pictureUrl } = useSelector( (state: RootState) => state.InfoState, ); const setImgGenerateTokenAndDoCallback = useReGenerateTokenAndDoCallback(() => dispatch(setPictureCall({ picture: file })), ); const inputChangeHandler = useCallback((event: React.ChangeEvent) => { const target = event.target as HTMLInputElement; const files = target.files; if (!files || !files[0]) return; const file = files[0]; if (!isFileTypeAble(file)) { dispatch(modalOn(REDERRORMODAL)); return; } fileChange(file); const fileUrl = URL.createObjectURL(file); dispatch(setPictureUrl({ pictureUrl: fileUrl })); dispatch(setPictureCall({ picture: file })); }, []); const isFileTypeAble = useCallback((file: File) => { const fileName = getFileName(file); const acceptFileTypes = spliceAcceptFileTypeString(ACCEPT_FILE_TYPE); for (let acceptFileType of acceptFileTypes) { if (fileName.includes(acceptFileType)) { return true; } } return false; }, []); const spliceAcceptFileTypeString = useCallback( (acceptFileTypeString: string): string[] => { const acceptFileTypes: string[] = acceptFileTypeString.split(','); return acceptFileTypes; }, [], ); const getFileName = useCallback((file: File) => { return file.name; }, []); 
useEffect(() => { if (setImgError.status === 401) { setImgGenerateTokenAndDoCallback(); } }, [setImgError]); return ( <InfoPicture> <label> <input type='file' onChange={inputChangeHandler} accept='.gif, .jpg, .png, .jpeg, .jpeg2000' /> {pictureUrl ? ( <img src={pictureUrl} alt='사진' /> ) : ( <div> <p>증명사진을 첨부해주세요</p> <p>(JPG,JPEG,JPEG2000,PNG)</p> </div> )} </label> </InfoPicture> ); }; export default UserImg;
"Greenwashing" is the use of shameless PR campaigns by notorious polluters to portray their corporations as benign, Bambi-loving protectors of nature. But now comes a new phenomenon: "Greedwashing." And who's in greater need of a greed bath than Wall Street banksters? So, splish-splash and rub-a-dub, here came JPMorgan Chase with a holiday season PR campaign intended to scrub off its greed. The message was that Chase is not merely a bailed-out, fee-grubbing, casino huckster, but a generous philanthropist that wants only to benefit humanity. Rather than simply presenting this image makeover in an ad, however, Chase created a two-hour television special on NBC devoted to itself. Called the American Giving Awards, the show profiled community groups that had received charitable contributions from the Wall Street bank. Isn't that just precious? If any viewers missed the point that Chase is not really a financial behemoth, but a bunch of big-hearted bankers who give away money, the show was spiked with eight 30-second commercials about the bank, plus frequent reminders that the altruism being displayed was "presented by Chase." Well ... actually, this two-hour greedwash was also presented by us taxpayers, for the cost of the show and the donations to the groups are all deductible from the corporation's income taxes. Yet, we got no credit for our help. Probably an oversight. But the cheesiest part of the whole fraud is that the total amount of Chase's "magnanimous" donations to the five groups was $2 million. Come on — this outfit had $104 billion in revenues in the past year and showered its CEO with $23 million in pay, including a $5 million cash bonus. At JPMorgan Chase, charity really does begin at home, and its dab of trumped-up philanthropy won't wash off a speck of its greed. Jim Hightower is the best-selling author of Swim Against the Current: Even a Dead Fish Can Go With the Flow, on sale now from Wiley Publishing. For more information, visit jimhightower.com.
// IncrementUserAccountCount increments the number of UserAccount for the given member cluster in the cached counter func IncrementUserAccountCount(logger logr.Logger, clusterName string) { write(func() { cachedCounts.UserAccountsPerClusterCounts[clusterName]++ logger.Info("incremented UserAccountsPerClusterCounts", "clusterName", clusterName, "value", cachedCounts.UserAccountsPerClusterCounts[clusterName]) metrics.UserAccountGaugeVec.WithLabelValues(clusterName).Set(float64(cachedCounts.UserAccountsPerClusterCounts[clusterName])) }) }
#include <types.h>
#include <sys/logging.h>
#include <sys/console.h>
#include <version.h>
#include <stdio.h>
#include <sys/multiboot.h>
#include <sys/memory.h>
#include <graphics/svga.h>
#include <graphics/video.h>
#include <string.h>

// Linker-provided symbols bracketing the kernel image in RAM; their
// *addresses* (not values) are the start/end of the image.
extern uint32_t _kernel_start, _kernel_end;

#if defined(__cplusplus)
extern "C" /* Use C linkage. */
#endif
int x86_init_descriptor_tables();
void pit_install(uint32_t frequency);
void kmain(const multiboot_info_t * multiboot);
void kbd_init();

/// The entry point for the x86 version of the NesOS Microkernel
/// @param magic     value left in EAX by the bootloader; must equal
///                  MULTIBOOT_BOOTLOADER_MAGIC for a valid multiboot handoff
/// @param multiboot pointer to the multiboot information structure
#if defined(__cplusplus)
extern "C" /* Use C linkage for kernel_main. */
#endif
void kernel_entry(int magic, const multiboot_info_t * multiboot) {
    // NOTE(review): multiboot is dereferenced here *before* the magic check
    // below validates the handoff — confirm this ordering is intentional.
    video_init((svga_mode_info_t *)multiboot->vbe_mode_info);
    console_init();
    //~ printk("ok", (svga_mode_info_t *)multiboot->vbe_mode_info->screenheight);
    console_printdiv();
    printk("ok", NAME_S, VERSION_S);
#ifdef DEBUG
    // NOTE(review): the format string has two %X specifiers but three value
    // arguments are passed; presumably printk ignores the extra — verify.
    printk("debug", "kernel image(ram): 0x%X - 0x%X (", \
    &_kernel_start, &_kernel_end, &_kernel_end - &_kernel_start);
    // Pretty-print the image size (kernel end minus start) in the best unit.
    logging_printbestunit(&_kernel_end - &_kernel_start, 0);
    printf(")\n");
#endif
    if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
        // Bad handoff: report and halt rather than continue with an
        // untrusted multiboot structure.
        printk("fail", "NesOS was booted improperly by the bootloader\n");
        printk("info", "\\==> Kernel is now halting\n");
        return;
    }
    printk("status", "Initialising the processor...\n");
    x86_init_descriptor_tables();  // GDT/IDT setup
    printk("cpu", "Starting interrupts...\n");
    printk("device", "Starting (basic) PIT...\n");
    pit_install(1000);  // programmable interval timer at 1000 Hz
    kbd_init();
    asm("sti");  // enable hardware interrupts now that the IDT/PIT are ready
    // Find the location of our initial ramdisk.
    assert(multiboot->mods_count > 0);
    // mods_addr+4 is the second u32 of the first module entry (module end).
    u32 initrd_end = *(u32*)(multiboot->mods_addr+4);
    u32 placement=(u32)&_kernel_end;
    if(placement<initrd_end) placement=initrd_end; // Don't trample our module with placement accesses, please!
    // Start the placement allocator above both the kernel and the initrd.
    init_kmalloc((uintptr_t)placement);
    printk("ok", "Starting PMM...\n");
    // mem_upper is in KiB per the multiboot spec, hence the *1024.
    init_pmm((uintptr_t)&_kernel_start, multiboot->mem_upper*1024);
    //~ multiboot_memory_map_t * mmap = (multiboot_memory_map_t*)multiboot->mmap_addr;
    //~ while ((uint32_t)mmap < (uint32_t)multiboot->mmap_addr + (uint32_t)multiboot->mmap_length) {
    //~ printf("0x%08X->0x%08X (type %d)\n",mmap->addr,mmap->len,mmap->type);
    //~ mmap = (multiboot_memory_map_t*) ( (unsigned int)mmap + mmap->size + sizeof(unsigned int) );
    //~ }
    printk("debug", "Exiting Boot\n");
    console_printdiv();
    kmain(multiboot);  // hand off to the main kernel loop; not expected to return
    return;
}
// ===================================================================================================================== // Create a helper invocation query. Value* BuilderRecorder::CreateIsHelperInvocation( const Twine& instName) { return Record(Opcode::IsHelperInvocation, getInt1Ty(), {}, instName); }
A progressive view-dependent technique for interactive 3-D mesh transmission A view-dependent graphics streaming scheme is proposed in this work that facilitates interactive streaming and browsing of three-dimensional (3-D) graphics models. First, a 3-D model is split into several partitions. Second, each partition is simplified and coded independently. Finally, the compressed data is sent in order of relevance to the user's requests to maximize visual quality. Specifically, the server can transmit visible parts in detail, while cutting out invisible parts. Experimental results demonstrate that the proposed algorithm reduces the required transmission bandwidth, and provides an acceptable visual quality even at low bit rates.