// isRefreshNeeded determines if the cached object should be refreshed.
func (o *cacheObject) isRefreshNeeded() bool {
    if o.refreshNeeded {
        return true
    }
    if o.err == nil {
        return false
    }
    if o.nextRetryTime == 0 {
        return true
    }
    return o.nextRetryTime <= time.Now().UnixNano()
}
import { Match, MatchTuple, Pattern } from '../../types';
import { MatchFunction } from '../types';
import { matchMapLike } from './mapLike';
import { hasKey } from '../../util';

export const matchObject = (
    pattern: Pattern,
    object: Record<string, any>,
    match: MatchFunction,
    matches: Match,
): MatchTuple => {
    const has = (key: string) => hasKey(object, key);
    const get = (key: string) => object[key];
    return matchMapLike(pattern, match, matches, has, get);
};
# poly_implementation/node.py
#!/usr/bin/env python3
"""
<NAME>
MIT License 2020
"""


class Node(object):
    """A Node class for the poly-tree."""

    def __init__(self, board, points=0):
        self.board = board
        self.children = []
        self.points = points

    def __str__(self):
        return str(self.board)
Chapin senior softball players Ariana Valles, Alyssa Lozoya and Nazareth Fierro have been working hard and hoping for a moment like the one they will face beginning Thursday in Seminole.

The Huskies will play Canyon High in the Region 1-5A championship series with a trip to next week's Final Four on the line. The best-of-three series gets going with a night game Thursday, followed by a morning game on Friday and a morning game on Saturday if necessary. This is the first-ever trip to the regional final for the Huskies.

"We've talked about getting this far and now it's actually here," said centerfielder Lozoya, who was also a standout for the Chapin volleyball team in the fall. "It's been a great season. We enjoy being around each other, we enjoy practice, we enjoy the games. We want to keep this going as long as we can."

The last team from El Paso to reach the Final Four was Bel Air in 1994, when the team finished as the runner-up. The Highlanders also went in 1993.

Lozoya, Fierro and Valles each have their own stories, and each has contributed mightily to the program. Fierro and Valles are four-year members of the varsity, and Lozoya is in her third year on the varsity. Lozoya's older sister, Deborah, was a standout on the team a few years ago. Alyssa Lozoya didn't play her freshman year, but returned because she missed the game.

Alyssa Lozoya has been huge in the outfield and has hit two homers in this year's playoffs. Fierro has been steady all season, hitting over .400 with 40 RBIs while playing left field. Valles, who signed with UTEP last fall, has been steady on defense at shortstop and a clutch hitter all year.

"We're focused as a team and we've come a long way this season," Valles said. "This has really been a special season and we've continued to grow as the season has gone along."

Valles, who also played basketball for the Huskies, and Lozoya have had plenty of postseason success in both softball and the other sports they have played. Both agree that postseason experience is paying off now. And while Chapin has had plenty of success in its softball program over the past several years, this year's senior group will leave a legacy.

"We've shown that hard work has paid off and we've shown that no matter what, you always have to keep working and never quit," Fierro said.

Chapin coach Kevin Mills credits his three seniors for the success of this year's squad.

"All of them have been so big for us and all of them are leaders for us," Mills said. "They are good kids and they have given us so much. Each of them contribute in their own way. It's nice to see them get rewarded like this."

Chapin has defeated Hanks, Lubbock Cooper, Eastlake and Aledo to reach the regional finals. Canyon reached the regional final by eliminating 2016 state champion Birdville last weekend. Pitcher Kyra Lair leads the Lady Eagles with 25 wins in the circle, while Candain Callahan has hit 12 homers and Brynn Owen has eight homers.

"This is an exciting time for our program and the kids are really enjoying this," Mills said. "We are getting a great deal of support from the whole city. We're going to give it our best effort. We've had a great effort throughout the entire playoffs. This team has been fun to coach. They are enjoying everything about this season."
import { BufferTypes } from 'shared/util/byteSerializer/types';
import { schemaStore } from 'shared/util/byteSerializer/schema';

export enum SchemaName {
    Request = 0,
    ShortPeerInfo = 1,
    Headers = 2,
    FullHeaders = 3,
    BlockData = 4,
    Block = 5,
    Transaction = 6,
    TransactionBlock = 7,
    TransactionAssetRegister = 8,
    TransactionAssetSend = 9,
    TransactionAssetSignature = 10,
    TransactionAssetDelegate = 11,
    TransactionAssetStake = 12,
    TransactionAssetVote = 13,
    AirdropReward = 14,
    CommonBlockResponse = 15,
    RequestBlocks = 16,
    Empty = 17,
}

schemaStore.add(SchemaName.Request, {
    code: new BufferTypes.Utf8(),
    data: new BufferTypes.Buffer(),
});

schemaStore.add(SchemaName.ShortPeerInfo, {
    // TODO can be stored in 4 byte in far future
    ip: new BufferTypes.Utf8(),
    port: new BufferTypes.Uint16(),
    peerCount: new BufferTypes.Uint16(),
});

schemaStore.add(SchemaName.Headers, {
    height: new BufferTypes.Uint32(),
    broadhash: new BufferTypes.Utf8(),
    peerCount: new BufferTypes.Uint16(),
});

schemaStore.add(SchemaName.FullHeaders, {
    height: new BufferTypes.Uint32(),
    broadhash: new BufferTypes.Utf8(),
    blocksIds: new BufferTypes.Map(new BufferTypes.Uint32(), new BufferTypes.Utf8()),
    os: new BufferTypes.Utf8(), // TODO can be stored in 3 byte in far future
    version: new BufferTypes.Utf8(), // TODO can be stored in 3 byte in far future
    minVersion: new BufferTypes.Utf8(),
    peerCount: new BufferTypes.Uint16(),
});

schemaStore.add(SchemaName.BlockData, {
    height: new BufferTypes.Uint32(),
    id: new BufferTypes.Utf8(),
});

schemaStore.add(SchemaName.Block, {
    height: new BufferTypes.Uint32(),
    id: new BufferTypes.Utf8(),
    version: new BufferTypes.Uint8(),
    createdAt: new BufferTypes.Uint32(),
    previousBlockId: new BufferTypes.Utf8(),
    transactionCount: new BufferTypes.Uint16(),
    amount: new BufferTypes.Number64(),
    fee: new BufferTypes.Number64(),
    payloadHash: new BufferTypes.Utf8(),
    generatorPublicKey: new BufferTypes.Utf8(),
    signature: new BufferTypes.Utf8(),
    relay: new BufferTypes.Uint8(),
    transactions: new BufferTypes.Buffer(),
});

schemaStore.add(SchemaName.Transaction, {
    id: new BufferTypes.Utf8(),
    type: new BufferTypes.Uint8(),
    createdAt: new BufferTypes.Uint32(),
    senderPublicKey: new BufferTypes.Utf8(),
    senderAddress: new BufferTypes.Uint64(),
    signature: new BufferTypes.Utf8(),
    secondSignature: new BufferTypes.Utf8(),
    fee: new BufferTypes.Number64(),
    salt: new BufferTypes.Utf8(),
    relay: new BufferTypes.Uint8(),
    confirmations: new BufferTypes.Uint32(),
    asset: new BufferTypes.Buffer(),
});

schemaStore.add(SchemaName.TransactionBlock, {
    id: new BufferTypes.Utf8(),
    blockId: new BufferTypes.Utf8(),
    type: new BufferTypes.Uint8(),
    createdAt: new BufferTypes.Uint32(),
    senderPublicKey: new BufferTypes.Utf8(),
    senderAddress: new BufferTypes.Uint64(),
    signature: new BufferTypes.Utf8(),
    secondSignature: new BufferTypes.Utf8(),
    fee: new BufferTypes.Number64(),
    salt: new BufferTypes.Utf8(),
    confirmations: new BufferTypes.Uint32(),
    asset: new BufferTypes.Buffer(),
});

schemaStore.add(SchemaName.TransactionAssetRegister, {
    referral: new BufferTypes.Uint64(),
});

schemaStore.add(SchemaName.TransactionAssetSend, {
    recipientAddress: new BufferTypes.Uint64(),
    amount: new BufferTypes.Number64(),
});

schemaStore.add(SchemaName.TransactionAssetSignature, {
    publicKey: new BufferTypes.Utf8(),
});

schemaStore.add(SchemaName.TransactionAssetDelegate, {
    username: new BufferTypes.Utf8(),
});

schemaStore.add(SchemaName.TransactionAssetStake, {
    amount: new BufferTypes.Number64(),
    startTime: new BufferTypes.Uint32(),
    startVoteCount: new BufferTypes.Uint8(),
    airdropReward: new BufferTypes.Object(SchemaName.AirdropReward),
});

schemaStore.add(SchemaName.TransactionAssetVote, {
    votes: new BufferTypes.Array(new BufferTypes.Utf8()),
    reward: new BufferTypes.Number64(),
    unstake: new BufferTypes.Number64(),
    airdropReward: new BufferTypes.Object(SchemaName.AirdropReward),
});

schemaStore.add(SchemaName.AirdropReward, {
    sponsors: new BufferTypes.Map(new BufferTypes.Uint64(), new BufferTypes.Number64()),
});

schemaStore.add(SchemaName.CommonBlockResponse, {
    isExist: new BufferTypes.Boolean(),
});

schemaStore.add(SchemaName.RequestBlocks, {
    height: new BufferTypes.Uint32(),
    limit: new BufferTypes.Uint8(),
});

schemaStore.add(SchemaName.Empty, {});
/**
 * A single statement for the C preprocessor (CPP). This represents everything except #if etc.
 *
 * @author Adam
 */
public class CppStatement extends AbstractSyntaxElementNoNesting {

    /**
     * The type of preprocessor directive.
     */
    public static enum Type {
        DEFINE, UNDEF, INCLUDE, PRAGMA, ERROR, WARNING, LINE, EMPTY
    }

    private @NonNull Type type;

    private @Nullable ICode expression;

    /**
     * Creates a {@link CppStatement}.
     *
     * @param presenceCondition The presence condition.
     * @param type The type of preprocessor directive used.
     * @param expression The text expression following the directive. <code>null</code> if there is none.
     */
    public CppStatement(@NonNull Formula presenceCondition, @NonNull Type type, @Nullable ICode expression) {
        super(presenceCondition);
        this.type = type;
        this.expression = expression;
    }

    /**
     * De-serializes the given JSON to a {@link CodeElement}. This is the inverse operation to
     * {@link #serializeToJson(JsonObject, Function, Function)}.
     *
     * @param json The JSON to de-serialize.
     * @param deserializeFunction The function to use for de-serializing secondary nested elements. Do not use this to
     *     de-serialize the {@link CodeElement}s in the primary nesting structure!
     *     (i.e. {@link #getNestedElement(int)})
     *
     * @throws FormatException If the JSON does not have the expected format.
     */
    protected CppStatement(@NonNull JsonObject json, @NonNull CheckedFunction<@NonNull JsonElement,
            @NonNull CodeElement<?>, FormatException> deserializeFunction) throws FormatException {
        super(json, deserializeFunction);

        String typeString = json.getString("cppType");
        this.type = Type.valueOf(typeString);

        if (json.getElement("cppExpression") != null) {
            this.expression = (ICode) deserializeFunction.apply(json.getObject("cppExpression"));
        }
    }

    @Override
    public @NonNull String elementToString(@NonNull String indentation) {
        String result = "#" + type + "\n";
        ICode expression = this.expression;
        if (expression != null) {
            result += expression.toString(indentation + "\t");
        }
        return result;
    }

    /**
     * Returns the type of preprocessor directive.
     *
     * @return The type of directive that this statement represents.
     */
    public @NonNull Type getType() {
        return type;
    }

    /**
     * Returns the text expression following the directive.
     *
     * @return The text expression following the directive. <code>null</code> if there is none.
     */
    public @Nullable ICode getExpression() {
        return expression;
    }

    @Override
    public void accept(@NonNull ISyntaxElementVisitor visitor) {
        visitor.visitCppStatement(this);
    }

    @Override
    protected int hashCode(@NonNull CodeElementHasher hasher) {
        return super.hashCode(hasher) + type.hashCode()
                + (expression != null ? hasher.hashCode((AbstractCodeElement<?>) expression) : 123);
    }

    @Override
    protected boolean equals(@NonNull AbstractCodeElement<?> other, @NonNull CodeElementEqualityChecker checker) {
        boolean equal = other instanceof CppStatement && super.equals(other, checker);
        if (equal) {
            CppStatement o = (CppStatement) other;
            if (this.expression != null && o.expression != null) {
                equal = this.type == o.type && checker.isEqual(
                        (AbstractCodeElement<?>) this.expression, (AbstractCodeElement<?>) o.expression);
            } else {
                equal = this.type == o.type && this.expression == o.expression;
            }
        }
        return equal;
    }

    @Override
    public void serializeToJson(JsonObject result,
            @NonNull Function<@NonNull CodeElement<?>, @NonNull JsonElement> serializeFunction,
            @NonNull Function<@NonNull CodeElement<?>, @NonNull Integer> idFunction) {
        super.serializeToJson(result, serializeFunction, idFunction);

        result.putElement("cppType", new JsonString(notNull(type.name())));
        if (expression != null) {
            result.putElement("cppExpression", serializeFunction.apply(expression));
        }
    }
}
# wsjobd/wsjobd.py
#!/usr/bin/env python2
# coding: utf-8
import logging
import threading
import time
from collections import OrderedDict

import psutil
from geventwebsocket import Resource
from geventwebsocket import WebSocketApplication
from geventwebsocket import WebSocketError
from geventwebsocket import WebSocketServer
from pykit import threadutil
from pykit import utfjson
from pykit import jobq

logger = logging.getLogger(__name__)

MEM_AVAILABLE = 'mem_available'
CPU_IDLE_PERCENT = 'cpu_idle_percent'
CLIENT_NUMBER = 'client_number'

JOBS_DIR = 'jobs'

CHECK_LOAD_PARAMS = {
    'mem_low_threshold': {
        'load_name': MEM_AVAILABLE,
        'default': 500 * 1024 ** 2,  # 500M
        'greater': True,
    },
    'cpu_low_threshold': {
        'load_name': CPU_IDLE_PERCENT,
        'default': 3,  # 3%
        'greater': True,
    },
    'max_client_number': {
        'load_name': CLIENT_NUMBER,
        'default': 1000,
        'greater': False,
    },
}


class SystemOverloadError(Exception):
    pass


class JobError(Exception):
    pass


class InvalidMessageError(JobError):
    pass


class InvalidProgressError(InvalidMessageError):
    pass


class LoadingError(JobError):
    pass


class JobNotInSessionError(JobError):
    pass


class Job(object):

    lock = threading.RLock()
    sessions = {}

    def __init__(self, channel, msg, func):
        self.ident = msg['ident']
        self.channel = channel
        self.data = msg
        self.worker = func
        self.ctx = {}
        self.err = None
        self.progress_available = threading.Event()

        if self.ident in self.sessions:
            logger.info('job: %s already exists, created by channel %s'
                        % (self.ident, repr(self.sessions[self.ident].channel)))
            return
        else:
            self.sessions[self.ident] = self
            logger.info(('inserted job: %s to sessions by channel %s, '
                         + 'there are %d jobs in sessions now')
                        % (self.ident, repr(self.channel), len(self.sessions)))

        self.thread = threadutil.start_thread(target=self.work, args=(), daemon=True)

    def work(self):
        logger.info("job %s started, the data is: %s" % (self.ident, self.data))
        try:
            self.worker(self)
        except Exception as e:
            logger.exception('job %s got exception: %s' % (self.ident, repr(e)))
            self.err = e
        finally:
            logger.info('job %s ended' % self.ident)
            self.close()

    def close(self):
        with self.lock:
            del self.sessions[self.ident]
            logger.info(('removed job: %s from sessions, there are %d '
                         + 'jobs in sessions now') % (self.ident, len(self.sessions)))


def get_or_create_job(channel, msg, func):
    with Job.lock:
        Job(channel, msg, func)
        job = Job.sessions.get(msg['ident'])
    return job


def progress_sender(job, channel, interval=5, stat=None):
    stat = stat or (lambda data: data)
    data = job.data
    i = 10

    try:
        while True:
            # if thread died due to some reason, still send 10 stats
            if not job.thread.is_alive():
                logger.info('job %s died: %s' % (job.ident, repr(job.err)))
                if i == 0:
                    channel.ws.close()
                    break
                i -= 1

            logger.info('job %s on channel %s send progress: %s'
                        % (job.ident, repr(channel), repr(stat(data))))

            to_send = stat(data)
            if channel.report_system_load and type(to_send) == type({}):
                to_send['system_load'] = channel.get_system_load()

            channel.ws.send(utfjson.dump(to_send))

            if job.progress_available.wait(interval):
                job.progress_available.clear()

    except WebSocketError as e:
        if channel.ws.closed == True:
            logger.info('the client has closed the connection')
        else:
            logger.exception(('got websocket error when sending progress on'
                              + ' channel %s: %s') % (repr(channel), repr(e)))
    except Exception as e:
        logger.exception('got exception when sending progress on channel %s: %s'
                         % (repr(channel), repr(e)))
        channel.ws.close()


class JobdWebSocketApplication(WebSocketApplication):

    jobq_mgr = None

    def on_open(self):
        logger.info('on open, the channel is: ' + repr(self))
        self.ignore_message = False

    def _parse_request(self, message):
        try:
            try:
                msg = utfjson.load(message)
            except Exception as e:
                raise InvalidMessageError(
                    'message is not a valid json string: %s' % message)

            self._check_msg(msg)

            self.report_system_load = msg.get('report_system_load') == True

            self.cpu_sample_interval = msg.get('cpu_sample_interval', 0.02)
            if not isinstance(self.cpu_sample_interval, (int, long, float)):
                raise InvalidMessageError(
                    'cpu_sample_interval is not a number')

            check_load = msg.get('check_load')
            if type(check_load) == type({}):
                self._check_system_load(check_load)

            self.jobs_dir = msg.get('jobs_dir', JOBS_DIR)

            self._setup_response(msg)
            return

        except SystemOverloadError as e:
            logger.info('system overload on channel %s, %s'
                        % (repr(self), repr(e)))
            self._send_err_and_close(e)
        except JobError as e:
            logger.info('error on channel %s while handling message, %s'
                        % (repr(self), repr(e)))
            self._send_err_and_close(e)
        except Exception as e:
            logger.exception(('exception on channel %s while handling '
                              + 'message, %s') % (repr(self), repr(e)))
            self._send_err_and_close(e)

    def on_message(self, message):
        logger.info('on message, the channel is: %s, the message is: %s'
                    % (repr(self), message))

        if self.ignore_message:
            return
        else:
            self.ignore_message = True

        self.jobq_mgr.put((self, message))

    def _send_err_and_close(self, err):
        try:
            err_msg = {
                'err': err.__class__.__name__,
                'val': err.args,
            }
            self.ws.send(utfjson.dump(err_msg))
        except Exception as e:
            logger.error(('error on channel %s while sending back error '
                          + 'message, %s') % (repr(self), repr(e)))

    def get_system_load(self):
        return {
            MEM_AVAILABLE: psutil.virtual_memory().available,
            CPU_IDLE_PERCENT: psutil.cpu_times_percent(
                self.cpu_sample_interval).idle,
            CLIENT_NUMBER: len(self.protocol.server.clients),
        }

    def _check_system_load(self, check_load):
        system_load = self.get_system_load()

        for param_name, param_attr in CHECK_LOAD_PARAMS.iteritems():
            param_value = check_load.get(param_name, param_attr['default'])
            if not isinstance(param_value, (int, long, float)):
                raise InvalidMessageError('%s is not a number' % param_name)

            load_name = param_attr['load_name']
            diff = system_load[load_name] - param_value
            if not param_attr['greater']:
                diff = 0 - diff

            if diff < 0:
                raise SystemOverloadError(
                    '%s: %d is %s than: %d'
                    % (load_name, system_load[load_name],
                       param_attr['greater'] and 'less' or 'greater',
                       param_value))

    def _check_msg(self, msg):
        if type(msg) != type({}):
            raise InvalidMessageError("message is not dictionary")

        if 'ident' not in msg:
            raise InvalidMessageError("'ident' is not in message")

        if 'func' not in msg:
            raise InvalidMessageError("'func' is not in message")

    def _setup_response(self, msg):
        func = self._get_func_by_name(msg)
        channel = self

        job = get_or_create_job(channel, msg, func)
        if job is None:
            raise JobNotInSessionError(
                'job not in sessions: ' + repr(Job.sessions))

        progress = msg.get('progress', {})
        if progress in (None, False):
            return

        if type(progress) != type({}):
            raise InvalidProgressError(
                'the progress in message is not a dictionary')

        interval = progress.get('interval', 5)

        progress_key = progress.get('key')
        if progress_key is None:
            lam = lambda r: r
        else:
            lam = lambda r: r.get(progress_key)

        threadutil.start_thread(target=progress_sender,
                                args=(job, channel, interval, lam),
                                daemon=True)

    def _get_func_by_name(self, msg):
        mod_func = self.jobs_dir.split('/') + msg['func'].split('.')

        mod_path = '.'.join(mod_func[:-1])
        func_name = mod_func[-1]

        try:
            mod = __import__(mod_path)
        except (ImportError, SyntaxError) as e:
            raise LoadingError('failed to import %s: %s' % (mod_path, repr(e)))

        for mod_name in mod_path.split('.')[1:]:
            mod = getattr(mod, mod_name)

        logger.info('mod imported from: ' + repr(mod.__file__))

        try:
            func = getattr(mod, func_name)
        except AttributeError as e:
            raise LoadingError("function not found: " + repr(func_name))

        return func

    def on_close(self, reason):
        logger.info('on close, the channel is: ' + repr(self))


def _parse_request(args):
    app, msg = args
    app._parse_request(msg)


def run(ip='127.0.0.1', port=63482, jobq_thread_count=10):
    JobdWebSocketApplication.jobq_mgr = jobq.JobManager(
        [(_parse_request, jobq_thread_count)])

    WebSocketServer(
        (ip, port),
        Resource(OrderedDict({'/': JobdWebSocketApplication})),
    ).serve_forever()
Tetramethylammonium hydroxide (TMAH) thermochemolysis for probing in situ softwood lignin modification in each gut segment of the termite. Termites are highly effective in lignocellulose degradation; however, the process of lignin deconstruction along the alimentary canal is not well understood. In this study, the wood metabolites in each gut segment were tentatively analyzed using pyrolysis-gas chromatography-mass spectrometry in the presence of tetramethylammonium hydroxide. Collectively, the significant differences in the pyrolysate distribution among each sample established conservation of the major β-O-4′ bonds of lignin during termite digestion, although a selective lignin substructure modification was observed across the whole gut: initiation of lignin-polysaccharide dissociation, aliphatic oxidation/carboxylation, phenolic dehydroxylation in the foregut, and linkage modification of the 5-5′, β-5′, and β-1′ substructures; the continuation of foregut reactions into the midgut with further phenolic carboxylation/demethoxylation/carbonylation; and phenolic/aliphatic esterifications in the hindgut. Overall, elucidation of the stepwise lignin unlocking mechanism in termites provides a valuable insight for understanding plant cell wall structure and its recalcitrance.
// src/main/java/shop/main/controller/admin/AdminDiscountController.java
package shop.main.controller.admin;

import static shop.main.controller.admin.AdminController.ADMIN_PREFIX;
import static shop.main.controller.admin.AdminController.MANAGER_PREFIX;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.stream.Collectors;

import javax.servlet.http.HttpServletRequest;
import javax.validation.Valid;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.validation.BindingResult;
import org.springframework.web.bind.annotation.ModelAttribute;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.servlet.mvc.support.RedirectAttributes;

import shop.main.data.entity.Discount;
import shop.main.data.service.DiscountService;

@Controller
@RequestMapping(value = { ADMIN_PREFIX, MANAGER_PREFIX })
public class AdminDiscountController extends AdminController {

    @Autowired
    DiscountService discountService;

    @RequestMapping(value = "/discounts")
    public String discountsList(Model model) {
        loadTableData("", null, 1, PAGE_SIZE, model);
        return "../admin/discounts/discounts";
    }

    @RequestMapping(value = "/findDiscounts", method = RequestMethod.POST)
    public String findDiscounts(@RequestParam String name, @RequestParam String status,
            @RequestParam(value = "current", required = false) Integer current,
            @RequestParam(value = "pageSize", required = false) Integer pageSize, Model model) {
        loadTableData(name, status, current, pageSize, model);
        return "../admin/discounts/_table";
    }

    private void loadTableData(String name, String status, Integer current, Integer pageSize, Model model) {
        Pageable pageable = new PageRequest(current - 1, pageSize);
        model.addAttribute("discountList", discountService.findByNameAndStatus(name, status, pageable));
        model.addAttribute("current", current);
        model.addAttribute("pageSize", pageSize);
        addPaginator(model, current, pageSize, discountService.countByNameAndStatus(name, status));
    }

    @RequestMapping(value = "/discount", method = RequestMethod.POST)
    public String saveDiscount(@ModelAttribute("discount") @Valid Discount discount, BindingResult result, Model model,
            final RedirectAttributes redirectAttributes, HttpServletRequest request) {
        if (result.hasErrors()) {
            model.addAttribute("errorSummary", result.getFieldErrors().stream()
                    .map(e -> e.getField() + " error - " + e.getDefaultMessage() + " ").collect(Collectors.toList()));
            return "../admin/discounts/edit_discount";
        } else {
            if (discount.isNew()) {
                redirectAttributes.addFlashAttribute("flashMessage", "Discount added successfully!");
                if (discountService.notUniqueCoupon(discount.getCoupon())) {
                    model.addAttribute("errorSummary",
                            new ArrayList<String>(Arrays.asList("Coupon code must be unique!")));
                    return "../admin/discounts/edit_discount";
                }
            } else {
                redirectAttributes.addFlashAttribute("flashMessage", "Discount updated successfully!");
            }
            discountService.save(discount);
            return "redirect:" + getUrlPrefix(request) + "discounts";
        }
    }

    @RequestMapping(value = "/discount/add", method = RequestMethod.GET)
    public String addDiscount(Model model) {
        model.addAttribute("discount", new Discount());
        return "../admin/discounts/edit_discount";
    }

    @RequestMapping(value = "/discount/{id}/update", method = RequestMethod.GET)
    public String editDiscount(@PathVariable("id") long id, Model model) {
        model.addAttribute("discount", discountService.findById(id));
        return "../admin/discounts/edit_discount";
    }

    @RequestMapping(value = "/discount/{id}/delete", method = RequestMethod.GET)
    public String deleteDiscount(@PathVariable("id") long id, Model model,
            final RedirectAttributes redirectAttributes, HttpServletRequest request) {
        discountService.deleteById(id);
        redirectAttributes.addFlashAttribute("flashMessage", "Discount deleted successfully!");
        return "redirect:" + getUrlPrefix(request) + "discounts";
    }
}
import torch
import numpy as np
from torch.nn import functional as F

from medcam import medcam_utils


class _BaseWrapper():

    def __init__(self, model, postprocessor=None, retain_graph=False):
        """A base wrapper of common functions for the backends."""
        self.device = next(model.parameters()).device
        self.retain_graph = retain_graph
        self.model = model
        self.forward_handlers = []
        self.backward_handlers = []
        self.postprocessor = postprocessor
        self.registered_hooks = {}

    def generate_attention_map(self, batch, label):
        """Handles the generation of the attention map from start to finish."""
        output = self.forward(batch)
        self.backward(label=label)
        attention_map = self.generate()
        return output, attention_map, self.output_batch_size, self.output_channels, self.output_shape

    def forward(self, batch):
        """Calls the forward() of the model."""
        self.model.zero_grad()
        self.logits = self.model.model_forward(batch)
        self._extract_metadata(batch, self.logits)
        self._set_postprocessor_and_label(self.logits)
        self.remove_hook(forward=True, backward=False)
        return self.logits

    def backward(self, label=None):
        """Applies postprocessing and class discrimination on the model output and then backwards it."""
        if label is None:
            label = self.model.medcam_dict['label']
        self.mask = self._isolate_class(self.logits, label)
        self.logits.backward(gradient=self.mask, retain_graph=self.retain_graph)
        self.remove_hook(forward=True, backward=True)

    def _isolate_class(self, output, label):
        """Isolates a desired class on the channel dim by creating a mask that is applied
        on the gradients during backward."""
        if label is None:
            return torch.ones(output.shape).to(self.device)
        if label == "best":
            if self.output_batch_size > 1:
                raise RuntimeError("Best label mode works only with a batch size of one. "
                                   "You need to choose a specific label or None with a batch size bigger than one.")
            B, C, *data_shape = output.shape
            if len(data_shape) > 0:
                _output = output.view(B, C, -1)
                _output = torch.sum(_output, dim=2)
                label = torch.argmax(_output, dim=1).item()
            else:
                label = torch.argmax(output, dim=1).item()
        if callable(label):
            mask = label(output) * 1.0
            print(mask.dtype)
        else:
            mask = torch.zeros(output.shape).to(self.device)
            mask[:, label] = 1
        return mask

    # TODO: Does not work for classification output (shape: (1, 1000)), merge with the one in medcam_inject
    def _extract_metadata(self, input, output):
        """Extracts metadata like batch size, number of channels and the data shape from the output batch."""
        self.input_dim = len(input.shape[2:])
        self.output_batch_size = output.shape[0]
        if self.model.medcam_dict['channels'] == 'default':
            self.output_channels = output.shape[1]
        else:
            self.output_channels = self.model.medcam_dict['channels']
        if self.model.medcam_dict['data_shape'] == 'default':
            if len(output.shape) == 2:  # Classification -> Cannot convert attention map to classification
                self.output_shape = None
            else:  # Output is a 2D/3D image
                self.output_shape = output.shape[2:]
        else:
            self.output_shape = self.model.medcam_dict['data_shape']

    def _normalize_per_channel(self, attention_map):
        if torch.min(attention_map) == torch.max(attention_map):
            return torch.zeros(attention_map.shape)
        # Normalization per channel
        B, C, *data_shape = attention_map.shape
        attention_map = attention_map.view(B, C, -1)
        attention_map_min = torch.min(attention_map, dim=2, keepdim=True)[0]
        attention_map_max = torch.max(attention_map, dim=2, keepdim=True)[0]
        attention_map -= attention_map_min
        attention_map /= (attention_map_max - attention_map_min)
        attention_map = attention_map.view(B, C, *data_shape)
        return attention_map

    def generate(self):
        """Generates an attention map."""
        raise NotImplementedError

    def remove_hook(self, forward, backward):
        """Remove all the forward/backward hook functions."""
        if forward:
            for handle in self.forward_handlers:
                handle.remove()
            self.forward_handlers = []
        if backward:
            for handle in self.backward_handlers:
                handle.remove()
            self.backward_handlers = []

    def layers(self, reverse=False):
        """Returns the layers of the model. Optionally reverses the order of the layers."""
        return medcam_utils.get_layers(self.model, reverse)

    def _set_postprocessor_and_label(self, output):
        if self.model.medcam_dict['label'] is None:
            if output.shape[0] == self.output_batch_size and len(output.shape) == 2:  # classification
                self.model.medcam_dict['label'] = "best"
import { Injectable } from '@nestjs/common';
import { Task, TasksStatus } from './tasks.model';
import * as uuid from 'uuid/v1';

@Injectable()
export class TasksService {
    private tasks: Task[] = [];

    getAllTasks(): Task[] {
        return this.tasks;
    }

    getTaskById(id: string): Task {
        return this.tasks.find(task => task.id === id);
    }

    createTask(createTaskDto): Task {
        const { title, description } = createTaskDto;
        const task: Task = {
            id: uuid(),
            title,
            description,
            status: TasksStatus.OPEN,
        };
        this.tasks.push(task);
        return task;
    }

    deleteTask(id: string): void {
        this.tasks = this.tasks.filter(task => task.id !== id);
    }

    updateTaskStatus(id: string, status: TasksStatus) {
        const task = this.getTaskById(id);
        task.status = status;
        return task;
    }
}
package com.creditsuisse.eventfilereader.model;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;

@Entity
public class Event {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;

    @Column(nullable = false)
    private String idevent;

    @Column(nullable = false)
    private Long duration;

    @Column(nullable = true)
    private String eventtype;

    @Column(nullable = true)
    private String host;

    @Column(nullable = true)
    private boolean alert;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getIdevent() {
        return idevent;
    }

    public void setIdevent(String idevent) {
        this.idevent = idevent;
    }

    public Long getDuration() {
        return duration;
    }

    public void setDuration(Long duration) {
        this.duration = duration;
    }

    public String getEventtype() {
        return eventtype;
    }

    public void setEventtype(String eventtype) {
        this.eventtype = eventtype;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public boolean isAlert() {
        return alert;
    }

    public void setAlert(boolean alert) {
        this.alert = alert;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + (alert ? 1231 : 1237);
        result = prime * result + ((duration == null) ? 0 : duration.hashCode());
        result = prime * result + ((eventtype == null) ? 0 : eventtype.hashCode());
        result = prime * result + ((host == null) ? 0 : host.hashCode());
        result = prime * result + ((id == null) ? 0 : id.hashCode());
        result = prime * result + ((idevent == null) ? 0 : idevent.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        Event other = (Event) obj;
        if (alert != other.alert)
            return false;
        if (duration == null) {
            if (other.duration != null)
                return false;
        } else if (!duration.equals(other.duration))
            return false;
        if (eventtype == null) {
            if (other.eventtype != null)
                return false;
        } else if (!eventtype.equals(other.eventtype))
            return false;
        if (host == null) {
            if (other.host != null)
                return false;
        } else if (!host.equals(other.host))
            return false;
        if (id == null) {
            if (other.id != null)
                return false;
        } else if (!id.equals(other.id))
            return false;
        if (idevent == null) {
            if (other.idevent != null)
                return false;
        } else if (!idevent.equals(other.idevent))
            return false;
        return true;
    }

    @Override
    public String toString() {
        return "Event [id=" + id + ", idevent=" + idevent + ", duration=" + duration + ", eventtype=" + eventtype
                + ", host=" + host + ", alert=" + alert + "]";
    }
}
Genomic measures of inbreeding coefficients and genome-wide scan for runs of homozygosity islands in Iranian river buffalo, Bubalus bubalis

Background
Consecutive homozygous fragments of a genome inherited by offspring from a common ancestor are known as runs of homozygosity (ROH). ROH can be used to calculate genomic inbreeding and to identify genomic regions that are potentially under historical selection pressure. The dataset of our study consisted of 254 Azeri (AZ) and 115 Khuzestani (KHZ) river buffalo genotyped for ~65,000 SNPs for the following two purposes: 1) to estimate and compare inbreeding calculated using ROH (FROH), excess of homozygosity (FHOM), correlation between uniting gametes (FUNI), and diagonal elements of the genomic relationship matrix (FGRM); and 2) to identify frequently occurring ROH (i.e. ROH islands) for our selection signature and gene enrichment studies.

Results
In this study, 9102 ROH were identified, with an average number of 21.2 ± 13.1 and 33.2 ± 15.9 segments per animal in the AZ and KHZ breeds, respectively. On average, 4.35% (108.8 ± 120.3 Mb) of the genome was autozygous in AZ, and 5.96% (149.1 ± 107.7 Mb) in KHZ. The estimated inbreeding values based on FHOM, FUNI and FGRM were higher in AZ than they were in KHZ, which was in contrast to the FROH estimates. We identified 11 ROH islands (four in AZ and seven in KHZ). In the KHZ breed, the genes located in ROH islands were enriched for multiple Gene Ontology (GO) terms (P ≤ 0.05). The genes located in ROH islands were associated with diverse biological functions and traits such as body size and muscle development (BMP2), immune response (CYP27B1), milk production and components (MARS, ADRA1A, and KCTD16), coat colour and pigmentation (PMEL and MYO1A), reproductive traits (INHBC, INHBE, STAT6 and PCNA), and bone development (SUOX).

Conclusion
The calculated FROH was in line with the expected higher inbreeding in KHZ than in AZ because of the smaller effective population size of KHZ. Thus, we find that FROH can be used as a robust estimate of genomic inbreeding. Further, the majority of ROH peaks overlapped with, or were in close proximity to, previously reported genomic regions with signatures of selection. This tells us that it is likely that the genes in the ROH islands have been subject to artificial or natural selection.

Background
There are two main species of buffalo: the Asian water buffalo (Bubalus bubalis) and the African wild buffalo (Syncerus caffer), the second of which is also known as the cape buffalo. Domestication of B. bubalis, including the river (B. bubalis bubalis, 2n = 50) and swamp (B. bubalis carabanensis, 2n = 48) subspecies, occurred approximately 3000-6000 years ago. The domestication of river buffalo occurred in the Indo-Pakistani area, and domestication of swamp buffalo occurred close to the border of China. River buffalo expanded broadly from India, Egypt and Southeast Asia to Europe, and the swamp buffalo is the most common type of buffalo in China and Southeast Asia. The worldwide water buffalo population accounts for only approximately 11% of the entire cattle population. However, the population of water buffalo has increased in the past five decades by approximately 1.65% annually. India, Pakistan and Europe (with 5.3, 4.8 and 4.5% rates of increase, respectively) have the highest rates of annual increase. In many tropical and subtropical countries, river buffalo are raised for both meat and milk production.
In Iran and in other developing nations, river buffalo production is of great economic importance because of the ability of buffalo to make the best use of low-quality feed in the production of their valuable milk, which has a unique taste and curd properties; their high resistance to local parasites; their high adaptation to harsh climate conditions; and their long productive lifespan. The three major Iranian river buffalo breeds are Azeri (AZ), Khuzestani (KHZ) and Mazandarani (MZ), and each of these breeds belongs to a different geographical zone: the AZ, KHZ and MZ breeds are common in the north-west and north, the west and south-west, and the north of the country, respectively.

In Iran, the recording of milk and meat production, and the selective breeding of buffalo for better dairy performance (i.e. milk production, and fat and protein percentage) and better meat production, are performed by the Animal Breeding Centre of Iran (ABCI). Following performance and pedigree recording in some herds, and genetic analysis, candidate bulls are selected from rural herds based on their genetic merit, and the semen of these selected bulls is collected and distributed to all herds. However, despite buffalo production being important in Iran, particularly in rural regions, controlling inbreeding and ensuring genetic improvement of desired traits through traditional breeding programmes are difficult because of a shortage of reliable pedigree and performance records for water buffalo in the country.

The inbreeding coefficient measured from pedigree information (F PED) has been the most common parameter for describing the level of inbreeding since Wright. However, the reliability of the estimated F PED depends on the completeness and correctness of the pedigree. With the availability of high-density SNP-chip markers, inbreeding can also be defined according to genomic information such as genome-wide autozygosity. Autozygosity occurs when parents pass identical chromosomal fragments, which they already inherited from a common ancestor, on to their offspring; these genomic regions of homozygosity are known as runs of homozygosity (ROH). Estimated inbreeding based on ROH (F ROH) can discriminate between autozygous (i.e. identical by descent) and non-autozygous (i.e. identical by state) positions in the genome. Further, well-recorded pedigree information is not required to obtain a reliable F ROH. Thus, using genetic markers instead of pedigree information to calculate inbreeding can produce more robust estimates.

Identifying ROH can also help to find the footprints of genetic selection on the genome. However, ROH are suggestive, but not conclusive, of genomic regions under natural or artificial selection, because the incidence, extent and distribution of ROH across the genome are influenced by many factors other than selection, such as recombination rate, population structure, mutation rate and inbreeding. Nevertheless, ROH that frequently occur among individuals may contain genes associated with different traits that have been under historical selection, so the genes located in ROH islands can be important for selective breeding. ROH can also provide detailed information on the genetic relatedness of animals, which allows breeders to better control inbreeding in the population: mate allocation aiming to minimise inbreeding at the genome level can be achieved more precisely, and individual animals with high proportions of ROH coverage can be excluded or used less frequently in mating.
The distribution and the occurrence of ROH have been studied in humans, cattle, pigs and sheep, but are poorly studied in some species, for example, in water buffalo. The current study aims to estimate autozygosity in the genomes of the AZ and KHZ buffalo breeds, and to identify ROH spots that frequently occur among individuals. The study also examines the function of the genes located in ROH islands to identify potential selection signature regions. Moreover, the study compares F ROH with other genomic methods of inbreeding estimation.

Runs of homozygosity
The AZ and KHZ are two major buffalo breeds adapted to distinct geographical areas in Iran (Fig. 1). The PC analysis of the IBS matrix derived from SNP data confirmed two separate populations with no overlap, which means that the samples from the AZ and KHZ breeds were genetically different (Fig. 2). Although Mokhber et al. reported that AZ and KHZ are two distinct populations, they reported a moderate level of admixture between the AZ and MZ breeds. Thus, we excluded the MZ breed from our study.

In total, 9102 ROH were detected: 5352 ROH in the AZ genome and 3750 in the KHZ genome (Table 1; Additional file 1). The average number of ROH per individual was 21.23 ± 13.06 in the AZ breed (ranging from 4 to 88) and 33.2 ± 15.92 in the KHZ breed (ranging from 4 to 132). Moreover, all of the individuals in our study had at least four ROH longer than 1 Mb. The variation between samples in the total number of ROH and the total length of ROH is presented in Fig. 3. Individuals with an almost equal portion of the genome covered by ROH had different numbers and lengths of ROH, which could be an indication of different combinations of recent and distant inbreeding events in the samples.

Table 2 presents the averages of the estimated inbreeding coefficients using different methods (see also Additional file 2). The average F ROH calculated from ROH > 1 Mb in length was 0.043 ± 0.05 in the AZ breed and 0.059 ± 0.04 in the KHZ breed (Table 2; Additional file 1). The estimated inbreeding values based on F HOM, F UNI and F GRM were higher in AZ than they were in KHZ, which was in contrast to the F ROH estimates. However, the Pearson's correlations between F ROH and the inbreeding estimates from the other methods were high (Table 3).

Candidate genes inside frequently occurring runs of homozygosity regions
A genome-wide search for SNPs that frequently occurred within ROH hotspots revealed 11 regions on BTA1, BTA2, BTA5, BTA7, BTA13, BTA14, BTA19 and BTA29 (Fig. 4; Additional file 3). The detected ROH islands on BTA7, BTA13 and BTA14 were partially overlapped with previously reported selection signature regions.

Co-location of ROH islands and the identified selection signatures using iHS
The majority of ROH hotspots detected in our study (Fig. 4; Additional file 3) overlapped with selection signature regions reported by Mokhber et al. for the AZ and KHZ breeds using the haplotype-based method (i.e. iHS) (Additional files 5 and 6). For example, the SNP Affx-79610232 on BTA5 (55,271,590 bp) with the highest iHS was located in our detected ROH island.

Discussion
We defined ROH as lengths of homozygous genotypes that were > 1 Mb and contained at most one heterozygous genotype. Given the strong linkage disequilibrium (LD) between SNPs with a distance up to 100 Kb, short homozygous haplotypes are expected to be prevalent in the buffalo genome. Thus, we set a minimum length of 1 Mb and a minimum number of 40 (AZ) and 38 (KHZ) SNPs (as described in the methods section) to avoid detecting small and prevalent haplotypes as ROH.
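To make the ROH definition above concrete, here is a minimal Python sketch of a genotype-counting scan. It is not the PLINK implementation the authors used: the function name find_roh and the 0/1/2 genotype coding are illustrative assumptions, and PLINK's sliding-window and gap rules are deliberately reduced to the core criterion quoted above, namely a run of consecutive homozygous calls at least min_snps long and min_length_bp wide, tolerating at most one heterozygous call.

# A minimal sketch of genotype-counting ROH detection (not PLINK itself).
# Assumptions: genotypes are coded 0/1/2 (copies of the reference allele),
# `positions` holds the bp coordinate of each SNP on one chromosome, and
# the criteria follow the paper: >= 1 Mb, <= 1 heterozygous call.

def find_roh(genotypes, positions, min_snps=40, min_length_bp=1_000_000,
             max_het=1):
    """Return (start_bp, end_bp) for each run of homozygosity."""
    runs = []
    start = None       # index where the current candidate run began
    het_count = 0      # heterozygous calls tolerated inside the run

    def flush(end_index):
        # Keep the run only if it meets both the SNP-count and the
        # physical-length thresholds.
        if start is not None and end_index - start >= min_snps:
            length = positions[end_index - 1] - positions[start]
            if length >= min_length_bp:
                runs.append((positions[start], positions[end_index - 1]))

    for i, g in enumerate(genotypes):
        if g in (0, 2):                    # homozygous call extends the run
            if start is None:
                start = i
                het_count = 0
        elif g == 1 and start is not None and het_count < max_het:
            het_count += 1                 # tolerate a single het call
        else:                              # second het (or missing) ends it
            flush(i)
            start = None
    flush(len(genotypes))
    return runs


# Example: a 45-SNP homozygous stretch with one heterozygous call inside.
genos = [0] * 20 + [1] + [2] * 24
pos = [i * 30_000 for i in range(len(genos))]
print(find_roh(genos, pos))                # [(0, 1320000)]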
Unlike human populations, livestock species generally have higher levels of autozygosity and longer ROH. However, genotyping errors can always affect the quality of ROH calling. Therefore, we allowed one heterozygous SNP in a ROH to avoid losing particularly long ROH because of a single genotyping error.

As presented in Table 1, more than 53% of the detected ROH were 2-4 Mb in length. The proportion of different lengths of ROH can be used as an indicator of the number of past generations in which inbreeding has occurred, because recombination events can rearrange the chromosomes and reduce the length of ROH. Thus, recent inbreeding results in longer ROH because of long IBD stretches. In contrast, short ROH arise as a result of ancient inbreeding, because in meiosis across generations the long IBD segments are broken down. We detected ROH with a length from 2 to 4 Mb in all of the samples (Additional file 1), which might indicate that some inbreeding events occurred about 20 generations ago.

However, our results should be interpreted with caution. As reported by Ferenčaković et al., a medium-density chip could result in overestimation of the number of long ROH (> 4 Mb), probably because some heterozygous genotypes tend to appear in these ROH as marker density increases. Nevertheless, our results were in line with a previous report of a relatively sharp decrease in the effective population size (Ne) of the AZ and KHZ breeds, and the consequent increased rate of inbreeding, starting about 20 generations ago.

The portion of the genome that was autozygous in the AZ and KHZ breeds was lower than the reported ROH coverage in the Marchigiana beef breed (7%), Austrian dual-purpose breeds (9%), and Holstein cattle (10%). This could be because of lower inbreeding in Iranian water buffalo, or because we ignored ROH of < 1 Mb in length in our study.

On average, F HOM, F UNI and F GRM were higher in AZ than they were in KHZ. However, the previously reported Ne for AZ was larger than it was for KHZ; therefore, we expected a lower inbreeding level in AZ. The only inbreeding estimate in line with our expectation was F ROH, which showed lower inbreeding for AZ (0.043) than for KHZ (0.059). The highest correlation was observed between F UNI and F ROH (AZ = 0.98 and KHZ = 0.94). The literature has reported different correlation coefficients between F UNI and F ROH (0.15-0.80), between F HOM and F ROH (0.06-0.95), and between F GRM and F ROH (0.17-0.81). The considerable variation among different studies may be because of a strong dependency of F HOM, F UNI and F GRM on allelic frequencies.

The F PED of 0.03 previously reported in Iranian buffalo was lower than the estimated F ROH in the current study. Given that pedigree data were not available for our study, we could not calculate F PED and compare it with F ROH. However, previous studies reported moderate to high (0.47-0.82) and low to moderate (0.12-0.76) correlations between F PED and F ROH in cattle and sheep, respectively. A low to moderate correlation between F PED and F ROH was also reported by Peripolli et al. in Gyr cattle, suggesting that F PED may not accurately capture small IBD segments that result from ancient inbreeding. Further, accurate and in-depth pedigree records are required to measure F PED. Additionally, methods based on allelic frequency have demonstrated considerable variation among different breeds.
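The "about 20 generations" figure quoted above follows from a standard population-genetics expectation that this text does not spell out (it is background knowledge, not taken from this paper): an IBD segment inherited from a common ancestor g generations back has an expected length of 100/(2g) centimorgans, roughly 100/(2g) Mb under the common 1 cM ≈ 1 Mb approximation.

$$ E[L] = \frac{100}{2g}\ \text{cM} \approx \frac{100}{2g}\ \text{Mb} \quad\Longrightarrow\quad g \approx \frac{100}{2L} $$

For the 2-4 Mb segments observed here, this gives g ≈ 100/(2 × 4) ≈ 12 up to g ≈ 100/(2 × 2) = 25 generations, bracketing the "about 20 generations" estimate.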
Given that ROH does not depend on allele frequencies, and can capture both recent and ancient inbreeding, it seems to be a suitable method for measuring inbreeding.

The total length of ROH islands was about 6 and 15 Mb in the AZ and KHZ breeds, respectively (Additional file 3). Consequently, fewer genes were identified in ROH islands in the AZ breed than in the KHZ breed; that is probably why the genes located in ROH islands of the AZ breed were not enriched in any GO terms (P > 0.05). In the KHZ breed, however, the genes located in the ROH islands were significantly enriched (P ≤ 0.05) in 40 GO terms (Additional file 4). These GO terms belonged to 23 biological process (BP), 12 cellular component (CC) and 5 molecular function (MF) groups. In this paper, we focused principally on the GO terms that include genes with known large effects on important traits in livestock.

Five genes were identified with positive regulation of DNA metabolic process (GO:0051054) in the BP group. Among these genes, STAT6 (signal transducer and activator of transcription 6, on BTA5) has been reported to have large effects on growth efficiency and the quality of carcass in cattle. Additionally, using co-expression network analysis, Nguyen et al. reported the critical role of STAT6, PBX2 (PBX homeobox 2) and PBRM1 (protein polybromo 1) as transcription factors in regulating pubertal development in Brahman heifers.

Fig. 4 Manhattan plot of the distribution of frequently occurring runs of homozygosity (ROH) in Azeri (AZ) and Khuzestani (KHZ) Iranian water buffalo breeds. The X-axis shows the distribution of ROH over the genome, and the Y-axis shows the percentage of ROH shared among animals within each breed. The significance threshold of 20% (less than 1% of all SNPs), shown as a blue line, is used for detecting ROH islands (green arrows).

Twelve genes in ROH islands were associated with lipid metabolic process (GO:0006629) in the BP group. Of these genes, BMP2 (bone morphogenetic protein 2, on BTA13) plays a major role in rebuilding hair follicles in goats. Further, BMP2 in pigs, cattle and sheep has been reported to influence the regulation of body size and muscle development. Kim et al. found several signatures of selection containing genes such as BMP2 associated with body size and development in goats and sheep native to Egypt. These researchers concluded that the genes influencing body size may be important in regulating adaptation to hot, arid habitats, because efficiency in thermoregulation can be associated with body size. Supporting their conclusion is the fact that most breeds in tropical zones have a smaller body size than breeds in temperate zones, because tropical breeds can regulate their body temperature more efficiently. However, other factors that differ between temperate and arid zones may also contribute to variations in the body sizes of breeds living in different climates.

CYP27B1 (cytochrome P450 family 27 subfamily B member 1), located on BTA5, was also one of the genes enriched in the lipid metabolic process (GO:0006629). This gene is important for making 1α-hydroxylase, which is required in vitamin D bio-activation, and has been reported to be up-regulated as a result of bacterial infection, suggesting that this gene plays a role in modulating innate immune responses.

In ROH islands on BTA13, three genes were associated with the positive regulation of DNA replication (GO:0045740) in the BP group.
Proliferating cell nuclear antigen (PCNA) has been reported to be associated with follicular development and growth in buffalo ovaries, and may therefore be related to fertility performance.

Single-organism cellular process (GO:0044763), with 64 genes, was significantly enriched (P = 0.05) in ROH islands, including MARS (methionyl-tRNA synthetase, on BTA5) and ADRA1D (adrenoceptor alpha 1D, on BTA13). MARS has been reported to influence milk and protein production in Chinese and Portuguese Holstein cattle, and ADRA1D largely affects milk protein in Murrah dairy buffalo. INHBC and INHBE (inhibin beta C and E subunits, on BTA5) have been reported as candidate genes associated with reproductive performance in tropical young bulls, and with composite reproductive traits in Lori-Bakhtiari sheep. KCTD16 (potassium channel tetramerization domain containing 16, on BTA7) was reported as a candidate gene for meat quality in Simmental beef cattle, for residual feed intake in Junmu White pigs, and for fat yield in Nordic Holstein cattle.

PMEL (premelanosome protein) and MYO1A (myosin IA) on BTA5 have been reported as putative candidate genes related to coat colour phenotypes in cattle. PMEL is required for the melanin biosynthesis process in the pigmentation of hair, mucous membranes and eyes. In cattle, PMEL is reported as a candidate gene associated with the dilution of coat colour and, consequently, colour intensity. Light coat colouring can be beneficial for animals in adapting to hot climates because it can help them to reduce sunlight absorption. However, most of the AZ and KHZ buffalo have a dark coat, which could be a result of other favourable traits associated with a darker coat colour, or the result of artificial selection caused by human interference. SUOX (sulphite oxidase, on BTA5), within this BP category, was reported to be associated with bone development in cattle.

The average LD (r²) between adjacent SNPs in ROH islands was higher than the r² of adjacent SNPs located on the same chromosome (Additional file 3). Thus, the recombination rates in the ROH islands were lower than those in the rest of the genome. These results are in line with some previous studies. However, a moderate recombination rate has been reported between the SNPs in ROH islands in Valle del Belice sheep. Additionally, ROH hotspots can result from a wide range of underlying causes, such as inbreeding and selection. Peripolli et al. argued that the high LD observed in most ROH hotspots is not necessarily caused by selection or conserved IBD haplotypes, but can be an indication of a lower recombination rate in those regions. Nevertheless, most of the ROH hotspots in our study overlapped with selection signature regions found with iHS, which supports the theory that ROH can be used to find genomic regions that have been under natural and/or artificial selection.

Buffalo species have a relatively lower heat tolerance capability than some other livestock species because of their inadequately dispersed sweat glands and their dark coat colour. However, Iranian buffalo breeds have historically been raised in a hot climate. Therefore, selection for higher heat tolerance may have occurred in Iranian buffalo for better adaptation to heat stress. It has been reported that combined networks of multiple genes are often involved in the regulation of complex traits such as adaptation to hot climates.
Thus, selection for complex traits would leave only minor footprints, because selection acts on numerous regions with lower intensity across the genome. Therefore, we expected to find several genes directly or indirectly influencing different traits that were under artificial selection or important for adaptation and survival in hot areas. We found genes influencing energy and digestive metabolism (KCTD16), immune response (CYP27B1), thermoregulation (BMP2), and embryonic development and reproduction (STAT6, PCNA, INHBC and INHBE). These genes seem to be important for species such as water buffalo that have adapted to a hot climate.

Conclusion
The inbreeding coefficients based on F HOM, F UNI and F GRM were higher in the AZ breed than they were in the KHZ breed, which contradicted our expectations according to the higher Ne in the AZ breed. Given that F ROH was the only measurement of inbreeding in our study that showed KHZ water buffalo were more inbred, this measurement seems to be a suitable measure of genomic inbreeding. This is most likely because it is less affected by allele frequencies. Further, knowing the distribution of ROH across the genome, inbreeding can be avoided more efficiently through mating allocation. Additionally, frequently occurring ROH can be used as suggestive evidence of historical selection. In our study, we found some overlap between ROH islands and genomic regions showing signatures of selection in previous studies of the AZ and KHZ breeds. Therefore, the genes located in ROH islands could be under the influence of artificial and/or natural selection. We found that the genes located in ROH islands were associated with biological pathways such as adaptation to a hot climate, immune response, milk production, growth efficiency, reproduction performance and bone development.

Sample collection, ethical statement, and data quality control
Hair roots and blood samples were obtained from 112 herds of the AZ breed and 47 herds of the KHZ breed. Samples of the AZ breed were gathered from East and West Azerbaijan, Gilan and Ardabil (37.02°-38.78°N, 44.81°-49.52°E), which are north-western provinces of Iran. Samples of the KHZ breed were obtained from Kermanshah (34.54°N, 45.60°E) and Khuzestan (30.68°-32.55°N, 48.02°-48.97°E), which are the south and south-western provinces of Iran, respectively (Fig. 1). All practices relating to data collection were reviewed and confirmed by the research ethics committee of the College of Agriculture and Natural Resources of the University of Tehran, Iran, and by the ABCI.

Three hundred and sixty-nine buffalo (254 AZ and 115 KHZ) were genotyped using the 90 K SNPChip (Axiom® Buffalo 90 K Genotyping Array), which consists of 89,988 almost evenly distributed SNPs throughout the genome. The same dataset was previously used by Mokhber et al., and it partially overlapped with the datasets used by Colli et al. and by Fallahi et al. The SNPs in the 90 K SNPChip were selected using the buffalo DNA sequence but, similar to methods used in previous studies, were reported according to their location on the cattle reference genome assembly (UMD3.1). Although a chromosome-level assembly of the water buffalo genome (UOA_WB_1) has been published recently, we used the UMD3.1 assembly in our study because it is more reliable and has better gene annotation information. Genotypes were obtained through AffyPipe, and all the monomorphic and polymorphic SNPs with high resolution (n = 64,750) were stored.
According to the filtration criteria, samples with more than 5% missing genotypes and SNPs with a missing rate above 5% were eliminated from further analyses. We also filtered out SNPs with an unidentified position in the UMD3.1 assembly, SNPs positioned on the sex chromosomes, SNPs with a minor allele frequency of < 2%, and SNPs with a p-value for the Hardy-Weinberg equilibrium chi-square test < 10⁻⁶. In total, 62,122 SNPs and 369 samples with an average call rate of 99.6% passed the quality-control filters.
Genetic distance between breeds
Genetic distance, which is based on the IBS matrix, was estimated through the --ibs-matrix command in PLINK v1.9. Principal component (PC) analysis of the genetic distances was performed to visualise the genetic diversity of the samples and was depicted using R (http://www.R-project.org/). According to the first and second PCs, we removed four samples, two from each breed, that were placed outside their expected breed cluster.
Runs of homozygosity analyses
ROH can be detected in the genome through two main approaches: 1) genotype-counting algorithms, in which the genome is scanned to identify long stretches of consecutive homozygous genotypes, like the one implemented in PLINK v1.9; and 2) model-based methods that utilize Hidden Markov Models (HMM), like the one implemented in RzooRoH. The latter package enables a better assessment of the contribution of various generations to the current level of inbreeding, estimates inbreeding at both genome-wide and local scales, and classifies homozygous-by-descent (HBD) segments into age-based classes. However, we used PLINK in our study because of the simplicity of running the sliding-window approach, which detects ROH with sufficiently high assurance. A genome scan for ROH was conducted for the AZ (n = 252) and KHZ (n = 113) breeds separately. For each individual, ROH segments with the following attributes were identified: 1) each ROH stretch was at least 1 Mb in length; 2) there was at most one heterozygous and one missing SNP in each ROH; 3) each ROH comprised at least the minimum number of SNPs for its breed, calculated according to Eq. 1 to control the false positive rate of the identified ROH:

l = log_e[α / (n_a × n_s)] / log_e(1 − het)    (1)

where l is the minimum number of SNPs in a ROH; α is the false positive rate of the identified ROH (set at 0.5); n_a and n_s are the number of individuals and the number of SNPs per individual, respectively; and het is the average heterozygosity across individuals. l was calculated to be 40 and 38 in the AZ and KHZ breeds, respectively; 4) each ROH contained at least one SNP per 100 Kb; and 5) the maximum gap between two neighbouring SNPs in a ROH had to be less than 1 Mb. The ROH that had these five attributes were divided into the following five groups: 1–2, 2–4, 4–8, 8–16 and > 16 Mb, as suggested in the literature. Then, for each breed, the frequency and the average length (Mb) of ROH within each category, the percentage of each ROH category, and the percentage of genome coverage by each ROH category were calculated using R (http://www.R-project.org/).
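As a quick, self-contained illustration of Eq. 1 (a sketch, not code from the study), the following Python snippet computes the minimum ROH length l in SNPs; the average heterozygosity used here is an assumed placeholder value, so the result will not exactly reproduce the 40 and 38 reported above.

import math

def min_snps_in_roh(alpha, n_individuals, n_snps, mean_het):
    """Minimum number of consecutive homozygous SNPs required to call a ROH (Eq. 1)."""
    l = math.log(alpha / (n_individuals * n_snps)) / math.log(1.0 - mean_het)
    return math.ceil(l)

# 252 animals and 62,122 SNPs are taken from the text; mean_het = 0.30 is an
# assumed illustrative value, not a figure reported in the study.
print(min_snps_in_roh(alpha=0.5, n_individuals=252, n_snps=62122, mean_het=0.30))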
Inbreeding coefficient estimations
The coefficient of inbreeding was estimated using ROH (F_ROH), excess of homozygosity (F_HOM), correlation between uniting gametes (F_UNI) and the diagonal elements of the genomic relationship matrix (F_GRM). F_ROH was calculated for each individual using Eq. 2:

F_ROH(i) = Σ_{j=1..n} L_ROH(j) / L_aut    (2)

where F_ROH(i) is the inbreeding coefficient of animal i; n is the total number of ROH; L_ROH(j) is the length of the j-th ROH in animal i; and L_aut is the total autosome length covered by the SNP markers (2.5 Gb in our study). We also calculated the following three different genomic inbreeding estimations, F_GRM (Eq. 3), F_HOM (Eq. 4) and F_UNI (Eq. 5), using the --ibc command in GCTA software, which implements the following estimators:

F_GRM = (1/n) Σ_i [(x_i − 2p_i)² / h_i] − 1    (3)

F_HOM = 1 − (1/n) Σ_i [x_i(2 − x_i) / h_i]    (4)

F_UNI = (1/n) Σ_i [(x_i² − (1 + 2p_i)x_i + 2p_i²) / h_i]    (5)

where x_i and p_i are the number of copies and the frequency of the reference allele for SNP i, respectively; h_i is 2p_i(1 − p_i); and n is the total number of SNPs. Pearson's correlation coefficients between F_ROH and the other genomic inbreeding estimates were also calculated.
Frequently appearing runs of homozygosity and gene enrichment analyses
To detect the genomic regions frequently covered by ROH in the AZ and KHZ populations, the number of times each SNP occurred in a ROH was counted separately in each breed. The ROH repeated in more than 20% of the individuals in each breed (approximately less than 1% of the SNPs) were designated ROH islands, as suggested in previous studies. Further, the frequencies of ROH were plotted against their physical position along UMD3.1. To identify genes in ROH islands, we used the UMD3.1 map viewer from the NCBI website (https://www.ncbi.nlm.nih.gov/mapview/). Additionally, to find significantly enriched Gene Ontology (GO) terms (P ≤ 0.05) for the genes located in ROH peaks, we used the DAVID v6.8 tool. Finally, we performed an extensive literature review to explore the biological functions of the annotated genes in ROH islands. To discover whether ROH islands were associated with regions of the genome with a low recombination rate, the average LD of all adjacent SNPs across each chromosome was compared with the average LD between adjacent SNPs inside the ROH islands located on the same chromosome. Additionally, to discover whether the ROH hotspots were associated with genomic regions that showed signatures of selection through other methods, we compared ROH islands with the integrated haplotype homozygosity scores (iHS) that had already been published for the AZ and KHZ breeds. The iHS is a measure of haplotype homozygosity based on the difference between the observed LD structure around a selected allele and the LD pattern expected from the whole genome. Therefore, it can be used to detect regions under historical selection.
Additional file 4. The genes located in detected runs of homozygosity (ROH) islands in the Khuzestani (KHZ) breed that were significantly enriched (P ≤ 0.05) in biological process (BP), cellular component (CC) and molecular function (MF) Gene Ontology (GO) terms.
Additional file 5. List of integrated haplotype homozygosity scores (iHS) for all SNPs in the Azeri (AZ) and Khuzestani (KHZ) breeds.
Additional file 6. Manhattan plot of integrated haplotype homozygosity score (iHS) across the genome.
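As a practical illustration of Eq. 2 (a sketch under stated assumptions, not code from the study), the following Python snippet computes F_ROH per animal from the ROH segments detected by a PLINK --homozyg run; the file name buffalo.hom is hypothetical, and the column names (FID, IID, KB) follow PLINK 1.9's standard .hom output format.

import pandas as pd

L_AUT_KB = 2.5e6  # autosomal length covered by the markers: 2.5 Gb, in kilobases

# One row per detected ROH segment; KB is the segment length in kilobases.
roh = pd.read_csv("buffalo.hom", sep=r"\s+")

# Eq. 2: summed ROH length per animal divided by the total autosomal length.
f_roh = roh.groupby(["FID", "IID"])["KB"].sum() / L_AUT_KB
print(f_roh.describe())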
Study of Elevator Safety Performance Test System Based on LabVIEW
The detection equipment and technical methods used in China's coal mines are badly outdated: the instruments are mostly stand-alone and scattered, efficiency is low, accuracy is poor, the data obtained from detection and testing are limited, and both the testing methods and the data-processing technology lag behind current needs. As a result, they cannot provide the objective and impartial technical data required for safe production in a modern coal mine. This paper presents a hoist safety performance test system based on LabVIEW and introduces the system's hardware and software. The system offers good real-time performance, high accuracy, a short development cycle, and convenient data handling, among other advantages.
/* * Copyright (c) 2017 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.acra.sender; import android.content.ComponentName; import android.content.Context; import android.content.Intent; import android.content.pm.PackageManager; import android.content.pm.ResolveInfo; import android.net.Uri; import android.os.Build; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import org.acra.ACRA; import org.acra.ACRAConstants; import org.acra.attachment.AcraContentProvider; import org.acra.attachment.DefaultAttachmentProvider; import org.acra.config.ConfigUtils; import org.acra.config.CoreConfiguration; import org.acra.config.MailSenderConfiguration; import org.acra.data.CrashReportData; import org.acra.util.IOUtils; import org.acra.util.InstanceCreator; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; import static org.acra.ACRA.LOG_TAG; /** * Send reports through an email intent. * <p> * The user will be asked to chose his preferred email client if no default is set. Included report fields can be defined using * {@link org.acra.annotation.AcraCore#reportContent()}. Crash receiving mailbox has to be * defined with {@link org.acra.annotation.AcraMailSender#mailTo()}. */ @SuppressWarnings("WeakerAccess") public class EmailIntentSender implements ReportSender { public static final String DEFAULT_REPORT_FILENAME = "ACRA-report" + ACRAConstants.REPORTFILE_EXTENSION; private final CoreConfiguration config; private final MailSenderConfiguration mailConfig; public EmailIntentSender(@NonNull CoreConfiguration config) { this.config = config; this.mailConfig = ConfigUtils.getPluginConfiguration(config, MailSenderConfiguration.class); } @Override public void send(@NonNull Context context, @NonNull CrashReportData errorContent) throws ReportSenderException { final PackageManager pm = context.getPackageManager(); final String subject = buildSubject(context); final String body; try { body = config.reportFormat().toFormattedString(errorContent, config.reportContent(), "\n", "\n\t", false); } catch (Exception e) { throw new ReportSenderException("Failed to convert Report to text", e); } final ArrayList<Uri> attachments = new ArrayList<>(); final boolean contentAttached = fillAttachmentList(context, body, attachments); //we have to resolve with sendto, because send is supported by non-email apps final Intent resolveIntent = buildResolveIntent(subject, body); final ComponentName resolveActivity = resolveIntent.resolveActivity(pm); if (resolveActivity != null) { if (attachments.size() == 0) { //no attachments, send directly context.startActivity(resolveIntent); } else { final Intent attachmentIntent = buildAttachmentIntent(subject, body, attachments, contentAttached); final List<Intent> initialIntents = buildInitialIntents(pm, resolveIntent, attachmentIntent); final String packageName = getPackageName(resolveActivity, initialIntents); attachmentIntent.setPackage(packageName); if (packageName == null) { //let user choose email client 
for (Intent intent : initialIntents) { grantPermission(context, intent, intent.getPackage(), attachments); } showChooser(context, initialIntents); } else if (attachmentIntent.resolveActivity(pm) != null) { //use default email client grantPermission(context, attachmentIntent, packageName, attachments); context.startActivity(attachmentIntent); } else { ACRA.log.w(LOG_TAG, "No email client supporting attachments found. Attachments will be ignored"); context.startActivity(resolveIntent); } } } else { throw new ReportSenderException("No email client found"); } } /** * Finds the package name of the default email client supporting attachments * * @param resolveActivity the resolved activity * @param initialIntents a list of intents to be used when * @return package name of the default email client, or null if more than one app match */ @Nullable private String getPackageName(@NonNull ComponentName resolveActivity, @NonNull List<Intent> initialIntents) { String packageName = resolveActivity.getPackageName(); if (packageName.equals("android")) { //multiple activities support the intent and no default is set if (initialIntents.size() > 1) { packageName = null; } else if (initialIntents.size() == 1) { //only one of them supports attachments, use that one packageName = initialIntents.get(0).getPackage(); } } return packageName; } /** * Builds an email intent with attachments * * @param subject the message subject * @param body the message body * @param attachments the attachments * @param contentAttached if the body is already contained in the attachments * @return email intent */ @NonNull protected Intent buildAttachmentIntent(@NonNull String subject, @NonNull String body, @NonNull ArrayList<Uri> attachments, boolean contentAttached) { final Intent intent = new Intent(Intent.ACTION_SEND_MULTIPLE); intent.putExtra(Intent.EXTRA_EMAIL, new String[]{ConfigUtils.getPluginConfiguration(config, MailSenderConfiguration.class).mailTo()}); intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); intent.putExtra(Intent.EXTRA_SUBJECT, subject); intent.setType("message/rfc822"); intent.putParcelableArrayListExtra(Intent.EXTRA_STREAM, attachments); if (!contentAttached) intent.putExtra(Intent.EXTRA_TEXT, body); return intent; } /** * Builds an intent used to resolve email clients and to send reports without attachments or as fallback if no attachments are supported * * @param subject the message subject * @param body the message body * @return email intent */ @NonNull protected Intent buildResolveIntent(@NonNull String subject, @NonNull String body) { final Intent intent = new Intent(Intent.ACTION_SENDTO); intent.setData(Uri.fromParts("mailto", ConfigUtils.getPluginConfiguration(config, MailSenderConfiguration.class).mailTo(), null)); intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); intent.putExtra(Intent.EXTRA_SUBJECT, subject); intent.putExtra(Intent.EXTRA_TEXT, body); return intent; } @NonNull private List<Intent> buildInitialIntents(@NonNull PackageManager pm, @NonNull Intent resolveIntent, @NonNull Intent emailIntent) { final List<ResolveInfo> resolveInfoList = pm.queryIntentActivities(resolveIntent, PackageManager.MATCH_DEFAULT_ONLY); final List<Intent> initialIntents = new ArrayList<>(); for (ResolveInfo info : resolveInfoList) { final Intent packageSpecificIntent = new Intent(emailIntent); packageSpecificIntent.setPackage(info.activityInfo.packageName); if (packageSpecificIntent.resolveActivity(pm) != null) { initialIntents.add(packageSpecificIntent); } } return initialIntents; } private void showChooser(@NonNull 
Context context, @NonNull List<Intent> initialIntents) { final Intent chooser = new Intent(Intent.ACTION_CHOOSER); chooser.putExtra(Intent.EXTRA_INTENT, initialIntents.remove(0)); chooser.putExtra(Intent.EXTRA_INITIAL_INTENTS, initialIntents.toArray(new Intent[initialIntents.size()])); chooser.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); context.startActivity(chooser); } private void grantPermission(@NonNull Context context, @NonNull Intent intent, String packageName, @NonNull List<Uri> attachments) { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { intent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION); } else { //flags do not work on extras prior to lollipop, so we have to grant read permissions manually for (Uri uri : attachments) { context.grantUriPermission(packageName, uri, Intent.FLAG_GRANT_READ_URI_PERMISSION); } } } /** * Creates the message subject * * @param context a context * @return the message subject */ @NonNull protected String buildSubject(@NonNull Context context) { final String subject = mailConfig.subject(); if (subject != null) { return subject; } return context.getPackageName() + " Crash Report"; } /** * Adds all attachment uris into the given list * * @param context a context * @param body the report content * @param attachments the target list * @return if the attachments contain the content */ protected boolean fillAttachmentList(@NonNull Context context, @NonNull String body, @NonNull List<Uri> attachments) { final InstanceCreator instanceCreator = new InstanceCreator(); attachments.addAll(instanceCreator.create(config.attachmentUriProvider(), DefaultAttachmentProvider::new).getAttachments(context, config)); if (mailConfig.reportAsFile()) { final Uri report = createAttachmentFromString(context, mailConfig.reportFileName(), body); if (report != null) { attachments.add(report); return true; } } return false; } /** * Creates a temporary file with the given content and name, to be used as an email attachment * * @param context a context * @param name the name * @param content the content * @return a content uri for the file */ @Nullable protected Uri createAttachmentFromString(@NonNull Context context, @NonNull String name, @NonNull String content) { final File cache = new File(context.getCacheDir(), name); try { IOUtils.writeStringToFile(cache, content); return AcraContentProvider.getUriForFile(context, cache); } catch (IOException ignored) { } return null; } }
Contribution for increasing manufacturing companies' productivity using a tracking and planning production program
This paper presents an overview of a tracking and planning production program. The major challenge for manufacturing companies is to achieve the highest possible productivity, and the best way to do so is to track every element of the production process and to plan all of its phases as well as possible. The method used for production planning is the Critical Path Method (CPM), because this planning method determines in advance the fabrication time of complex parts. Using this tracking and planning production program, companies can check the workload of their machine tools, analyse capacity, assign the proper equipment for part fabrication, and estimate both the delivery time and the cost of products. One of its advantages is that this information is provided in real time.
Introduction
These days, most manufacturing companies face a dynamic challenge driven by the need for short product and technology life cycles and by the high number of product variants required as demand for individualized products rises. An important obstacle that manufacturing companies must overcome when designing a new manufacturing system to produce several products is the correct selection of the optimal quantity and portfolio of product-dedicated and flexible capacities. Production planning is a time-bound activity, expressed in years, months, weeks and days. It is an integral part of a company's overall planning process and reflects how the organization plans its work. The support activities of the tracking and planning production program can be broken roughly into three time horizons: long term, medium term, and short term. In the long term, the system is responsible for providing information to make decisions on the appropriate amount of capacity (including equipment, buildings, suppliers, and so forth) to meet the market demands of the future. In the intermediate term, the fundamental issue addressed by the tracking and planning production program is matching supply and demand in terms of both volume and product mix. Although this is also true in the longer term, in the intermediate term the focus is more on providing the exact material and production capacity needed to meet customer needs. This means planning for the right quantities of material to arrive at the right time and place to support product production and distribution. In the short term, detailed scheduling of resources is required to meet production requirements. This involves time, people, material, equipment, and facilities. Key to this activity is people working on the right things. As day-to-day activities continue, the tracking and planning production program must track the use of resources and execution results to report on material consumption, labour utilization, equipment utilization, completion of customer orders, and other important measures of manufacturing performance. Moreover, as customers change their minds, things go wrong, and other changes occur, the program must provide information to managers, customers, and suppliers on what happened, provide problem-solving support, and report on the resolution of the problems. Throughout this process, communication with customers on production status and changes in expectations must be maintained.
The specific requirements for the design of the tracking and planning production program depend on the nature of the production process, the degree of supply chain integration, customers' expectations, and the needs of management. As the program is required to integrate with other company systems in the supply chain and/or with the tracking and planning production programs of the firm, additional design parameters are introduced. Moreover, these program requirements are not static: as competitive conditions, customer expectations, supplier capabilities, and internal needs change, the tracking and planning production program needs to change as well.
The need for a tracking and planning production program
In order to manage the production process easily and efficiently, it is necessary to implement a program that closely follows all the elements that make up the process. It is built on a rigorous methodology and is intended to bring together all available resources within the company to make production as efficient as possible. Obtaining real-time (even on-line) information from all areas of the company provides maximum control, which reduces the risk of disturbing factors occurring. First, the program is used to assign a machine tool that will perform the mechanical processing. Each production part passes through at least two machining operations: cutting the blank, followed by milling, drilling or turning. Accordingly, the part is assigned both a cutting machine and a milling machine. Some parts, such as BLOCK LOCATOR, also require electro-erosion machining, and PIN parts require a grinding process. These mechanical processes are assigned to specific machines. Assignment to a machine tool is required for generating the CNC program, which differs depending on the machine's working parameters, the tools available in the machine's tool storage, the fixing devices, etc. A machine tool that is under repair or maintenance cannot be assigned, because the waiting time until the machine can be used again would add to the processing time. The next step, after the machine is assigned, is to generate the CNC program. Once generated, it is transferred online and the machining process starts. The tracking and planning production program manages the machine tools' activity and the stages of part processing. An important aspect of the program is the ability to stop a machining process when the 3D model of the part is modified: a part may undergo shape changes in the design phase even while it is being machined. In that case the machining process is stopped, the current stage of the blank is checked and, if the changes can still be applied to the actual piece, the CNC program is modified. Improving the efficiency of the machining department, reducing costs, assigning processes, maintaining continuous control over the entire activity, and providing real-time analyses and reports are some of the benefits of implementing the tracking and planning production program.
The method used by the production planning program
The method used for production planning is the Critical Path Method (CPM), because this planning method determines in advance the fabrication time of complex parts. The principle of critical path analysis is to divide a project (a complex action) into component parts at a level that allows their logical and technological correlation, that is, to make it possible to establish the interactions between the component parts.
These components are the activities of the complex action. When defining the list of activities, the specialist involved uses his experience to answer, for each activity, the following questions: "Which other activities must necessarily precede this activity?" and "What is the duration of the activity?". The CPM method is a critical path analysis process in which the only parameter analysed is time, and the representation of the network graph follows these conventions:
- each activity is associated with an oriented segment called an arc, defined by its end nodes;
- each arc is associated with a value equal to the duration of the activity it represents;
- the conditioning of two activities is represented by the succession of two arcs.
Below is an example of how the CPM method works. If two or more activities have the same preceding activity, for example A precedes B and A precedes C, the representation in the network graph takes the form of Figure 1 (a). Arcs B and C symbolize two activities that can only begin after activity A is completed. Activities B and C can be executed simultaneously. Also, the execution of an activity may depend on the completion of several direct predecessors, for example A precedes C and B precedes C, as in Figure 1 (b). In this situation, activity C cannot begin, logically, until both A and B have finished.
Fig. 1. Examples of network graphs
The most important values to be computed after the network has been traced are:
- the earliest occurrence time of an event: the earliest moment at which a node can be reached;
- the latest occurrence time of an event: the latest moment by which a node must be reached for the project to finish on the set date.
Among the advantages of the CPM method (and of critical path analysis in general) we highlight:
- anticipating the execution time of complex projects;
- allowing permanent control of execution while the project is running;
- making explicit the logical and technological links between activities;
- highlighting critical activities;
- highlighting non-critical activities, which have time reserves;
- allowing periodic updates without redoing the graph;
- offering the possibility to carry out calculations to optimize the duration of a project based on the cost criterion;
- representing a streamlined and rational method that allows timely scheduling of activities while taking resources into account.
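To make the forward- and backward-pass logic of CPM concrete, here is a minimal Python sketch; the activity network (names, durations, precedences) is invented for illustration and is not taken from the paper.

# Activities listed in topological order: name -> (duration, [predecessors]).
activities = {
    "A": (3, []),
    "B": (4, ["A"]),
    "C": (2, ["A"]),
    "D": (5, ["B", "C"]),
}

# Forward pass: earliest start of each activity.
earliest = {}
for name, (dur, preds) in activities.items():
    earliest[name] = max((earliest[p] + activities[p][0] for p in preds), default=0)

project_end = max(earliest[n] + activities[n][0] for n in activities)

# Backward pass: latest start of each activity without delaying the project.
latest = {}
for name in reversed(list(activities)):
    successors = [s for s, (_, preds) in activities.items() if name in preds]
    latest[name] = min((latest[s] for s in successors), default=project_end) - activities[name][0]

# Zero slack identifies the critical path (here A -> B -> D, total 12 time units).
for name in activities:
    slack = latest[name] - earliest[name]
    print(name, "earliest:", earliest[name], "latest:", latest[name],
          "critical" if slack == 0 else f"slack {slack}")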
Input data / Output data
The operation of the tracking and planning production program requires the input data to be entered correctly, as can be seen in the figure below. Figure 2 highlights in yellow the part information that must be entered for machine tool assignment. The first step is loading the 3D model of the part into the program; this also assigns the part its name, which is used throughout the program. Some inputs merely identify the system to which the part belongs (project, line, station). The "Rh / Lh" box determines the number of pieces to be manufactured: if "Rh" or "Lh" is selected, only one piece is processed; if "Rh / Lh" is selected, two pieces are processed, one of them symmetrical; and if "Rh=Lh" is selected, two identical pieces are processed. The type of part and its complexity are the main factors in assigning the machine tool. Below is a table showing the predefined assignment of the existing machine tools to the processing of different parts.
Table 1. Predefined assignment of machine tools to different parts
The reports about the machine tools and about the parts that have been or will be processed are the output data of the program for tracking and planning production. They can be saved as files in Microsoft Office Excel format or viewed online within the program. After the CNC program is loaded, it is automatically transferred to the assigned machine tools via the digital data management system, Product Lifecycle Management (PLM). The PLM system is primarily a database of all the information about the projects that a company has completed or is currently carrying out. Other outputs are:
- the actual start date of each operation;
- the actual end date of each operation;
- the duration of each operation in the schedule (operational or non-operational time);
- the slack between each operation and its latest start date;
- the delay of the production task relative to its latest start date.
Report generation can be driven by the company's own information and control requirements or scheduled as follows:
- daily reports;
- monthly reports;
- quarterly reports;
- annual reports.
These reports can be used to calculate the cost of mechanical processing, machine tool efficiency, service life, etc. Based on the reports, it can be determined whether a machine tool is working below full capacity because of the small number of parts machined on it; in that case, it should be equipped with tools and devices suitable for processing other types of parts. It is important that a machine tool does not sit idle, because idle time reduces production efficiency.
Presentation of the functioning and usage of the program
The tracking and planning production program works well and generates accurate information on the condition that it is used correctly. That is why the operator of the program should be trained and then supervised for a set period of time. Below is a broad overview of how to use the program. Figure 4 illustrates the start-up page of the program after it is opened from the computer's start-up bar. The 3D model is loaded by activating the Search button in the newly opened window; the part is then named and the remaining part information (project, line, station and the "Rh / Lh" selection) is entered as described above. The type of part and its complexity must be known by the operator because these are the main factors in assigning a suitable machine tool. Machine tools are selected automatically based on their availability: for example, if a part can be machined on five machines, the machine with the shortest time remaining on its current job is assigned. At the same time, the tools and devices available in a machine tool can determine its assignment. After all the part information has been entered, a preliminary machine tool assignment is displayed (Figure 5).
If all the information has been entered correctly, pressing the OK button generates the file with the assigned machines, which is saved in the same location as the 3D model of the part. The program operator views the contents of this file, as illustrated in Figure 6. The Add new part button opens the window where the CNC program of the given part is loaded. The file containing the CNC program is called "CNC-x_y", where "x" is the number of the CNC program created and "y" is the part name (Figure 7).
Fig. 7. Loading the CNC program for the given part
The Display machine tool button opens the machine tool tracking window. It lists all the machine tools available at the mechanical processing site, which parts are being processed and which parts will be processed on these machines, their availability, and their capacity, as shown in Figure 8. A machine tool cannot be assigned for the machining of a part if the processing would take longer than the time remaining until the machine enters maintenance. That is why each machine has an Activate / Deactivate button that stops or starts it for various operations: maintenance, repair, or improvement by adding tools and devices.
Fig. 8. Displaying the machine tools and the information about them
Pressing the Display MT button shows all the details of a machine tool: the workpiece currently being processed, the workpieces scheduled for processing, the available tools and devices, the machine's capacity, its efficiency, etc. The Display added parts button opens the window with the parts loaded into the program (Figure 9). A part can be processed on at most three types of machine tools, and this window shows which machine tools were assigned for the given part. The "Processing time" column contains the total processing time of the part (across all phases: cutting, machining and electro-erosion) when progress is 100%, or the time spent so far when progress is below 100%. Pressing the Display part details button opens a new window displaying all the information about the given part. Information such as who added the part to the program or how much it costs can be printed as a report file. As can be seen from the daily workload diagram in Figure 10, some machine tools have a daily workload of 100%. These machine tools are productively efficient because they work 24 hours a day. The vast majority of machine tools have a workload between 50% and 80%. Their average workload could be increased but, given that there are several machine tools of the same type, it can be considered a good workload. The machine tools with a low workload, such as the R530 CORREA A30 and the R562 DMG DMU 70, do not produce enough parts to be efficient. Analysing this diagram leads to a decision to discover the factors that contribute to the low efficiency. Some of the reasons may be that customer orders for parts that can be produced on the machine are insufficient, that the machine is in the maintenance or repair phase, or that the stages preceding the machining of the parts on these machines last longer than planned. For a better analysis of machine tool efficiency, the weekly and monthly workload diagrams (Figures 11 and 12) must also be examined. A low monthly workload on a machine tool is a disadvantage for production: it means that a machine the company has paid for does not produce enough to amortize the investment.
If, after checking, the machine is found to be working under normal conditions and to be properly equipped for manufacturing the specific parts, it is necessary to increase the volume of client orders; otherwise the machine represents an extra cost for the company. Another diagram provided by the tracking and planning production program shows the duration of mechanical fabrication for different parts, i.e., how long all the operations take to produce a specific part. The different operations are marked in separate colours for better visibility.
Conclusions
The program for tracking and planning production can be improved by making the user interface more interactive, so that it is as easy as possible to use. The program's databases must be as detailed as possible and contain correct information, which can be achieved by adding a procedure that checks all the information entered into the system. As production capacity increases, the program needs to be adapted so that more users can enter, process, and save information in the system at the same time without affecting each other's work. In conclusion, the program presented is well suited to a manufacturing company's production thanks to its simplicity of use, its reports on the actual situation in the department, and its efficiency in assigning machine tools suitable for processing the given parts.
Within the last thirty years, internet, social media, and mobile technologies have transformed the ways we interact with information, each other, and the world. In addition to these nearly ubiquitous technologies, we are witnessing the rapid advance of new digital technologies such as artificial intelligence and machine learning, augmented and mixed reality, and the Internet of Things. All of these new and emerging technologies raise fundamental and new questions about what we can know, what we may hope for, what we should do, and what it means to be human. New technologies have always raised such questions, of course. Historically, it has taken generations to appreciate the gains and losses that came with major technological shifts. Indeed, the impacts of previous shifts—from oral to written communication, from manuscript to print books, from manual to machine power, from human to digital computers—are still being assessed today. The technologies associated with such changes are not ethically neutral: A designer’s intentions shape a particular technology, and its efficacy is complicated by a user’s intentions. Human agency is involved throughout, as we shape technologies and they shape us. But the speed and scale with which digital technologies have been created and adopted has left little time for critical reflection on them and how we may integrate them into our lives intentionally. Regardless of when we were born or the depth of our technological expertise, we are all of us digitally naïve. We lack knowledge of how technologies are developed—especially of proprietary algorithms—so we use tools we do not and cannot fully understand. The strategies and filters we have relied on historically for balancing trust and skepticism are gone or broken, so we do not easily know what information is trustworthy and our access to knowledge is segregated. Ethical considerations are not often raised during the design or adoption of new technologies, so we are not integrating our values into the digital dimension of our lives. Individually and collectively, we are still learning how to design and use new and emerging technologies well and wisely. At best, we are learning new technological competencies. But the larger epistemological and ethical challenges facing us—about how we know what we know and about what we should do with what we know—require much more individual and cultural attention. As we become more digitally literate, we also must seek to become digitally wise. Through this blog we will explore a dialogue between the “digital,” by which we primarily mean new information and communication technologies, and wisdom. While our digital technologies are new, presenting us with new challenges and opportunities, wisdom—including wisdom about technology—is ancient. In particular, we believe the resources of the Christian tradition can help us move from a position of digital naiveté toward one of digital wisdom. Our technologies reveal and influence our values and hopes, and theological reflection can provide a broader framework for evaluating and enriching these. As Ron Cole-Turner has observed, technology is not “irrelevant to the theological meaning of humanity … transformations through technology are part of the larger cosmic drama of creation and redemption.” By bringing technology into conversation with theology, we aim to clarify the relationship between them so that our lives may be truly enhanced. This is an ambitious task, and each of us can only help clarify one modest part of it. 
We hope others will join us in this conversation.
The (pro)renin receptor antagonist, PRO20, attenuates high-fat-diet-induced hepatic steatosis and fibrosis in nonalcoholic fatty liver disease (NAFLD) Nonalcoholic fatty liver disease (NAFLD) comprises a spectrum of liver damage directly related to diabetes, metabolic syndrome, and obesity. The (pro)renin receptor (PRR) in the liver has recently been shown to play an important role in hepatic steatosis. In this study, we test our hypothesis that the PRR antagonist PRO20 reduces hepatic steatosis and fibrosis. Wild-type mice on a C57BL/6J background (n = 9–10/group) were fed either a HFD (60% calories from fat) or normal-fat chow (NFD, 10% calories from fat) with matching calories for 6 weeks. A methionine–choline-deficient (MCD) diet was used to study fibrosis development in C57BL/6J mice for 8 weeks. Two weeks after the respective diet modifications, mice were implanted with a subcutaneous osmotic pump (4- or 6-week release model) containing either PRO20 (700 µg/kg/d) or saline. Liver tissues were processed for Oil Red O (ORO), H&E, and Masson's trichrome histological staining for lipid and collagen accumulation. The presence of lipids, indicative of steatosis, was quantified using ImageJ/FIJI software and presented as the % of ORO staining relative to the total area. We found that 6 weeks of HFD (15.90 ± 1.49%) induced a significant elevation in liver ORO staining compared with the livers of mice fed the NFD (3.60 ± 1.61%, p < 0.0001). More importantly, PRO20 treatment (9.91 ± 1.46%; p = 0.001) significantly reduced ORO staining in mice on the HFD. Hepatic triglyceride concentrations were quantified with a colorimetric assay and shown to be significantly lower in PRO20-treated mice (2.110 ± 0.6175) compared with scrambled-peptide-treated mice (6.470 ± 0.3553, p < 0.0001) under HFD. Collagen area was also found to be reduced in MCD-fed PRO20-treated mice (4.970 ± 0.9096%) compared with control mice (8.901 ± 0.2782, p = 0.0033). Moreover, PRO20 treatment (258 ± 53.12 U/L) decreased liver ALT levels in MCD-fed PRO20 mice compared with control mice (682 ± 116.4 U/L). De novo lipogenesis genes thought to contribute to the development and progression of NAFLD were evaluated by real-time PCR and presented as fold-change expression normalized to actin. Glycerol-3-phosphate acyltransferase 3 (GPAT3) expression (1.835 ± 0.1798, p = 0.0183) was found to be downregulated in the livers of HFD PRO20-treated mice. Our findings indicate that PRO20 reduces HFD-induced hepatic steatosis and MCD-induced liver fibrosis and liver dysfunction. We conclude that PRR antagonism using PRO20 might be an effective therapeutic approach for the treatment of NAFLD.
Calcium Phosphate Nanoparticles Induce Mucosal Immunity and Protection against Herpes Simplex Virus Type 2
ABSTRACT Previously we reported that calcium phosphate nanoparticles (CAP) represented a superior alternative to alum adjuvants in mice immunized with viral protein. Additionally, we showed that CAP was safe and elicited no detectable immunoglobulin E (IgE) response. In this study, we demonstrated that following mucosal delivery of herpes simplex virus type 2 (HSV-2) antigen with CAP, CAP adjuvant enhanced protective systemic and mucosal immunity against live virus. Mice were immunized intravaginally and intranasally with HSV-2 protein plus CAP adjuvant (HSV-2+CAP), CAP alone, phosphate-buffered saline, or HSV-2 alone. HSV-2+CAP induced HSV-specific mucosal IgA and IgG and concurrently enhanced systemic IgG responses. Our results demonstrate the potency of CAP as a mucosal adjuvant. Furthermore, we show that systemic immunity could be induced via the mucosal route following inoculation with the CAP-based vaccine. Moreover, neutralizing antibodies were found in the sera of mice immunized intranasally or intravaginally with HSV-2+CAP. Also, the results of our in vivo experiments indicated that mice vaccinated with HSV-2+CAP were protected against live HSV-2 infection. In conclusion, these preclinical data support the hypothesis that CAP may be an effective mucosal adjuvant that protects against viral infection. Since mucosal surfaces act as the primary point of entry for most pathogens and the first line of defense against them, vaccines inducing effective mucosal immunity may reduce rates of infection and decrease the morbidity and mortality of infectious diseases. Currently, no safe and effective mucosal vaccine adjuvants are approved for human use. Mucosal vaccine delivery is a promising strategy. Mucosal vaccines administered in one part of the body can elicit an antibody response in mucosal tissues remote from the site of initial antigen exposure. This effect occurs because of the common mucosal immune system. A major obstacle to developing a mucosal vaccine in humans is finding a safe and effective adjuvant. Experimental mucosal adjuvants include cholera toxin, heat-labile enterotoxin, mutant toxins (LTK63 and LTR72), CpG oligodeoxynucleotide, polymerized liposomes, microparticles, and interleukins or immune modulators. None of these adjuvants is approved for use in humans. Biodegradable calcium phosphate particles have been investigated as an alternative to aluminum adjuvants for parenteral vaccines. Clinical studies conducted in France described the use of a calcium phosphate adjuvant for secondary or booster immunizations against diphtheria and tetanus. Calcium phosphate has also been used for allergen desensitization. Early studies indicated that calcium phosphate particles produced strong adjuvant effects, induced less immunoglobulin E (IgE) than aluminum adjuvants, and elicited only minimal local irritation in animal experiments and human clinical trials. Here, we describe a unique formulation of calcium phosphate nanoparticles (CAP) which is distinct from the formulations of calcium phosphate described by European scientists, and demonstrate its use as an effective mucosal adjuvant. Our results indicate that following viral challenge, mice immunized with CAP-based formulations of herpes simplex virus type 2 (HSV-2) glycoprotein exhibited significantly increased survival rates and less severe clinical infection than controls.
These findings demonstrate that CAP delivered as a mucosal adjuvant confers protective antiviral immunity.
MATERIALS AND METHODS
Formulation of subunit vaccine. The preparation of partially purified HSV-2 glycoproteins has been described previously. Briefly, infected cells were collected and sonicated. The viral suspension was centrifuged at 5,500 × g for 15 min. Supernatant was collected and treated with 1% IGEPAL (Sigma Chemical Co., St. Louis, Mo.) lysis buffer for 30 min on ice. The solution was centrifuged at 18,500 × g for 2 h. The supernatant was dialyzed against phosphate-buffered saline (PBS) at 4°C and stored at −80°C. Then 1 mg of HSV-2 protein was added to 7.5 ml of 12.5 mM calcium chloride, followed by the addition of 7.5 ml of 12.5 mM dibasic sodium phosphate and 1.5 ml of 15.6 mM sodium citrate. The solution was stirred until the final average particle size was less than 1.2 µm, as determined with a Coulter N4Plus Submicron particle sizer, and was treated with 129 mM cellobiose overnight. The total protein inside CAP was 123 µg. The particle containing HSV-2 protein was coated again with 3.877 mg of HSV-2 proteins by coincubation for 1 h at 4°C. The final concentration of the CAP plus HSV solution was 2 mg of HSV/ml and 10 mg of CAP/ml. The control vaccines were PBS, CAP alone, and HSV-2 protein alone.
Animals. Female BALB/c mice, 6 to 8 weeks old and weighing 25 g, were obtained from Charles River Laboratories. The mice were maintained in standard housing with a normal diet of Purina rodent chow 5001.
Immunization and sample collection. Eight groups of five female BALB/c mice were inoculated intravaginally or intranasally with HSV-2+CAP (20 µg of viral protein plus 100 µg of CAP per dose per mouse), HSV-2 alone (20 µg per dose per mouse), or CAP alone (100 µg per dose per mouse) in a total volume of 50 µl (intravaginally) or 10 µl (intranasally). The mice received two inoculations, on days 0 and 7. Samples were collected 7, 14, and 38 days after the last immunization. Blood was obtained from the orbital sinus, and the serum samples were stored at −20°C. Mucosal samples were collected 14 days after the last immunization by vaginal lavage with 100 µl of PBS. The sediments were removed by centrifugation, and samples were pooled and stored at −20°C.
ELISA. HSV-specific antibodies were detected by an end-point dilution enzyme-linked immunosorbent assay (ELISA) as described previously. Titers for IgG in plasma samples were expressed as group mean ± standard error of the mean of values for individual animals. Titers for IgA and IgG in mucosal samples were expressed as the mean of triplicate assays from pooled mucosal samples.
HSV-2 challenge experiment. Using methods reported previously, mice were injected subcutaneously with DepoProvera (Upjohn, Kalamazoo, Mich.) at a concentration of 2 mg/mouse in 50 µl of distilled water on the 45th day following primary immunization. Five days later, the mice were challenged intravaginally with 10⁶ PFU of HSV-2. Mice were examined daily for genital pathology, and the clinical scoring was performed by an investigator blinded to the animal's immunization status. Clinical pathology was scored on a 5-point scale: 0, no apparent infection; 1, slight redness of external vagina; 2, severe redness and swelling of external vagina; 3, genital ulceration with severe redness, swelling, and hair loss of genital and surrounding tissue; 4, severe ulceration of genital and surrounding tissue and paralysis; and 5, death.
Neutralization assay.
As reported previously, Vero cells were propagated in culture plates. Pooled mouse serum samples from day 38 after the last immunization were incubated with HSV-2 and assessed for the presence of HSV-2-specific neutralizing antibodies by plaque assay. The titer is the reciprocal of the serum dilution required to inhibit the cytolysis of a confluent monolayer of Vero cells by 50%.
Statistical analysis. Pathological data were analyzed by analysis of variance to determine the differences between groups.
RESULTS
As indicated in Fig. 1, both the intranasally and intravaginally HSV-2+CAP-vaccinated mice showed high titers of HSV-specific mucosal IgA and IgG in vaginal lavage fluid at 14 days after the last immunization. Serological IgG and IgG2a titers determined on day 38 after the last immunization showed a systemic response in the mice after intranasal or intravaginal immunization with HSV-2+CAP compared to PBS, CAP alone, or HSV-2 alone (Fig. 2). The neutralization assay was performed at day 38 following secondary immunization. Neutralizing antibodies were found in both the intranasally and intravaginally HSV-2+CAP-immunized mice, at titers of 1:40 and 1:80, respectively. Notably, neutralizing antibodies were absent in the mice inoculated with PBS alone, CAP alone, or HSV-2 alone. Resistance to HSV-2 infection was evaluated by monitoring clinical pathology. On days 6, 8, and 10, the reduced clinical severity in mice intravaginally immunized with HSV-2+CAP achieved statistical significance (P < 0.05) compared to mice immunized with PBS, CAP alone, or HSV-2 alone (Fig. 3, right panel). One of five mice intravaginally inoculated with HSV-2+CAP died from HSV-2 infection, whereas all of the mice intravaginally vaccinated with PBS, HSV-2 alone, and CAP alone developed severe disease and died by day 8 or 10. Similarly, the mice vaccinated intranasally with HSV-2+CAP showed reduced clinical severity compared with mice immunized with PBS, CAP alone, or HSV-2 alone at days 8 and 10 (Fig. 3, left panel). Two of five mice intranasally vaccinated with CAP+HSV-2 died, compared with the controls (i.e., recipients of PBS, CAP alone, and HSV-2 only), all of which died eventually. All surviving mice were kept for 2 more weeks and recovered gradually.
DISCUSSION
The mucosal tissues are the primary routes of entry into the body for microbial pathogens. Vaccines inducing mucosal immunity prevent the transmission of infection via mucosal surfaces. However, no mucosal vaccine adjuvant is currently approved for human use. Because of the weak inherent immunogenicity of some antigens targeted for vaccine development, such as epitope subunits and recombinant peptides, there is a great need for safe and efficient mucosal adjuvants. The only adjuvants used in licensed vaccines in the United States are aluminum compounds, which effectively enhance immune responses. However, human studies have shown them to be weak adjuvants for inducing cell and humoral immunity to some virus protein subunits (S. J. D. Bell, personal observation). Additionally, alum can elicit an IgE antibody response that increases the risk of allergic reactions. We have reported previously that CAP delivered intraperitoneally with HSV-2 and Epstein-Barr virus proteins induced high titers of IgG2a antibody and neutralizing antibody and facilitated a high degree of protection against viral infection in a murine model. In this study, using HSV-2 protein as a model antigen, we evaluated the immunity and efficacy of an HSV-2+CAP experimental vaccine.
Our results indicated that mice vaccinated either intravaginally or intranasally with HSV-2+CAP had high antibody levels at mucosal surfaces and effective neutralizing antibody titers and were protected against virus infection. We assumed that the neutralizing antibody prevented the attachment of pathogens to the epithelial surfaces and conferred protection against subsequent viral infection. Our findings also confirmed the previous studies showing that antibodies can efficiently neutralize virus in mucosal areas. The immune system within the female lower genital tract is the initial defense against sexually transmitted diseases. Our study suggested that intravaginal immunization induced relatively higher levels of mucosal IgG and IgA than intranasal immunization, providing optimal protection against HSV-2 infection. This observation supports the findings of others and suggests that genital local immunity and the Th1 response, in association with other protecting factors, such as local production of antibodies and viral clearance from the vaginal mucosa, play a major role in HSV-2 infection in mice. Our next step is to prove that CD4+ T cells secreting gamma interferon and B cells or natural antibodies are critical for immune protection against lethal genital HSV-2 reinfection. The exact mechanism of the adjuvant action of CAP is not fully understood. M cells in the mucosal tissues are known to reside exclusively in the epithelium and deliver foreign material by transepithelial transport from the lumen to the underlying mucosa-associated lymphoid tissue. Particulate antigens are desirable because they permit M cells to translocate them across the tight epithelial barrier to mucosal dendritic cells. Therefore, the particulate mucosal vaccine created from the combination of soluble antigens formulated within CAP provides the particulate form needed for efficient uptake by M cells. Recent comparative studies have indicated that microparticles are potent adjuvants for mucosal delivery. However, microparticles are not an ideal size for inducing cellular immunity because they tend to be too large, and it is believed that M cells, dendritic cells, macrophages, and local lymph nodes are more effective at taking up smaller particles. Advantageously, CAP are generally in the preferred size range (i.e., less than 1.2 µm, versus 1-µm-sized polymers) and also stimulate cellular immunity and cytotoxic T lymphocyte responses (unpublished data). Based on these results, we conclude that (i) the CAP-based HSV-2 subunit vaccine appears to concurrently induce both systemic and mucosal immunity and (ii) CAP shows great potential as a safe and effective mucosal vaccine adjuvant for humans, given its relative absence of side effects and lack of IgE antibody induction.
Rhode Island has recently learned that its renewable energy standards could be ruinously expensive. But they’re in good company: more than a dozen states have “learned” the same thing, from reports from the same economists at the Beacon Hill Institute (BHI). Housed at Boston’s Suffolk University, BHI turns out study after study for right-wing, anti-government groups. Funding for BHI’s relentless efforts has come from Charles and David Koch (leading tea party funders) and others on the same wavelength. For the Rhode Island study, BHI teamed up with the Rhode Island Center for Freedom & Prosperity, a member of the Koch’s State Policy Network. While BHI’s name and location place it close to the Massachusetts state government, it is philosophically a different beacon on a different hill. Last year BHI requested a grant from the Searle Freedom Trust, aimed at undermining the Regional Greenhouse Gas Initiative (RGGI), a multi-state effort that Massachusetts participates in. The grant application said, “Success will take the form of media recognition … and legislative activity that will pare back or repeal RGGI.” Suffolk vice-president Greg Gatlin said that BHI had not gone through the university’s required grant approval process, and “the University would not have authorized this grant proposal as written.” As it turned out, the proposal was not funded. BHI has worked closely with the American Legislative Exchange Council (ALEC), a corporate-funded network of ultra-conservative legislators and policy analysts, which drafts and advocates laws that will push state policies to the right. After gaining notoriety for supporting “stand your ground” gun laws, ALEC has now decided to downplay social issues and refocus on its core economic mission: attacking Obamacare, progressive taxation, and environmental protection. In the effort to roll back renewable energy targets and standards, ALEC and its local partners have sponsored numerous BHI studies of individual state renewable energy policies. The conclusion, in every case, is that wind and solar energy are exorbitantly expensive, energy efficiency cannot be counted on, and there’s nothing like good old fossil fuels – except, of course, for nuclear power. David Tuerck, the head of both BHI and Suffolk’s economics department, told the Washington Post that Koch funding did not determine the institute’s conclusions about renewable energy. Its reports, however, are decidedly Koch-friendly. BHI’s Rhode Island study, for example, pointed to an analysis done by another conservative think tank that in turn relied on a 2006 study, which was cautiously optimistic about the prospects for wind power in Britain. After being filtered through two American anti-renewable-energy think tanks, that study came out sounding like something different altogether. BHI claims that wind is so intermittent that expensive fossil-fuel generation is always needed as backup; in contrast, the original British study says that at the levels of wind adoption “foreseeable in the next 20 years, it is neither necessary nor appropriate to allocate dedicated ‘back up’ or reserve plant” to wind energy facilities. The misrepresentations of renewable energy in the BHI reports are too numerous to list here. Last year, with several colleagues, I wrote a critique of the BHI/ALEC energy studies, which dives into the details. Perhaps the most outrageous was the treatment of wind power, which is rapidly becoming competitive with conventional sources of electricity. 
(Nine states get more than 10 percent of their electricity from wind.) Earlier BHI anti-renewable energy studies often presented low, medium, and high estimates of wind costs, just like a normal academic analysis. In fact, data on actual costs show that all wind installations in recent years have been cheaper than BHI’s low case. In other words, real data show that BHI’s three estimates of wind power costs were too high, far too high, and absurdly too high.

The Koch brothers may be getting what they paid for in BHI’s steady stream of anti-environmental analyses. It’s less clear why Suffolk University tolerates this blatantly partisan institute, seeking to overturn sound Massachusetts and national policies. Beacon Hill isn’t the place for them; there must be a small hill somewhere in Texas where they would feel more at home. Then again, despite an abundance of anti-environmental politicians, Texas has installed a lot of wind power, because even there it looks cheap.

Frank Ackerman is a senior economist at Synapse Energy Economics in Cambridge, MA, and a lecturer at MIT.
package com.mercateo.test.clock.example.timer;

import static org.assertj.core.api.Assertions.assertThat;

import java.time.Clock;
import java.time.OffsetDateTime;

import org.junit.Test;

public class MyWatchTest {

    @Test
    public void testNow() {
        // given
        OffsetDateTime given = OffsetDateTime.parse("2018-11-07T10:12:12.414+01:00");
        Clock clock = Clock.fixed(given.toInstant(), given.getOffset());
        MyWatch myWatch = new MyWatch(clock);

        // when
        String now = myWatch.now();

        // then
        assertThat(now).isEqualTo("2018-11-07T10:12:12.414+01:00");
    }
}
<gh_stars>1000+ package com.github.jknack.handlebars.helper; import static com.github.jknack.handlebars.helper.StringHelpers.abbreviate; import static com.github.jknack.handlebars.helper.StringHelpers.capitalize; import static com.github.jknack.handlebars.helper.StringHelpers.capitalizeFirst; import static com.github.jknack.handlebars.helper.StringHelpers.center; import static com.github.jknack.handlebars.helper.StringHelpers.cut; import static com.github.jknack.handlebars.helper.StringHelpers.defaultIfEmpty; import static com.github.jknack.handlebars.helper.StringHelpers.ljust; import static com.github.jknack.handlebars.helper.StringHelpers.lower; import static com.github.jknack.handlebars.helper.StringHelpers.replace; import static com.github.jknack.handlebars.helper.StringHelpers.rjust; import static com.github.jknack.handlebars.helper.StringHelpers.slugify; import static com.github.jknack.handlebars.helper.StringHelpers.stringFormat; import static com.github.jknack.handlebars.helper.StringHelpers.stripTags; import static com.github.jknack.handlebars.helper.StringHelpers.substring; import static com.github.jknack.handlebars.helper.StringHelpers.upper; import static com.github.jknack.handlebars.helper.StringHelpers.wordWrap; import static com.github.jknack.handlebars.helper.StringHelpers.yesno; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.util.*; import org.junit.Test; import com.github.jknack.handlebars.AbstractTest; import com.github.jknack.handlebars.Context; import com.github.jknack.handlebars.Handlebars; import com.github.jknack.handlebars.Helper; import com.github.jknack.handlebars.Options; import com.github.jknack.handlebars.TagType; import com.github.jknack.handlebars.Template; /** * Unit test for {@link StringHelpers}. 
* * @author edgar.espina * @since 0.2.2 */ public class StringHelpersTest extends AbstractTest { @Test public void capFirst() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); assertEquals("capitalizeFirst", capitalizeFirst.name()); assertEquals("Handlebars.java", capitalizeFirst.apply("handlebars.java", options)); verify(options).isFalsy(any()); } @Test public void center() throws IOException { Options options = mock(Options.class); when(options.hash("size")).thenReturn(19); when(options.isFalsy(any())).thenReturn(false); when(options.hash("pad", " ")).thenReturn(null); assertEquals("center", center.name()); assertEquals(" Handlebars.java ", center.apply("Handlebars.java", options)); verify(options).hash("size"); verify(options).isFalsy(any()); verify(options).hash("pad", " "); } @Test public void centerWithPad() throws IOException { Options options = mock(Options.class); when(options.hash("size")).thenReturn(19); when(options.hash("pad", " ")).thenReturn("*"); when(options.isFalsy(any())).thenReturn(false); assertEquals("center", center.name()); assertEquals("**Handlebars.java**", center.apply("Handlebars.java", options)); verify(options).hash("size"); verify(options).hash("pad", " "); verify(options).isFalsy(any()); } @Test public void cut() throws IOException { Options options = mock(Options.class); when(options.param(0, " ")).thenReturn(" "); when(options.isFalsy(any())).thenReturn(false); assertEquals("cut", cut.name()); assertEquals("handlebars.java", cut.apply("handle bars . java", options)); verify(options).param(0, " "); verify(options).isFalsy(any()); } @Test public void cutNoWhitespace() throws IOException { Options options = mock(Options.class); when(options.param(0, " ")).thenReturn("*"); when(options.isFalsy(any())).thenReturn(false); assertEquals("cut", cut.name()); assertEquals("handlebars.java", cut.apply("handle*bars*.**java", options)); verify(options).param(0, " "); verify(options).isFalsy(any()); } @Test public void defaultStr() throws IOException { Options options = mock(Options.class); when(options.param(0, "")).thenReturn("handlebars.java"); assertEquals("defaultIfEmpty", defaultIfEmpty.name()); assertEquals("handlebars.java", defaultIfEmpty.apply(null, options)); assertEquals("handlebars.java", defaultIfEmpty.apply(false, options)); assertEquals("handlebars.java", defaultIfEmpty.apply(Collections.emptyList(), options)); assertEquals("something", defaultIfEmpty.apply("something", options)); verify(options, times(3)).param(anyInt(), anyString()); } @Test public void joinIterable() throws IOException { shouldCompileTo("{{{join this \", \"}}}", Arrays.asList("6", "7", "8"), $("join", StringHelpers.join), "6, 7, 8"); } @Test public void joinEmptyList() throws IOException { shouldCompileTo("{{{join this \", \"}}}", Collections.emptyList(), $("join", StringHelpers.join), ""); } @Test public void joinIterator() throws IOException { shouldCompileTo("{{{join this \", \"}}}", Arrays.asList("6", "7", "8").iterator(), $("join", StringHelpers.join), "6, 7, 8"); } @Test public void joinArray() throws IOException { shouldCompileTo("{{{join this \", \"}}}", new String[]{"6", "7", "8" }, $("join", StringHelpers.join), "6, 7, 8"); } @Test public void joinValues() throws IOException { shouldCompileTo("{{{join \"6\" 7 n8 \"-\"}}}", $("n8", 8), $("join", StringHelpers.join), "6-7-8"); } @Test public void joinWithPrefixAndSuffix() throws IOException { shouldCompileTo("{{{join this \", \" prefix='<' suffix='>'}}}", 
Arrays.asList("6", "7", "8"), $("join", StringHelpers.join), "<6, 7, 8>"); } @Test public void ljust() throws IOException { Options options = mock(Options.class); when(options.hash("size")).thenReturn(20); when(options.hash("pad", " ")).thenReturn(null); when(options.isFalsy(any())).thenReturn(false); assertEquals("ljust", ljust.name()); assertEquals("Handlebars.java ", ljust.apply("Handlebars.java", options)); verify(options).hash("size"); verify(options).hash("pad", " "); verify(options).isFalsy(any()); } @Test public void ljustWithPad() throws IOException { Options options = mock(Options.class); when(options.hash("size")).thenReturn(17); when(options.hash("pad", " ")).thenReturn("+"); when(options.isFalsy(any())).thenReturn(false); assertEquals("ljust", ljust.name()); assertEquals("Handlebars.java++", ljust.apply("Handlebars.java", options)); verify(options).hash("size"); verify(options).hash("pad", " "); verify(options).isFalsy(any()); } @Test public void rjust() throws IOException { Options options = mock(Options.class); when(options.hash("size")).thenReturn(20); when(options.hash("pad", " ")).thenReturn(null); when(options.isFalsy(any())).thenReturn(false); assertEquals("rjust", rjust.name()); assertEquals(" Handlebars.java", rjust.apply("Handlebars.java", options)); verify(options).hash("size"); verify(options).hash("pad", " "); verify(options).isFalsy(any()); } @Test public void rjustWithPad() throws IOException { Options options = mock(Options.class); when(options.hash("size")).thenReturn(17); when(options.hash("pad", " ")).thenReturn("+"); when(options.isFalsy(any())).thenReturn(false); assertEquals("rjust", rjust.name()); assertEquals("++Handlebars.java", rjust.apply("Handlebars.java", options)); verify(options).hash("size"); verify(options).hash("pad", " "); verify(options).isFalsy(any()); } @Test public void substringWithStart() throws IOException { Handlebars hbs = mock(Handlebars.class); Context ctx = mock(Context.class); Template fn = mock(Template.class); Options options = new Options.Builder(hbs, substring.name(), TagType.VAR, ctx, fn) .setParams(new Object[]{11 }) .build(); assertEquals("substring", substring.name()); assertEquals("java", substring.apply("Handlebars.java", options)); } @Test public void substringWithStartAndEnd() throws IOException { Handlebars hbs = mock(Handlebars.class); Context ctx = mock(Context.class); Template fn = mock(Template.class); Options options = new Options.Builder(hbs, substring.name(), TagType.VAR, ctx, fn) .setParams(new Object[]{0, 10 }) .build(); assertEquals("substring", substring.name()); assertEquals("Handlebars", substring.apply("Handlebars.java", options)); } @Test public void lower() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); assertEquals("lower", lower.name()); assertEquals("handlebars.java", lower.apply("Handlebars.java", options)); verify(options).isFalsy(any()); } @Test public void upper() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); assertEquals("upper", upper.name()); assertEquals("HANDLEBARS.JAVA", upper.apply("Handlebars.java", options)); verify(options).isFalsy(any()); } @Test public void slugify() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); assertEquals("slugify", slugify.name()); assertEquals("joel-is-a-slug", slugify.apply(" Joel is a slug ", options)); verify(options).isFalsy(any()); } @Test public void replace() throws 
IOException { Handlebars hbs = mock(Handlebars.class); Context ctx = mock(Context.class); Template fn = mock(Template.class); Options options = new Options.Builder(hbs, replace.name(), TagType.VAR, ctx, fn) .setParams(new Object[]{"...", "rocks" }) .build(); assertEquals("replace", replace.name()); assertEquals("Handlebars rocks", replace.apply("Handlebars ...", options)); } @Test public void stringFormat() throws IOException { Handlebars hbs = mock(Handlebars.class); Context ctx = mock(Context.class); Template fn = mock(Template.class); Options options = new Options.Builder(hbs, stringFormat.name(), TagType.VAR, ctx, fn) .setParams(new Object[]{"handlebars.java" }) .build(); assertEquals("stringFormat", stringFormat.name()); assertEquals("Hello handlebars.java!", stringFormat.apply("Hello %s!", options)); } @Test public void stringDecimalFormat() throws IOException { Handlebars hbs = mock(Handlebars.class); Context ctx = mock(Context.class); Template fn = mock(Template.class); Options options = new Options.Builder(hbs, stringFormat.name(), TagType.VAR, ctx, fn) .setParams(new Object[]{10.0 / 3.0 }) .build(); assertEquals("stringFormat", stringFormat.name()); assertEquals(String.format("10 / 3 = %.2f", 10.0 / 3.0), stringFormat.apply("10 / 3 = %.2f", options)); } @Test public void stripTags() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); assertEquals("stripTags", stripTags.name()); assertEquals("Joel is a slug", stripTags.apply("<b>Joel</b> <button>is</button> a <span>slug</span>", options)); verify(options).isFalsy(any()); } @Test public void stripTagsMultiLine() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); assertEquals("stripTags", stripTags.name()); assertEquals("Joel\nis a slug", stripTags.apply( "<b>Joel</b>\n<button>is<\n/button> a <span>slug</span>", options)); verify(options).isFalsy(any()); } @Test public void capitalize() throws IOException { Options options = mock(Options.class); when(options.hash("fully", false)).thenReturn(false) .thenReturn(true); when(options.isFalsy(any())).thenReturn(false); assertEquals("capitalize", capitalize.name()); assertEquals("Handlebars Java", capitalize.apply("handlebars java", options)); assertEquals("Handlebars Java", capitalize.apply("HAndleBars JAVA", options)); verify(options, times(2)).hash("fully", false); verify(options, times(2)).isFalsy(any()); } @Test public void abbreviate() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); when(options.param(0, null)).thenReturn(13); assertEquals("abbreviate", abbreviate.name()); assertEquals("Handlebars...", abbreviate.apply("Handlebars.java", options)); verify(options).isFalsy(any()); verify(options).param(0, null); } @Test public void wordWrap() throws IOException { Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(false); when(options.param(0, null)).thenReturn(5); assertEquals("wordWrap", wordWrap.name()); assertEquals("Joel" + System.lineSeparator() + "is a" + System.lineSeparator() + "slug", wordWrap.apply("Joel is a slug", options)); verify(options).isFalsy(any()); verify(options).param(0, null); } @Test public void yesno() throws IOException { Options options = mock(Options.class); when(options.hash("yes", "yes")).thenReturn("yes"); when(options.hash("no", "no")).thenReturn("no"); when(options.hash("maybe", "maybe")).thenReturn("maybe"); assertEquals("yesno", yesno.name()); 
assertEquals("yes", yesno.apply(true, options)); assertEquals("no", yesno.apply(false, options)); assertEquals("maybe", yesno.apply(null, options)); verify(options).hash("yes", "yes"); verify(options).hash("no", "no"); verify(options).hash("maybe", "maybe"); } @Test public void yesnoCustom() throws IOException { Options options = mock(Options.class); when(options.hash("yes", "yes")).thenReturn("yea"); when(options.hash("no", "no")).thenReturn("nop"); when(options.hash("maybe", "maybe")).thenReturn("whatever"); assertEquals("yesno", yesno.name()); assertEquals("yea", yesno.apply(true, options)); assertEquals("nop", yesno.apply(false, options)); assertEquals("whatever", yesno.apply(null, options)); verify(options).hash("yes", "yes"); verify(options).hash("no", "no"); verify(options).hash("maybe", "maybe"); } @Test public void nullContext() throws IOException { Set<Helper<Object>> helpers = new LinkedHashSet<>(Arrays.asList(StringHelpers.values())); helpers.remove(StringHelpers.join); helpers.remove(StringHelpers.yesno); helpers.remove(StringHelpers.defaultIfEmpty); helpers.remove(cut); Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(true); when(options.param(0, null)).thenReturn(null); for (Helper<Object> helper : helpers) { assertNull(helper.apply($, options)); } verify(options, times(helpers.size() - 1)).isFalsy(any()); verify(options, times(helpers.size())).param(0, null); } @Test public void nullContextWithDefault() throws IOException { Set<Helper<Object>> helpers = new LinkedHashSet<>(Arrays.asList(StringHelpers.values())); helpers.remove(StringHelpers.join); helpers.remove(StringHelpers.yesno); helpers.remove(StringHelpers.defaultIfEmpty); helpers.remove(cut); String nothing = "nothing"; Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(true); when(options.param(0, null)).thenReturn(nothing); for (Helper<Object> helper : helpers) { assertEquals(nothing, helper.apply($, options)); } verify(options, times(helpers.size() - 1)).isFalsy(any()); verify(options, times(helpers.size())).param(0, null); } @Test public void nullContextWithNumber() throws IOException { Set<Helper<Object>> helpers = new LinkedHashSet<>(Arrays.asList(StringHelpers.values())); helpers.remove(StringHelpers.join); helpers.remove(StringHelpers.yesno); helpers.remove(StringHelpers.defaultIfEmpty); helpers.remove(cut); Object number = 32; Options options = mock(Options.class); when(options.isFalsy(any())).thenReturn(true); when(options.param(0, null)).thenReturn(number); for (Helper<Object> helper : helpers) { assertEquals(number.toString(), helper.apply($, options)); } verify(options, times(helpers.size() - 1)).isFalsy(any()); verify(options, times(helpers.size())).param(0, null); } }
NON-EXISTENCE OF GLOBAL SOLUTIONS TO NONLINEAR WAVE EQUATIONS WITH POSITIVE INITIAL ENERGY. We consider the Cauchy problem for nonlinear abstract wave equations in a Hilbert space. Our main goal is to show that this problem has solutions with arbitrary positive initial energy that blow up in a finite time. The main theorem is proved by employing a result on growth of solutions of abstract nonlinear wave equations and the concavity method. A number of examples of nonlinear wave equations are given. A result on blow up of solutions with arbitrary positive initial energy to the initial boundary value problem for the wave equation under nonlinear boundary conditions is also obtained.

1. Introduction. The paper is devoted to the question of non-existence of global solutions of second order abstract wave equations of the form

Pu_tt + Au + Qu_t = F(u),

under the initial conditions u(0) = u₀, u_t(0) = u₁, in a real Hilbert space H with the inner product (·,·) and the corresponding norm ‖·‖. Here A is a densely defined selfadjoint positive definite operator in the Hilbert space H, P is a selfadjoint densely defined positive operator and Q is a selfadjoint densely defined non-negative operator in H, and F : D(A^{1/2}) → H is a nonlinear gradient operator with the potential G(·) which satisfies the structural condition for some α > 0 and R₀ ≥ 0.

As far as we know, the first result about non-existence of a global solution of an evolution equation of the form above in a Hilbert space H is a global non-existence theorem obtained by using the energy method for the equation with P = I, Q = 0 and a nonlinear gradient operator F(·) with potential G(u) that satisfies the conditions

(F(u), u) ≥ λG(u),  (F(u), u) ≥ λG(‖u‖²),  ∀u ∈ D(A^{1/2}),

where λ > 2 is a given number and the continuous function G : ℝ₊ → ℝ₊ satisfies ∫_{v₀}^{∞} dv/G(v) < ∞ for some v₀ > 0.

One of the effective techniques which has been employed to demonstrate when a solution to a nonlinear partial differential equation blows up in a finite time is the concavity method. The idea of the concavity method, introduced in [20], is based on the construction of some positive functional ψ(t) = ψ(u(t)), which is defined in terms of the local solution of the problem (the local solvability of the problem is therefore required), and on proving that the function ψ(t) satisfies the inequality given in the following statement.

Lemma 1.1 ([20]). Let ψ(t) be a positive, twice differentiable function, which satisfies the inequality

ψ″(t)ψ(t) − (1 + α)(ψ′(t))² ≥ 0

for some α > 0. If ψ(t₀) > 0 and ψ′(t₀) > 0, then there exists a time t₁ ≤ t₀ + ψ(t₀)/(αψ′(t₀)) such that ψ(t) → +∞ as t → t₁⁻.

The concavity method and its modifications were used in the study of various nonlinear partial differential equations. One of the main conditions on initial data guaranteeing non-existence of a global solution of the problems considered in these papers is negativity of the initial energy of the corresponding problem. In a number of papers employing the potential well theory it is shown that some solutions of nonlinear wave equations with positive energy may blow up in a finite time. In recent years considerable attention has been given to the question of global non-existence of solutions to initial boundary value problems for nonlinear wave equations with arbitrary positive initial energy.
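For completeness, here is the short computation behind Lemma 1.1; the sketch is ours, written in LaTeX with the notation ψ, α used above. The point is that the hypothesis says exactly that ψ^{−α} is concave, and a positive concave function with strictly negative initial slope must reach zero in finite time.

% The lemma's inequality makes \psi^{-\alpha} concave:
\[
\bigl(\psi^{-\alpha}\bigr)''
  = -\alpha\,\psi^{-\alpha-2}\Bigl(\psi''\psi - (1+\alpha)\bigl(\psi'\bigr)^{2}\Bigr) \le 0 .
\]
% Concavity bounds \psi^{-\alpha} from above by its tangent line at t_0:
\[
0 < \psi^{-\alpha}(t) \le \psi^{-\alpha}(t_{0}) - \alpha\,\psi^{-\alpha-1}(t_{0})\,\psi'(t_{0})\,(t - t_{0}) ,
\]
% and the right-hand side vanishes at
\[
t_{1} = t_{0} + \frac{\psi(t_{0})}{\alpha\,\psi'(t_{0})} ,
\]
% so \psi(t) must blow up no later than t_1, which is the conclusion of the lemma.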
The concavity method and its modifications are employed to find sufficient conditions for blow up of solutions with arbitrarily large positive initial energy to the Cauchy problem and initial boundary value problems for the nonlinear Klein-Gordon equation, the damped Kirchhoff-type equation, the generalized Boussinesq equation, quasilinear strongly damped wave equations and some other equations (see, e.g., the references therein). Our main goal is to show that non-existence of global solutions with arbitrary positive initial energy of the problem can actually be established for a wider class of nonlinear wave equations than the equations considered in the preceding papers, by using Lemma 1.1 and a modification of the following theorem on growth of solutions of the problem with Q = 0. We would also like to note that our main result on non-existence of global solutions of the problem is new even for the case of undamped equations, i.e. equations of the form above with Q ≡ 0.

Theorem 1.2. Suppose that the structural condition on F holds for some α > 0, u is a solution of the problem, and the initial data satisfy the two conditions stated below. Then lim_{t→+∞} (u(t), Pu(t)) = +∞ if u(·) exists on (0, +∞).

2. Non-existence of global solutions to the abstract wave equation. In this section we show that a version of Theorem 1.2, due to H. A. Levine, is valid also for the solutions of the damped equation. We show that if u(t) is a global solution of the problem for initial data satisfying suitable conditions (including initial data with arbitrary positive initial energy), then it becomes unbounded as t → ∞. Then, by using this result, we prove our main Theorem 2.2 about non-existence of global solutions of the problem.

Set ψ(t) := (u(t), Pu(t)). Employing the equation, utilizing the condition on F and the definition of the energy, we obtain a differential inequality for ψ; thanks to the energy identity we deduce from it a refined inequality. First suppose that the first condition on the initial data holds true. Then the last inequality implies that ψ′(t) → +∞ and ψ(t) → +∞ as t → +∞.

Suppose now that the second condition holds true. Then, upon multiplying the inequality by ψ(t) ≥ 0, and in view of the energy equality, we may write a further estimate. In view of the definition of ψ(t), an application of the Cauchy-Schwarz inequality for the inner product shows that the difference composed of the first two terms on its right hand side is nonnegative. Consequently, we deduce an inequality involving a positive parameter C₀.

Following [20], we define the positive functional H(t) := ψ^{−α}(t); multiplying the inequality by −αψ^{−α−2}(t), we see that H(t) satisfies a second order differential inequality. Now suppose that the second condition on the initial data holds, which translates into a condition on H and H′ at t = 0. We claim that H′(t) < 0 for all t ≥ 0. If not, then there is a smallest time τ > 0 such that H′(τ) = 0. Upon multiplying the inequality by H′(t), we deduce a further inequality; integrating it and rearranging, we get an estimate whose right hand side is, thanks to the condition on the initial data, strictly positive. Hence the left hand side must be strictly positive for each t ∈ [0, τ]. This contradicts our assumption H′(τ) = 0. Thus we conclude that H′(t) < 0 for all t ≥ 0.

This, in turn, implies that the inequality is valid for all t ≥ 0, and its left hand side is strictly positive for all t ≥ 0. Also note that, as H′(t) < 0, the first term in brackets on the left hand side of the inequality is strictly negative. Thus we conclude that the second term in brackets is also strictly negative for all times. Translating back to ψ, this means that ψ(t) grows as t² and ψ′(t) grows as t as time increases.
Let us note that only the right hand side of the last inequality involves the positive parameter C₀. Therefore, we choose C₀ so that the right hand side is minimal. It is easy to check that, with this choice of C₀, the resulting condition is equivalent to the second condition on the initial data. The first statement of the theorem then easily follows from the inequality. This finishes the proof.

Next, suppose that u(t) is a global solution to the problem with initial data that satisfy the condition. From the inequality we immediately deduce the statement of the theorem.

Theorem 2.2. Suppose that the operators P, Q, A and F satisfy all the conditions of Theorem 2.1. Let u(t) be the solution to the problem with initial data satisfying one of the two conditions above. Further suppose that there exist a₀, a₁ > 0 such that the corresponding coercivity condition holds. Then there exists t₁ > 0 such that ψ(t) → +∞ as t → t₁⁻. Moreover, there are infinitely many initial data with arbitrarily large initial energy E(u₀, u₁) for which the corresponding solutions blow up in a finite time.

For local existence of weak and strong solutions of the Cauchy problem for second order evolution equations of the form considered here we refer to the literature and the references therein. Let us note that, under some natural restrictions on the nonlinearity and its potential, local solvability of the problem can be established by the Galerkin method.

Hence, we deduce from the previous inequality that there exists t₀* ≥ 0 such that, for all t ≥ t₀*, the main inequality in the assumptions of Lemma 1.1 holds. Moreover, as stated in Theorem 2.1, we also have that ψ(t) → +∞ as t → +∞, which implies that we can find t₀ ≥ t₀* such that ψ′(t₀) > 0. Finally, both of the assumptions on the initial data necessarily imply that u₀ ≢ 0, which ensures, by our choice of C₀, that ψ(t) ≥ C₀ > 0 for any t ≥ 0, in particular ψ(t₀) > 0. Hence all assumptions of Lemma 1.1 are satisfied after the time t = t₀, and consequently ψ(t) → +∞ in a finite time. This contradicts our initial assumption that the solution was global.

It remains to show that the last statement of the theorem holds true. Suppose that u₀ is an arbitrary element of D satisfying the required sign condition; note that this condition is necessary for the assumption on the initial data to hold. The condition for this choice of u₁ takes a form which can be seen to be equivalent to an inequality between the two parameters; in view of this, the inequality is satisfied when the parameter entering u₁ is small enough. Moreover, the initial energy E(u₀, u₁) can take arbitrarily large values when the other parameter is large enough. Finally, let us note that the condition is satisfied, for instance, if the potential G(·) satisfies suitable sign and growth conditions; clearly these hold for u₀ = λw with λ ≫ 1 and w ∈ D such that G(w) > 0.

Remark 1. It is also worth mentioning that the virtue of the conditions of Theorem 2.2 lies in the fact that they provide blowing up solutions with initial data having arbitrarily large positive initial energies. For small positive initial energies, however, these conditions cannot recapture what is already known, for instance, for the problem on a bounded domain Ω ⊂ ℝ³ with smooth boundary ∂Ω. The potential well theory gives a full characterization of the behaviour of the solutions, in terms of global existence/non-existence, for initial energies less than the depth of the potential well. For example, take R₀ = 0 and suppose that the initial data verify E(u₀, u₁) = 0.
In this case we know from the potential well theory that all nonzero solutions must blow up in finite time, whereas the conditions of Theorem 2.2 cover only initial data that satisfy (u₀, u₁) > 0.

3. Examples of nonlinear wave equations.

Example 1. Nonlinear Klein-Gordon equation. Let u be a local strong solution to the Cauchy problem

u_tt − Δₓu + m²u = |u|²u,  u(0) = u₀,  u_t(0) = u₁,

where m > 0 is a given number and u₀ ∈ H¹(ℝ³), u₁ ∈ L²(ℝ³) are given compactly supported functions. The equation can be written in the abstract form with P = I, A = −Δₓ + m²I and F(u) = |u|²u. It is easy to see that this nonlinearity satisfies the structural condition with α = 1/2 > 0 and R₀ = 0. Moreover, the coercivity condition holds with a₀ = m². Thus it follows from Theorem 2.2 that, if the initial energy is nonnegative and the initial data satisfy the conditions of the theorem, the solution blows up in a finite time.

A similar result holds for the initial boundary value problem for the semilinear wave equation with a power nonlinearity, where Ω ⊂ ℝⁿ is a bounded domain with smooth boundary ∂Ω, p is an arbitrary positive number if n = 1, 2 and p ∈ (0, 2/(n−2)] if n ≥ 3. Let us note that this result easily follows from the results of T. Cazenave and Theorem 1.2 of H. A. Levine. Indeed, T. Cazenave proved that each solution of the problem either blows up in a finite time or is uniformly bounded. Thus, if the functions u₀, u₁ satisfy the conditions of Theorem 1.2, then the corresponding local solution of the problem cannot be continued to the whole interval [0, ∞), i.e. it must blow up in a finite time. For results on local solvability of the Cauchy problem and initial boundary value problems for semilinear Klein-Gordon equations see, e.g., the references therein.

Example 2. Generalized Boussinesq equation. Similarly, we can find sufficient conditions for blow up of solutions with arbitrary positive initial energy for the generalized Boussinesq equation under homogeneous Dirichlet boundary conditions, where f(u) = |u|^m u + P_{m−1}(u), m ≥ 1 is a given integer, the remaining coefficient is a given positive number, Ω ⊂ ℝⁿ is a bounded domain and P_{m−1}(u) is a polynomial of order ≤ m − 1. Since Ω is bounded, the Poincaré inequality assures that the assumption is verified. Hence the conclusion of Theorem 2.2 holds provided the assumptions of Theorem 1.2 are fulfilled, that is, u₀ and u₁ satisfy the corresponding conditions.

Example 3. Nonlinear plate equations. It is clear that we can apply Theorem 2.2 to find sufficient conditions for blow up of solutions to initial boundary value problems for nonlinear plate equations of the form considered above, where f(·) : ℝ → ℝ is a continuous function which satisfies the sign condition.

Further examples include: the Cauchy problem and initial boundary value problem for a system of nonlinear Klein-Gordon equations, where h₁, h₂ ∈ L²(ℝ³) are given functions and m, b and the remaining coefficients are positive numbers; the initial boundary value problem under homogeneous Dirichlet boundary conditions for the strongly damped wave equation, where P is some polynomial of order less than p + 1; and the initial boundary value problem under homogeneous Dirichlet boundary conditions for the nonlinear wave equation with a structural damping term, where f : ℝ → ℝ is a continuous function that satisfies the condition. For local solvability of the initial boundary value problems for the last two equations we refer to the literature, where the authors employed the fact that the semigroups generated by the corresponding linear problems are analytic.

Finally, the initial boundary value problem for a quasilinear strongly damped wave equation, where p > m ≥ 2 are given numbers, can be written as an abstract Cauchy problem in the Hilbert space H = L²(Ω). Since p > m ≥ 2, the structural condition is satisfied with α = p/4 − 1/2 > 0 and R₀ = 0; a short check of this value, and of α = 1/2 in Example 1, follows.
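Both values of α just quoted are consistent with the structural condition on F having the classical form (F(u), u) ≥ 2(1 + 2α)G(u) − R₀. That form is our assumption, since the displayed condition itself is not reproduced above, but both examples then match exactly:

% Example 1: F(u) = |u|^2 u with potential G(u) = (1/4) \int |u|^4 dx:
\[
(F(u),u) = \int_{\mathbb{R}^{3}} |u|^{4}\,dx = 4\,G(u) = 2(1+2\alpha)\,G(u)
\quad\text{with } \alpha = \tfrac{1}{2},\ R_{0} = 0 .
\]
% Quasilinear example: f(u) = |u|^{p-2}u with potential G(u) = (1/p) \int_\Omega |u|^p dx:
\[
(f(u),u) = \int_{\Omega} |u|^{p}\,dx = p\,G(u) = 2(1+2\alpha)\,G(u)
\quad\text{with } \alpha = \tfrac{p}{4} - \tfrac{1}{2},\ R_{0} = 0 .
\]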
Initial boundary value problem for the nonlinear Love equation. Here f(u) = |u|^{p−2}u + P(u) + h(x), P(u) is a polynomial of order < p − 1, h ∈ L² is a given function, and m > 2, p > 2, b > 0, c > 0, a ∈ ℝ are given numbers. We assume also that m < p when a > 0. This problem can be written as a Cauchy problem in the Hilbert space H = L². Employing Young's inequality, we can show that the structural condition is satisfied with some α > 0 and R₀ > 0. Local solvability of this problem, as well as blow up of its solutions with nonpositive initial energy when a < 0 and f(u) = |u|^{m−2}u, is discussed in the literature.

Employing this, similarly to the proof of Theorem 1.2, we can show that if

(u₀, u₁)‖u₀‖² > 2E(u₀, u₁) + … ,

then ψ(t) = ‖u(t)‖² → +∞ as t → +∞. Finally, arguing as in the proof of Theorem 2.2, we obtain the required concavity inequality. Thanks to this inequality and Lemma 1.1, we obtain the analogous blow-up result for the problem with Γ₁ ∪ Γ₂ = ∂Ω, mes(Γ₁) = 0, where the nonlinear term f(·) satisfies the condition. Notice that the first result on blow up of solutions of the equation under nonlinear boundary conditions with negative initial energy was obtained earlier; later it was shown that there are solutions with positive initial energy that blow up in a finite time.
Federal agency says treatment amounted to human trafficking even though they had work visas.

A US federal agency has filed lawsuits over the unequal treatment of more than 500 migrant workers from India brought into the country to work at shipyards in Mississippi and Texas, and over 200 Thai farm labourers brought in to work in Hawaii and Washington state.

The US Equal Employment Opportunity Commission said on Wednesday that the workers were forced to live in substandard housing and were exploited with fees that meant that for some their net earnings were almost zero. The EEOC termed the treatment of the workers as amounting to human trafficking, even though they had been brought into the country on work visas.

"Foreign workers should be treated as equals when working in the United States, not as second-class citizens," said Olophius Perry, the EEOC's Los Angeles district director.

Last year, Mordechai Orian, the head of the labour firm that had recruited the Thai farm labourers, was arrested and charged in a federal court with forced labour conspiracy. In lawsuits filed on Tuesday, the EEOC said that Global Horizons Inc, Orian's Beverly Hills-based company, had recruited the labourers to work on six farms in Hawaii and two in Washington state between 2003 and 2007. The workers earned between $8.50 and $9.50 an hour to harvest crops, but many were forced to pay recruitment fees of between $12,000 and $25,000, EEOC officials said. They also said that the workers had to take high-interest loans and were charged for lodging and food.

Michael Green, Orian's Hawaii-based attorney, has disputed the claims, however. "The conditions were fine and Orian would never allow anything different," Green said. "The people who came here were paid, they were not living in squalor or bad conditions, they were paid more money than they ever were in Thailand, and they enjoyed their work."

As the federal criminal case against Orian, an Israeli national, continues, he is required to submit to electronic monitoring.

The EEOC says that the workers were being subjected to fees until they had almost no income left at all. "They were nickeled and dimed to the point where they really didn't have any pay," said Anna Park, regional attorney for the EEOC's Los Angeles office.

The EEOC says that some of the workers were forced to live in crowded conditions, and their quarters were infested with rats and insects. Workers of other nationalities on the same farms were not subject to the same conditions, Park said. Officials also said that the workers had their passports taken from them and were threatened with deportation if they complained.

The EEOC says that some of the Thai workers have since returned to their home country, and that the total number of affected workers could be about 400. Some of the workers have now been given visas for victims of human trafficking, but EEOC officials would not say how many won that designation.

In the case of the 500 Indian workers, the EEOC alleged in a lawsuit in Mississippi that Signal International LLC, a Gulf coast marine services company, subjected them to segregated facilities and discriminatory treatment. It said the Indian men paid recruiters up to $20,000 to come to the United States, and after they arrived they were forced to pay rent for crowded housing in fenced camps.
The TV ratings for the Stanley Cup playoffs have not been good. Monday’s Game 1 of the Final got a 2.8, well off the 4.2 of the opener a year ago, crushed by Game 7 of Warriors-Thunder. The final numbers for Game 2 likely won’t be much better, with the game banished from NBC to NBCSN thanks not to the Warriors, but to the extreme jungle gym competition that is American Ninja Warrior.

Ratings have been down throughout the playoffs, as well, with the New York, Los Angeles and Chicago markets all being eliminated by Round 2. And then there’s Canada, where viewership has plummeted without a single Canadian team in the playoffs for the first time in 46 years.

But good things are happening for the long-term health and growth of the NHL, and they have nothing to do with adding another team in the desert.

Ice hockey’s inherent selling point over every other sport is the speed of the game. The fastest skaters in the NHL can approach 30mph at top gear, a notch or two above Usain Bolt running full throttle. Add in the skill of handling the puck, the soccer-like passing artistry and repeated contact with other bodies flying around the ice, and it’s a compelling overall package. This is not a desperate “please like my sport” pitch – just a simple analysis that hockey has some obvious things going for it, speed chief among them.

Only the NHL has long failed to capitalize on the best part of its game. Countless words can, and have, been written on how the rise of the New Jersey Devils 20 years ago, behind Jacques Lemaire’s neutral zone trap, stalled the play on the ice and the growth of the sport. None of them will be rehashed here because reading about those Devils teams was almost as boring as watching them play hockey. Almost.

Regardless of which team wins this year’s cup, the rest of the league will have a clear mandate they will have to meet in order to compete: get faster.

Both the Pittsburgh Penguins and San Jose Sharks play lightning-fast, possible pun intended. The Tampa Bay Lightning, the team the Penguins beat in the Eastern Conference Finals and who fell in six games to the Chicago Blackhawks in the Cup Final a year ago, also play a quick and skilled game, same as Chicago. But San Jose and Pittsburgh have shifted it into a higher gear – and the Penguins especially, who probably not coincidentally hold a 2-0 lead in the series.

Starting with an offseason trade that landed Phil Kessel from Toronto, old man Penguins general manager Jim Rutherford embraced a new style. Kessel may look like a guy who would huff and puff his way through a beer league game, but he plays at a pace few in the league can match, his every rush down the ice nearly producing the feel of a Bartolo Colon home run. Before midseason, Rutherford somehow acquired Trevor Daley, one of the NHL’s best skaters, from the Blackhawks in exchange for Rob Scuderi, a toppled traffic cone on a snowy road. That was followed by the addition of winger and blur of blonde hair Carl Hagelin, and by the Penguins giving full-time roles to Bryan Rust and Conor Sheary, two young wingers from the AHL. With those additions alongside Sidney Crosby and Evgeni Malkin, the Penguins quickly became the fastest team in the NHL and posted the league’s best record in the final months of the season.

This Penguins team is much different than even the last Pittsburgh Cup-winning team of 2009. Those Penguins gave heavy minutes to Scuderi, Brooks Orpik and Matt Cooke, players who were a mix of slow, slow and dirty, and just dirty. None of the three would hold a roster spot on the 2016 edition.
In fact, Orpik now is one of the top defensemen for the Washington Capitals, the Presidents’ Trophy winner the Penguins dispatched with ease in the second round. Washington GM Brian MacLellan built his group to be “big and heavy” for the playoffs, an approach that now seems outdated. You can’t physically impose your will on an opponent if you can’t catch them to hit them. MacLellan has since said he hopes to add more speed to the Capitals lineup for next season.

Even the Philadelphia Flyers, who have mostly been attempting to punch their way to Stanley Cups since the mid-70s, seem to have adjusted their approach under general manager Ron Hextall, and the franchise now has more Shayne Gostisbehere types in the pipeline than toothless whirlwinds of violence.

The Sharks, the highest-scoring team in the playoffs entering the final, so far have proven unable to skate with the Penguins, but they made it this far on puck movement and skill. Playing a physical game won’t turn things around, even if this Sharks team were capable of it: the biggest hit in the series came in Game 1, when Patrick Marleau dropped Rust. If Marleau, one of the league’s true gentlemen, is your enforcer, you’d better focus on speed.

Casual hockey fans may not be noticing it yet – and judging by TV ratings, they most definitely are not – but the Penguins and Sharks are showing the NHL has shifted from big and slow to skilled and fast. Hockey at its best. The rest of the league must now try to keep up.
<filename>src/theme/constants/RadiusSizes.ts
export enum FormRadiusSizes {
  xs = '0.35em',
  sm = '0.55em',
  md = '0.75em',
  lg = '1em',
  xl = '2.25em',
}

export enum ImageRadiusSize {
  xs = '0.35em',
  sm = '0.55em',
  md = '0.75em',
  lg = '1em',
  xl = '2.25em',
}
// Pick the correct statement for const and volatile.
// (A) const is the opposite of volatile and vice versa.
// (B) const and volatile can’t be used for struct and union.
// (C) const and volatile can’t be used for enum.
// (D) const and volatile can’t be used for typedef.
// (E) const and volatile are independent i.e. it’s possible that a variable is defined as both const and volatile.
// Answer: (E)
// Explanation: In C, const and volatile are type qualifiers and these two are independent.
// Basically, const means that the value isn’t modifiable by the program.
// And volatile means that the value is subject to sudden change (possibly from outside the program).
// In fact, the C standard mentions an example of a valid declaration which is both const and volatile.
// The example is “extern const volatile int real_time_clock;” where real_time_clock may be modifiable by hardware,
// but cannot be assigned to, incremented, or decremented by the program. So const and volatile should be
// treated as independent qualifiers. Besides, these type qualifiers apply to struct, union, enum and typedef as well.
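A minimal demonstration of answer (E), written as C++ (the semantics are the same in C). The definition with the value 42 is our own stand-in so the example links and runs; on real hardware, real_time_clock would be a memory-mapped register rather than an ordinary variable:

#include <cstdio>

// 'const' and 'volatile' combined in one declaration: the program may not
// write the object, yet every access must be a genuine read, because the
// value can change outside the program's control (hardware, a debugger).
// Modeled on the standard's example "extern const volatile int real_time_clock;".
extern const volatile int real_time_clock;

// Stand-in definition for this demo; on real hardware this symbol would be
// mapped to a register updated by the clock chip.
const volatile int real_time_clock = 42;

int main() {
    int t1 = real_time_clock;   // reading is allowed and never optimized away
    int t2 = real_time_clock;   // a second real read; could differ on real hardware
    std::printf("%d %d\n", t1, t2);
    // real_time_clock = 0;     // error: assignment of read-only variable
    // ++real_time_clock;       // error: increment of read-only variable
    return 0;
}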
<reponame>Magaiwer/projetointegrador<filename>src/main/java/projetointegrador/model/Form.java
package projetointegrador.model;

import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import org.hibernate.annotations.DynamicUpdate;
import projetointegrador.listeners.AuditListeners;

import javax.persistence.*;
import javax.persistence.Entity;
import java.util.List;

@Entity
@EntityListeners(AuditListeners.class)
@Table(name = "form")
@DynamicUpdate
@Data
@ToString(exclude = {"entity", "permissions"})
@EqualsAndHashCode(exclude = "entity")
public class Form {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;

    @Column
    private String name;

    @Column
    private boolean audit;

    @Column
    private String description;

    @ManyToOne(fetch = FetchType.LAZY)
    @JoinColumn(name = "entity_id")
    private Entities entity;

    @OneToMany(mappedBy = "form", cascade = CascadeType.ALL, orphanRemoval = true)
    private List<Permission> permissions;
}
<gh_stars>1-10
#pragma once

#include "CoreDEF.h"

#include <boost/circular_buffer.hpp>
#include <vector>

namespace bv
{

// ***********************
/// Class holds reusable elements in a circular buffer. You can take buffers out and fill them with new data
/// without the need for memory reallocations.
template< typename ElementType >
class Reusable
{
private:

    boost::circular_buffer< ElementType >   m_queue;

public:

    explicit        Reusable    ( const std::vector< ElementType > & reusables );

    ElementType     GetNext     ();
};

// ========================================================================= //
// Implementation
// ========================================================================= //

// ***********************
//
template< typename ElementType >
inline Reusable< ElementType >::Reusable    ( const std::vector< ElementType > & reusables )
    : m_queue( reusables.size() )
{
    assert( reusables.size() > 0 );

    for( auto & element : reusables )
    {
        m_queue.push_back( element );
    }
}

// ***********************
//
template< typename ElementType >
inline ElementType      Reusable< ElementType >::GetNext    ()
{
    auto element = m_queue.front();
    m_queue.pop_front();
    m_queue.push_back( element );

    return element;
}

} // bv
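A usage sketch of our own (not from the repository): three pre-allocated buffers recycled round-robin, so repeated GetNext() calls never allocate. The element type and buffer size are invented for the example, and the commented include path is a guess.

// Usage sketch for bv::Reusable -- our own example, not part of the repository.
#include <cassert>
#include <memory>
#include <vector>
// #include "Reusable.h"   // assumed header name for the class above

int main()
{
    using Buffer = std::shared_ptr< std::vector< unsigned char > >;

    // Three pre-allocated 1 KiB buffers to be recycled in rotation.
    std::vector< Buffer > buffers;
    for( int i = 0; i < 3; ++i )
        buffers.push_back( std::make_shared< std::vector< unsigned char > >( 1024 ) );

    bv::Reusable< Buffer > pool( buffers );

    // GetNext() hands buffers out round-robin; after a full cycle we get
    // the first buffer back, and no new memory has been allocated.
    Buffer first = pool.GetNext();
    pool.GetNext();
    pool.GetNext();
    assert( first == pool.GetNext() );

    return 0;
}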
// Copyright (c) Microsoft Corporation. // Licensed under the MIT license. #include "precomp.h" #include "inc/CodepointWidthDetector.hpp" namespace { // used to store range data in CodepointWidthDetector's internal map struct UnicodeRange final { unsigned int lowerBound; unsigned int upperBound; CodepointWidth width; }; static bool operator<(const UnicodeRange& range, const unsigned int searchTerm) noexcept { return range.upperBound < searchTerm; } static constexpr std::array<UnicodeRange, 285> s_wideAndAmbiguousTable{ // generated from http://www.unicode.org/Public/UCD/latest/ucd/EastAsianWidth.txt // anything not present here is presumed to be Narrow. UnicodeRange{ 0xa1, 0xa1, CodepointWidth::Ambiguous }, UnicodeRange{ 0xa4, 0xa4, CodepointWidth::Ambiguous }, UnicodeRange{ 0xa7, 0xa8, CodepointWidth::Ambiguous }, UnicodeRange{ 0xaa, 0xaa, CodepointWidth::Ambiguous }, UnicodeRange{ 0xad, 0xae, CodepointWidth::Ambiguous }, UnicodeRange{ 0xb0, 0xb4, CodepointWidth::Ambiguous }, UnicodeRange{ 0xb6, 0xba, CodepointWidth::Ambiguous }, UnicodeRange{ 0xbc, 0xbf, CodepointWidth::Ambiguous }, UnicodeRange{ 0xc6, 0xc6, CodepointWidth::Ambiguous }, UnicodeRange{ 0xd0, 0xd0, CodepointWidth::Ambiguous }, UnicodeRange{ 0xd7, 0xd8, CodepointWidth::Ambiguous }, UnicodeRange{ 0xde, 0xe1, CodepointWidth::Ambiguous }, UnicodeRange{ 0xe6, 0xe6, CodepointWidth::Ambiguous }, UnicodeRange{ 0xe8, 0xea, CodepointWidth::Ambiguous }, UnicodeRange{ 0xec, 0xed, CodepointWidth::Ambiguous }, UnicodeRange{ 0xf0, 0xf0, CodepointWidth::Ambiguous }, UnicodeRange{ 0xf2, 0xf3, CodepointWidth::Ambiguous }, UnicodeRange{ 0xf7, 0xfa, CodepointWidth::Ambiguous }, UnicodeRange{ 0xfc, 0xfc, CodepointWidth::Ambiguous }, UnicodeRange{ 0xfe, 0xfe, CodepointWidth::Ambiguous }, UnicodeRange{ 0x101, 0x101, CodepointWidth::Ambiguous }, UnicodeRange{ 0x111, 0x111, CodepointWidth::Ambiguous }, UnicodeRange{ 0x113, 0x113, CodepointWidth::Ambiguous }, UnicodeRange{ 0x11b, 0x11b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x126, 0x127, CodepointWidth::Ambiguous }, UnicodeRange{ 0x12b, 0x12b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x131, 0x133, CodepointWidth::Ambiguous }, UnicodeRange{ 0x138, 0x138, CodepointWidth::Ambiguous }, UnicodeRange{ 0x13f, 0x142, CodepointWidth::Ambiguous }, UnicodeRange{ 0x144, 0x144, CodepointWidth::Ambiguous }, UnicodeRange{ 0x148, 0x14b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x14d, 0x14d, CodepointWidth::Ambiguous }, UnicodeRange{ 0x152, 0x153, CodepointWidth::Ambiguous }, UnicodeRange{ 0x166, 0x167, CodepointWidth::Ambiguous }, UnicodeRange{ 0x16b, 0x16b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1ce, 0x1ce, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1d0, 0x1d0, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1d2, 0x1d2, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1d4, 0x1d4, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1d6, 0x1d6, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1d8, 0x1d8, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1da, 0x1da, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1dc, 0x1dc, CodepointWidth::Ambiguous }, UnicodeRange{ 0x251, 0x251, CodepointWidth::Ambiguous }, UnicodeRange{ 0x261, 0x261, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2c4, 0x2c4, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2c7, 0x2c7, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2c9, 0x2cb, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2cd, 0x2cd, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2d0, 0x2d0, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2d8, 0x2db, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2dd, 
0x2dd, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2df, 0x2df, CodepointWidth::Ambiguous }, UnicodeRange{ 0x300, 0x36f, CodepointWidth::Ambiguous }, UnicodeRange{ 0x391, 0x3a1, CodepointWidth::Ambiguous }, UnicodeRange{ 0x3a3, 0x3a9, CodepointWidth::Ambiguous }, UnicodeRange{ 0x3b1, 0x3c1, CodepointWidth::Ambiguous }, UnicodeRange{ 0x3c3, 0x3c9, CodepointWidth::Ambiguous }, UnicodeRange{ 0x401, 0x401, CodepointWidth::Ambiguous }, UnicodeRange{ 0x410, 0x44f, CodepointWidth::Ambiguous }, UnicodeRange{ 0x451, 0x451, CodepointWidth::Ambiguous }, UnicodeRange{ 0x1100, 0x115f, CodepointWidth::Wide }, UnicodeRange{ 0x2010, 0x2010, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2013, 0x2016, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2018, 0x2019, CodepointWidth::Ambiguous }, UnicodeRange{ 0x201c, 0x201d, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2020, 0x2022, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2024, 0x2027, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2030, 0x2030, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2032, 0x2033, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2035, 0x2035, CodepointWidth::Ambiguous }, UnicodeRange{ 0x203b, 0x203b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x203e, 0x203e, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2074, 0x2074, CodepointWidth::Ambiguous }, UnicodeRange{ 0x207f, 0x207f, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2081, 0x2084, CodepointWidth::Ambiguous }, UnicodeRange{ 0x20ac, 0x20ac, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2103, 0x2103, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2105, 0x2105, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2109, 0x2109, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2113, 0x2113, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2116, 0x2116, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2121, 0x2122, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2126, 0x2126, CodepointWidth::Ambiguous }, UnicodeRange{ 0x212b, 0x212b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2153, 0x2154, CodepointWidth::Ambiguous }, UnicodeRange{ 0x215b, 0x215e, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2160, 0x216b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2170, 0x2179, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2189, 0x2189, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2190, 0x2199, CodepointWidth::Ambiguous }, UnicodeRange{ 0x21b8, 0x21b9, CodepointWidth::Ambiguous }, UnicodeRange{ 0x21d2, 0x21d2, CodepointWidth::Ambiguous }, UnicodeRange{ 0x21d4, 0x21d4, CodepointWidth::Ambiguous }, UnicodeRange{ 0x21e7, 0x21e7, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2200, 0x2200, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2202, 0x2203, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2207, 0x2208, CodepointWidth::Ambiguous }, UnicodeRange{ 0x220b, 0x220b, CodepointWidth::Ambiguous }, UnicodeRange{ 0x220f, 0x220f, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2211, 0x2211, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2215, 0x2215, CodepointWidth::Ambiguous }, UnicodeRange{ 0x221a, 0x221a, CodepointWidth::Ambiguous }, UnicodeRange{ 0x221d, 0x2220, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2223, 0x2223, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2225, 0x2225, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2227, 0x222c, CodepointWidth::Ambiguous }, UnicodeRange{ 0x222e, 0x222e, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2234, 0x2237, CodepointWidth::Ambiguous }, UnicodeRange{ 0x223c, 0x223d, CodepointWidth::Ambiguous }, UnicodeRange{ 0x2248, 0x2248, CodepointWidth::Ambiguous }, UnicodeRange{ 0x224c, 0x224c, CodepointWidth::Ambiguous }, 
    UnicodeRange{ 0x2252, 0x2252, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2260, 0x2261, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2264, 0x2267, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x226a, 0x226b, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x226e, 0x226f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2282, 0x2283, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2286, 0x2287, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2295, 0x2295, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2299, 0x2299, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x22a5, 0x22a5, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x22bf, 0x22bf, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2312, 0x2312, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x231a, 0x231b, CodepointWidth::Wide },
    UnicodeRange{ 0x2329, 0x232a, CodepointWidth::Wide },
    UnicodeRange{ 0x23e9, 0x23ec, CodepointWidth::Wide },
    UnicodeRange{ 0x23f0, 0x23f0, CodepointWidth::Wide },
    UnicodeRange{ 0x23f3, 0x23f3, CodepointWidth::Wide },
    UnicodeRange{ 0x2460, 0x24e9, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x24eb, 0x254b, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2550, 0x2573, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2580, 0x258f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2592, 0x2595, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25a0, 0x25a1, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25a3, 0x25a9, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25b2, 0x25b3, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25b6, 0x25b7, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25bc, 0x25bd, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25c0, 0x25c1, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25c6, 0x25c8, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25cb, 0x25cb, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25ce, 0x25d1, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25e2, 0x25e5, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25ef, 0x25ef, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x25fd, 0x25fe, CodepointWidth::Wide },
    UnicodeRange{ 0x2605, 0x2606, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2609, 0x2609, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x260e, 0x260f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2614, 0x2615, CodepointWidth::Wide },
    UnicodeRange{ 0x261c, 0x261c, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x261e, 0x261e, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2640, 0x2640, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2642, 0x2642, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2648, 0x2653, CodepointWidth::Wide },
    UnicodeRange{ 0x2660, 0x2661, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2663, 0x2665, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2667, 0x266a, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x266c, 0x266d, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x266f, 0x266f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x267f, 0x267f, CodepointWidth::Wide },
    UnicodeRange{ 0x2693, 0x2693, CodepointWidth::Wide },
    UnicodeRange{ 0x269e, 0x269f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26a1, 0x26a1, CodepointWidth::Wide },
    UnicodeRange{ 0x26aa, 0x26ab, CodepointWidth::Wide },
    UnicodeRange{ 0x26bd, 0x26be, CodepointWidth::Wide },
    UnicodeRange{ 0x26bf, 0x26bf, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26c4, 0x26c5, CodepointWidth::Wide },
    UnicodeRange{ 0x26c6, 0x26cd, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26ce, 0x26ce, CodepointWidth::Wide },
    UnicodeRange{ 0x26cf, 0x26d3, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26d4, 0x26d4, CodepointWidth::Wide },
    UnicodeRange{ 0x26d5, 0x26e1, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26e3, 0x26e3, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26e8, 0x26e9, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26ea, 0x26ea, CodepointWidth::Wide },
    UnicodeRange{ 0x26eb, 0x26f1, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26f2, 0x26f3, CodepointWidth::Wide },
    UnicodeRange{ 0x26f4, 0x26f4, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26f5, 0x26f5, CodepointWidth::Wide },
    UnicodeRange{ 0x26f6, 0x26f9, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26fa, 0x26fa, CodepointWidth::Wide },
    UnicodeRange{ 0x26fb, 0x26fc, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x26fd, 0x26fd, CodepointWidth::Wide },
    UnicodeRange{ 0x26fe, 0x26ff, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2705, 0x2705, CodepointWidth::Wide },
    UnicodeRange{ 0x270a, 0x270b, CodepointWidth::Wide },
    UnicodeRange{ 0x2728, 0x2728, CodepointWidth::Wide },
    UnicodeRange{ 0x273d, 0x273d, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x274c, 0x274c, CodepointWidth::Wide },
    UnicodeRange{ 0x274e, 0x274e, CodepointWidth::Wide },
    UnicodeRange{ 0x2753, 0x2755, CodepointWidth::Wide },
    UnicodeRange{ 0x2757, 0x2757, CodepointWidth::Wide },
    UnicodeRange{ 0x2776, 0x277f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2795, 0x2797, CodepointWidth::Wide },
    UnicodeRange{ 0x27b0, 0x27b0, CodepointWidth::Wide },
    UnicodeRange{ 0x27bf, 0x27bf, CodepointWidth::Wide },
    UnicodeRange{ 0x2b1b, 0x2b1c, CodepointWidth::Wide },
    UnicodeRange{ 0x2b50, 0x2b50, CodepointWidth::Wide },
    UnicodeRange{ 0x2b55, 0x2b55, CodepointWidth::Wide },
    UnicodeRange{ 0x2b56, 0x2b59, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x2e80, 0x2e99, CodepointWidth::Wide },
    UnicodeRange{ 0x2e9b, 0x2ef3, CodepointWidth::Wide },
    UnicodeRange{ 0x2f00, 0x2fd5, CodepointWidth::Wide },
    UnicodeRange{ 0x2ff0, 0x2ffb, CodepointWidth::Wide },
    UnicodeRange{ 0x3000, 0x303e, CodepointWidth::Wide },
    UnicodeRange{ 0x3041, 0x3096, CodepointWidth::Wide },
    UnicodeRange{ 0x3099, 0x30ff, CodepointWidth::Wide },
    UnicodeRange{ 0x3105, 0x312e, CodepointWidth::Wide },
    UnicodeRange{ 0x3131, 0x318e, CodepointWidth::Wide },
    UnicodeRange{ 0x3190, 0x31ba, CodepointWidth::Wide },
    UnicodeRange{ 0x31c0, 0x31e3, CodepointWidth::Wide },
    UnicodeRange{ 0x31f0, 0x321e, CodepointWidth::Wide },
    UnicodeRange{ 0x3220, 0x3247, CodepointWidth::Wide },
    UnicodeRange{ 0x3248, 0x324f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x3250, 0x32fe, CodepointWidth::Wide },
    UnicodeRange{ 0x3300, 0x4dbf, CodepointWidth::Wide },
    UnicodeRange{ 0x4e00, 0xa48c, CodepointWidth::Wide },
    UnicodeRange{ 0xa490, 0xa4c6, CodepointWidth::Wide },
    UnicodeRange{ 0xa960, 0xa97c, CodepointWidth::Wide },
    UnicodeRange{ 0xac00, 0xd7a3, CodepointWidth::Wide },
    UnicodeRange{ 0xe000, 0xf8ff, CodepointWidth::Ambiguous },
    UnicodeRange{ 0xf900, 0xfaff, CodepointWidth::Wide },
    UnicodeRange{ 0xfe00, 0xfe0f, CodepointWidth::Ambiguous },
    UnicodeRange{ 0xfe10, 0xfe19, CodepointWidth::Wide },
    UnicodeRange{ 0xfe30, 0xfe52, CodepointWidth::Wide },
    UnicodeRange{ 0xfe54, 0xfe66, CodepointWidth::Wide },
    UnicodeRange{ 0xfe68, 0xfe6b, CodepointWidth::Wide },
    UnicodeRange{ 0xff01, 0xff60, CodepointWidth::Wide },
    UnicodeRange{ 0xffe0, 0xffe6, CodepointWidth::Wide },
    UnicodeRange{ 0xfffd, 0xfffd, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x16fe0, 0x16fe1, CodepointWidth::Wide },
    UnicodeRange{ 0x17000, 0x187ec, CodepointWidth::Wide },
    UnicodeRange{ 0x18800, 0x18af2, CodepointWidth::Wide },
    UnicodeRange{ 0x1b000, 0x1b11e, CodepointWidth::Wide },
    UnicodeRange{ 0x1b170, 0x1b2fb, CodepointWidth::Wide },
    UnicodeRange{ 0x1f004, 0x1f004, CodepointWidth::Wide },
    UnicodeRange{ 0x1f0cf, 0x1f0cf, CodepointWidth::Wide },
    UnicodeRange{ 0x1f100, 0x1f10a, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x1f110, 0x1f12d, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x1f130, 0x1f169, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x1f170, 0x1f18d, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x1f18e, 0x1f18e, CodepointWidth::Wide },
    UnicodeRange{ 0x1f18f, 0x1f190, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x1f191, 0x1f19a, CodepointWidth::Wide },
    UnicodeRange{ 0x1f19b, 0x1f1ac, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x1f200, 0x1f202, CodepointWidth::Wide },
    UnicodeRange{ 0x1f210, 0x1f23b, CodepointWidth::Wide },
    UnicodeRange{ 0x1f240, 0x1f248, CodepointWidth::Wide },
    UnicodeRange{ 0x1f250, 0x1f251, CodepointWidth::Wide },
    UnicodeRange{ 0x1f260, 0x1f265, CodepointWidth::Wide },
    UnicodeRange{ 0x1f300, 0x1f320, CodepointWidth::Wide },
    UnicodeRange{ 0x1f32d, 0x1f335, CodepointWidth::Wide },
    UnicodeRange{ 0x1f337, 0x1f37c, CodepointWidth::Wide },
    UnicodeRange{ 0x1f37e, 0x1f393, CodepointWidth::Wide },
    UnicodeRange{ 0x1f3a0, 0x1f3ca, CodepointWidth::Wide },
    UnicodeRange{ 0x1f3cf, 0x1f3d3, CodepointWidth::Wide },
    UnicodeRange{ 0x1f3e0, 0x1f3f0, CodepointWidth::Wide },
    UnicodeRange{ 0x1f3f4, 0x1f3f4, CodepointWidth::Wide },
    UnicodeRange{ 0x1f3f8, 0x1f43e, CodepointWidth::Wide },
    UnicodeRange{ 0x1f440, 0x1f440, CodepointWidth::Wide },
    UnicodeRange{ 0x1f442, 0x1f4fc, CodepointWidth::Wide },
    UnicodeRange{ 0x1f4ff, 0x1f53d, CodepointWidth::Wide },
    UnicodeRange{ 0x1f54b, 0x1f54e, CodepointWidth::Wide },
    UnicodeRange{ 0x1f550, 0x1f567, CodepointWidth::Wide },
    UnicodeRange{ 0x1f57a, 0x1f57a, CodepointWidth::Wide },
    UnicodeRange{ 0x1f595, 0x1f596, CodepointWidth::Wide },
    UnicodeRange{ 0x1f5a4, 0x1f5a4, CodepointWidth::Wide },
    UnicodeRange{ 0x1f5fb, 0x1f64f, CodepointWidth::Wide },
    UnicodeRange{ 0x1f680, 0x1f6c5, CodepointWidth::Wide },
    UnicodeRange{ 0x1f6cc, 0x1f6cc, CodepointWidth::Wide },
    UnicodeRange{ 0x1f6d0, 0x1f6d2, CodepointWidth::Wide },
    UnicodeRange{ 0x1f6eb, 0x1f6ec, CodepointWidth::Wide },
    UnicodeRange{ 0x1f6f4, 0x1f6f8, CodepointWidth::Wide },
    UnicodeRange{ 0x1f910, 0x1f93e, CodepointWidth::Wide },
    UnicodeRange{ 0x1f940, 0x1f94c, CodepointWidth::Wide },
    UnicodeRange{ 0x1f950, 0x1f96b, CodepointWidth::Wide },
    UnicodeRange{ 0x1f980, 0x1f997, CodepointWidth::Wide },
    UnicodeRange{ 0x1f9c0, 0x1f9c0, CodepointWidth::Wide },
    UnicodeRange{ 0x1f9d0, 0x1f9e6, CodepointWidth::Wide },
    UnicodeRange{ 0x20000, 0x2fffd, CodepointWidth::Wide },
    UnicodeRange{ 0x30000, 0x3fffd, CodepointWidth::Wide },
    UnicodeRange{ 0xe0100, 0xe01ef, CodepointWidth::Ambiguous },
    UnicodeRange{ 0xf0000, 0xffffd, CodepointWidth::Ambiguous },
    UnicodeRange{ 0x100000, 0x10fffd, CodepointWidth::Ambiguous }
};
}

// Routine Description:
// - Constructs an instance of the CodepointWidthDetector class
CodepointWidthDetector::CodepointWidthDetector() noexcept :
    _fallbackCache{},
    _pfnFallbackMethod{}
{
}

// Routine Description:
// - returns the width type of codepoint by searching the map generated from the unicode spec
// Arguments:
// - glyph - the utf16 encoded codepoint to search for
// Return Value:
// - the width type of the codepoint
CodepointWidth CodepointWidthDetector::GetWidth(const std::wstring_view glyph) const
{
    if (glyph.empty())
    {
        return CodepointWidth::Invalid;
    }

    const auto codepoint = _extractCodepoint(glyph);
    const auto it = std::lower_bound(s_wideAndAmbiguousTable.begin(), s_wideAndAmbiguousTable.end(), codepoint);

    // For characters that are not _in_ the table, lower_bound will return the nearest item that is.
    // We must check its bounds to make sure that our hit was a true hit.
    if (it != s_wideAndAmbiguousTable.end() && codepoint >= it->lowerBound && codepoint <= it->upperBound)
    {
        return it->width;
    }

    return CodepointWidth::Narrow;
}

// Routine Description:
// - checks if wch is wide. will attempt to fallback as much as possible until an answer is determined
// Arguments:
// - wch - the wchar to check width of
// Return Value:
// - true if wch is wide
bool CodepointWidthDetector::IsWide(const wchar_t wch) const noexcept
{
    try
    {
        return IsWide({ &wch, 1 });
    }
    CATCH_LOG();

    return true;
}

// Routine Description:
// - checks if codepoint is wide. will attempt to fallback as much as possible until an answer is determined
// Arguments:
// - glyph - the utf16 encoded codepoint to check width of
// Return Value:
// - true if codepoint is wide
bool CodepointWidthDetector::IsWide(const std::wstring_view glyph) const
{
    THROW_HR_IF(E_INVALIDARG, glyph.empty());

    if (glyph.size() == 1)
    {
        // We first attempt to look at our custom quick lookup table of char width preferences.
        const auto width = GetQuickCharWidth(glyph.front());

        // If it's invalid, the quick width had no opinion, so go to the lookup table.
        if (width == CodepointWidth::Invalid)
        {
            return _lookupIsWide(glyph);
        }
        // If it's ambiguous, the quick width wanted us to ask the font directly, try that if we can.
        // If not, go to the lookup table.
        else if (width == CodepointWidth::Ambiguous)
        {
            if (_pfnFallbackMethod)
            {
                return _checkFallbackViaCache(glyph);
            }
            else
            {
                return _lookupIsWide(glyph);
            }
        }
        // Otherwise, return Wide as True and Narrow as False.
        else
        {
            return width == CodepointWidth::Wide;
        }
    }
    else
    {
        return _lookupIsWide(glyph);
    }
}

// Routine Description:
// - checks if codepoint is wide using fallback methods.
// Arguments:
// - glyph - the utf16 encoded codepoint to check width of
// Return Value:
// - true if codepoint is wide or if it can't be confirmed to be narrow
bool CodepointWidthDetector::_lookupIsWide(const std::wstring_view glyph) const noexcept
{
    try
    {
        // Use our generated table to try to look up the width based on the Unicode standard.
        const CodepointWidth width = GetWidth(glyph);

        // If it's ambiguous, then ask the font if we can.
        if (width == CodepointWidth::Ambiguous)
        {
            if (_pfnFallbackMethod)
            {
                return _checkFallbackViaCache(glyph);
            }
        }
        // If it's not ambiguous, it should say wide or narrow. Turn that into True = Wide or False = Narrow.
        else
        {
            return width == CodepointWidth::Wide;
        }
    }
    CATCH_LOG();

    // If we got this far, we couldn't figure it out.
    // It's better to be too wide than too narrow.
    return true;
}

// Routine Description:
// - Checks the fallback function but caches the results until the font changes
//   because the lookup function is usually very expensive and will return the same results
//   for the same inputs.
// Arguments:
// - glyph - the utf16 encoded codepoint to check width of
// Return Value:
// - true if codepoint is wide or false if it is narrow
bool CodepointWidthDetector::_checkFallbackViaCache(const std::wstring_view glyph) const
{
    const std::wstring findMe{ glyph };

    // TODO: Cache needs to be emptied when font changes.
    const auto it = _fallbackCache.find(findMe);
    if (it == _fallbackCache.end())
    {
        auto result = _pfnFallbackMethod(glyph);
        _fallbackCache.insert_or_assign(findMe, result);
        return result;
    }
    else
    {
        return it->second;
    }
}

// Routine Description:
// - extract unicode codepoint from utf16 encoding
// Arguments:
// - glyph - the utf16 encoded codepoint to convert
// Return Value:
// - the codepoint being stored
unsigned int CodepointWidthDetector::_extractCodepoint(const std::wstring_view glyph) noexcept
{
    if (glyph.size() == 1)
    {
        return static_cast<unsigned int>(glyph.front());
    }
    else
    {
        const unsigned int mask = 0x3FF;
        // leading bits, shifted over to make space for trailing bits
        unsigned int codepoint = (glyph.at(0) & mask) << 10;
        // trailing bits
        codepoint |= (glyph.at(1) & mask);
        // 0x10000 is subtracted from the codepoint to encode a surrogate pair, add it back
        codepoint += 0x10000;
        return codepoint;
    }
}

// Method Description:
// - Sets a function that should be used as the fallback mechanism for
//   determining a particular glyph's width, should the glyph be an ambiguous
//   width.
//   A Terminal could hook in a Renderer's IsGlyphWideByFont method as the
//   fallback to ask the renderer for the glyph's width (for example).
// Arguments:
// - pfnFallback - the function to use as the fallback method.
// Return Value:
// - <none>
void CodepointWidthDetector::SetFallbackMethod(std::function<bool(const std::wstring_view)> pfnFallback)
{
    _pfnFallbackMethod = pfnFallback;
}

// Method Description:
// - Resets the internal ambiguous character width cache mechanism
//   since it will be different when the font changes and we should
//   re-query the new font for that information.
// Arguments:
// - <none>
// Return Value:
// - <none>
void CodepointWidthDetector::NotifyFontChanged() const noexcept
{
    _fallbackCache.clear();
}
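The _extractCodepoint helper above is standard UTF-16 surrogate-pair decoding. As a cross-language illustration (my sketch, not part of the original source), the same computation can be done and cross-checked in Java using only standard-library calls:

public class SurrogatePairDecodeSketch {
    public static void main(String[] args) {
        String glyph = "\uD83D\uDC4D"; // U+1F44D THUMBS UP SIGN as a UTF-16 surrogate pair

        // Manual decoding, mirroring _extractCodepoint: take the low 10 bits of each
        // surrogate, combine them, and add back the 0x10000 offset.
        int mask = 0x3FF;
        int manual = ((glyph.charAt(0) & mask) << 10 | (glyph.charAt(1) & mask)) + 0x10000;

        // The standard library performs the same computation.
        int viaLibrary = Character.toCodePoint(glyph.charAt(0), glyph.charAt(1));

        System.out.printf("manual=U+%X, library=U+%X%n", manual, viaLibrary); // both print U+1F44D
    }
}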
GROVELAND, Calif. — As firefighters make progress containing a raging wildfire in and near Yosemite National Park, officials have turned to unmanned aircraft to monitor for unexpected developments.

The California National Guard launched a drone Wednesday in an effort to get an early bead on spot blazes. Incident commander Mike Wilkins said Wednesday that the unmanned MQ-1 aircraft is already giving ground-based crews a bird's-eye view of new developments. "Already this morning it's allowed us to see a spot fire we wouldn't have seen," he said. A similar unmanned NASA aircraft has been used for fire surveys in past years.

The drone took off from the Victorville Airport in Southern California and generally flew over unpopulated areas on its 300-mile flight. While outside the fire area, it was escorted by a manned aircraft. Officials were careful to point out that the images are being used only to aid the effort to contain the Rim Fire, burning in the western Sierra Nevada. Wilkins said they'll use the information the drone broadcasts to decide in real time where to deploy resources. They also hope to use it to detect any changes on the ground that could threaten crews.

In 2009, an unmanned NASA Predator equipped with an infrared imaging sensor was used to help the U.S. Forest Service assess damage from a fire in Angeles National Forest. In 2008, a drone capable of detecting hot spots helped firefighters assess the movement of a series of wildfires stretching from Southern California's Lake Arrowhead to San Diego.

The Rim Fire has burned through 293 square miles of the Sierra Nevada, destroyed 111 structures and threatened giant sequoias. Fire officials say they expect full containment in three weeks but that the fire will burn for much longer than that. "It's looking better every day," said incident spokesman Glen Stratton. "So far everything is holding."

The fire has threatened San Francisco's water supply at the Hetch Hetchy Reservoir, the only municipal water supply inside a national park. Stratton said Wednesday that the fire is burning itself out as it approaches the reservoir and that crews are lighting back burns on the south side of the reservoir to push it back into the wilderness area.

Pushed by winds blowing to the north and east, the largest fire in recorded Sierra Nevada history has unleashed a smoky haze that has worsened air quality more than 100 miles away in Nevada. Most of the structures destroyed are tent cabins and other outbuildings, but 11 homes have been lost, said California fire spokesman Daniel Berlant.

On Wednesday morning, authorities said the blaze was 23 percent contained, with crews aided by higher humidity continuing to make progress against it overnight. The fire started Aug. 17, and investigators are trying to determine the cause.
[Editorial note: Earlier this week, the browser testing service BrowserStack was hacked. Cofounders Ritesh and Nakul share the details of the attack and the process going forward, in an honest and mature manner.]

As you may already know, BrowserStack experienced an attack on 9th November, 2014 at 23:30 GMT, during which an individual was able to gain unauthorized access to some of our users' registered email addresses. He then tried to send an email to all our registered users, but he was only able to reach less than 1% of them (our estimate is 5,000 users). The email contained inaccurate information, even claiming that BrowserStack would be shutting down.

When we realized this, our only concern was to protect our users. This involved temporarily taking down the service while we scrutinized each component carefully. This inconvenienced our users for several hours, and for that we are truly sorry.

What happened?

BrowserStack application servers run on Amazon Web Services. The configuration is vast, consisting of thousands of servers. One of these was an old prototype machine, which was the target of the breach. The machine had been running since before 2012 and was not in active use. It was penetrated using the shellshock vulnerability, and since it was no longer in active use, it did not have the appropriate patch installed.

The old prototype machine had our AWS API access key and secret key. Once the hacker gained access to the keys, he created an IAM user and generated a key-pair. He was then able to run an instance inside our AWS account using these credentials, and mount one of our backup disks. This backup was of one of our component services, used for the production environment, and contained a config file with our database password. He also whitelisted his IP on our database security group, which is the AWS firewall.

He began to copy one of our tables, which contained partial user information, including email IDs, hashed passwords, and last tested URLs. His copy operation locked the database table, which raised alerts on our monitoring system. On receiving the alerts, we checked the logs, saw an unrecognized IP, and blocked it right away. In that time, the hacker had been able to retrieve only a portion of the data. Finally, using this data and the SES credentials, he was able to send an email to some of our users.

What was the extent of the damage?

Our database logs confirmed that user data was partially copied, but no user test history was compromised; all user data remains wholly intact. Most crucially, credit card details were not compromised, as we store only the last 4 digits of the credit card number, and all payment processing takes place through our payment processing partner.

All user passwords are salted and hashed with the bcrypt algorithm, which creates an irreversible hash that cannot feasibly be reversed (a short sketch of this kind of hashing appears at the end of this post). However, as an added precaution, we suggest that users change their BrowserStack account passwords.

We were able to verify the actions of the hacker using AWS CloudTrail, which confirmed that no other services were compromised, no other machines were booted, and our AMIs and other data stores were not copied. In addition, our production web server logs indicate that we were experiencing shellshock attempts, but they failed because the production web server has the necessary patches to foil all such attempts.

Points in the email

We would now like to address the points raised in the email.
The hacker quoted three paragraphs from our Security documentation, as follows.

"...after the restoration process is complete, the virtual machines are guaranteed to be tamper-proof."

Our restoration process is indeed tamper-proof. When we create a test machine from scratch, we take a snapshot. After every user session, the test machine is restored to its original state using that snapshot. Even if a previous user manages to install malicious software, it is always erased by the restoration process.

"The machines themselves are in a secure network, and behind strong firewalls to present the safest environment possible."

Every single machine has an OS firewall, in addition to the hardware network firewalls we use. On EC2, we use security groups as an equivalent safety measure. We also use industry-standard brute-force-throttling measures.

"At any given time, you have sole access to a virtual machine. Your testing session cannot be seen or accessed by other users, including BrowserStack administrators. Once you release a virtual machine, it is taken off the grid, and restored to its initial settings. All your data is destroyed in this process."

The application ensures that a machine is allocated to only one person at a time, and VNC passwords are randomly generated for each session. Thus, even our administrators cannot see your test session.

With respect to the plaintext passwords on the VMs, this is certainly not the case, as we moved to key-based authentication years ago. Moreover, root login is disabled in our SSH configuration. Both the passwords mentioned, 'nakula' and 'c0stac0ff33', were indeed in use a couple of years ago during our prototyping phase, and thus were present in the old prototype machine that was hacked.

'nakula' was previously our VNC password, and was hashed. However, unlike the hash used for the user passwords, this hash is much weaker. This was due to a limitation in the VNC protocol, and we had overcome this liability by regenerating a new password for every session; thus 'nakula' has not been in use for years. 'c0stac0ff33' was one of our system user passwords on the prototype machine, before we moved to key-based authentication.

It is true that we still run our VNC server on port 5901, but we do not believe that this is a security vulnerability, because a current password is still required for access. As mentioned before, the passwords are changed every test session.

Where did we go wrong?

All our servers, running or not, whether in active use or not, should have been patched with the latest security upgrades and updates, including the shellshock one. Moreover, servers not in active use should have been stopped, and the server should not have had the AWS keys. Additionally, our communication could have been better. Instead of intermittent updates, we preferred to present a complete, honest picture of the attack to our users once our analysis was done.

Security measures taken to mitigate and prevent further incidents

After taking down the service, we revoked all the existing AWS keys and passwords, and generated new ones immediately, as an added security measure. Subsequently, we went through all the SSH logs, web server logs, and AWS CloudTrail logs, to ensure that no more damage was done.
We are migrating all backups to encrypted backups, and removing all unencrypted ones. We have also put in several additional checks and alerts, which are triggered on specified AWS actions. As a precautionary measure, we have also created new VM snapshots and have replaced all the existing ones. To prevent further incidents, we are in the process of evaluating certain VPC/VPN options to enhance our security measures. We are also going to have a security audit conducted by an external, independent agency.

Once again, we apologise for the inconvenience. BrowserStack is deeply committed to providing the best and most secure testing infrastructure for our users. We will be forging ahead with exciting new releases in the next few weeks and look forward to continuing to serve you.

We have a trace and the IP of the hacker. We will be in touch with the authorities soon to register an official complaint.
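For readers curious what "salted and hashed with bcrypt" looks like in practice, here is a minimal illustrative sketch. It assumes the open-source jBCrypt library (org.mindrot.jbcrypt); BrowserStack has not disclosed its actual implementation, so this shows only the general technique:

import org.mindrot.jbcrypt.BCrypt;

public class PasswordHashingSketch {
    public static void main(String[] args) {
        String password = "correct horse battery staple";

        // gensalt(12) generates a random salt with a work factor of 2^12 rounds.
        String hash = BCrypt.hashpw(password, BCrypt.gensalt(12));
        System.out.println(hash); // the salt and cost are embedded in the hash string itself

        // Verification re-hashes the candidate with the embedded salt and compares.
        System.out.println(BCrypt.checkpw(password, hash));      // true
        System.out.println(BCrypt.checkpw("wrong guess", hash)); // false
    }
}

Because the salt and cost factor are embedded in the hash string, verification never needs a separately stored salt, and the cost factor can be raised over time as hardware improves.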
package com.doodl6.wechatrobot.controller;

import com.doodl6.wechatrobot.service.WeChatService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.http.HttpMethod;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

@RestController
@RequestMapping("/weChat")
@Slf4j
public class MainController {

    @Resource
    private WeChatService weChatService;

    /**
     * Receives messages from the WeChat server.
     **/
    @RequestMapping("receiveMessage")
    public String receiveMessage(String signature, String timestamp, String nonce, String echostr,
                                 HttpServletRequest request, HttpServletResponse response) {
        String result;
        if (HttpMethod.GET.name().equals(request.getMethod())) {
            // Verify that the signature is valid; for a valid GET handshake, echo back echostr.
            if (weChatService.checkSignature(signature, timestamp, nonce)) {
                result = echostr;
            } else {
                result = "你是谁?你想干嘛?"; // literally: "Who are you? What do you want?"
            }
        } else {
            try {
                result = weChatService.processReceived(request.getInputStream());
            } catch (Exception e) {
                log.error("Exception while processing the message from WeChat", e);
                result = StringUtils.EMPTY;
            }
        }
        return result;
    }
}
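The checkSignature call is not shown in this file. WeChat's documented server-verification scheme is to sort the configured token, the timestamp, and the nonce lexicographically, concatenate them, compute the SHA-1 digest, and compare the hex digest to the signature parameter. A minimal sketch of such an implementation (the class name and token wiring are assumptions, not the project's actual code):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Arrays;

public final class SignatureChecker {

    private final String token; // assumed to be injected from configuration

    public SignatureChecker(String token) {
        this.token = token;
    }

    public boolean check(String signature, String timestamp, String nonce) {
        try {
            // Sort the three strings lexicographically, as required by WeChat.
            String[] parts = { token, timestamp, nonce };
            Arrays.sort(parts);

            // SHA-1 over the concatenation, rendered as lowercase hex.
            byte[] digest = MessageDigest.getInstance("SHA-1")
                    .digest(String.join("", parts).getBytes(StandardCharsets.UTF_8));
            StringBuilder hex = new StringBuilder();
            for (byte b : digest) {
                hex.append(String.format("%02x", b & 0xFF));
            }
            return hex.toString().equals(signature);
        } catch (Exception e) {
            return false; // treat any failure as an invalid signature
        }
    }
}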
Stressful Life Events for Suicide. Suicide in Intensive Care Units and in Primary Care Units

The impact of stressful life events (e.g., death of a loved one, divorce) or of the various changes of life (e.g., moving house, retirement) on a person's mental and physical health has been investigated by several researchers worldwide. The term "stressful life events" is frequently found in the suicidology literature, alongside the terms "negative life events" and "adverse life events".

Introduction

The impact of stressful life events (e.g., death of a loved one, divorce) or of the various changes of life (e.g., moving house, retirement) on a person's mental and physical health has been investigated by several researchers worldwide. The term "stressful life events" is frequently found in the suicidology literature, alongside the terms "negative life events" and "adverse life events".

A stressful life event is characterized as an event that precedes the onset of a disease and/or of self-destructive behavior and that results in a change or imbalance in the individual's level of adaptation to the environment; the definition does not exclude events from the more distant past, which may exert a long-lasting stressful effect on the individual. A stressful event can be independent of the personal responsibility of the individual (e.g., loss of a loved one) or can depend directly and/or indirectly on the person (e.g., job loss). Particular issues in research on stressful life events are the change such an event produces in the person's life, its desirability or undesirability, its gravity, and the way the person copes with it.

Acceptance of the importance of stressful life events and their association with adverse health outcomes led several researchers to develop questionnaires and semi-structured interviews for recording more detailed information on a person's living conditions. Quantitative assessment of stressful life events was initiated by Holmes and Rahe, who created the Social Readjustment Rating Scale (SRRS); similar questionnaires were subsequently presented internationally by several researchers, and in Greece one was constructed by Madianos.

With regard to gender, and specifically men, the main reason for admission to the ICU was car accidents (64.4%), followed by pathology problems (12%), postoperative monitoring (6.2%), work accidents (5.6%), crimes (5.3%), drugs (3.6%), and finally suicide attempts (2.9%). Surveys have shown that as many as 83% of suicide attempters are not identified as a danger to themselves by healthcare providers, even when examined by professionals in the months before their suicide attempt.

Stressful Life Events in Suicide

An extensive body of psychological autopsy studies from the last half century indicates that almost all suicides had experienced at least one stressful life event (usually more than one) within the last year before their death. Such events play a role particularly in the months and/or weeks immediately preceding the self-destructive behavior.
Specific events seem to carry particular weight and to increase a person's risk of suicide, such as interpersonal conflicts (within the family or in relationships with third parties), separation/divorce, physical illness, unemployment, problems at work, financial problems, serious injury or assault, mourning the loss of a person, domestic violence, and problems with the law (imprisonment).

Among men who died by suicide, stressful life events such as separation/divorce, physical illness, unemployment, problems at work, financial problems, and grief were more frequent. In younger persons, suicidal behavior tends to be preceded by stressful events such as interpersonal problems/loss, family relationship problems, rejection, unemployment/employment problems, violations of the law, economic problems, change of residence/moving, and events that result from their own behavior, while in older people physical disease is most often the predisposing factor.

However, most research on the nature and effect of stressful life events on suicide has been conducted in developed countries. Vijayakumar, John, Pirkis, and Whiteford report that developing countries show variation in which events are related to a person's subsequent suicide. Problems in marriage and other family problems are recorded in India and Taiwan, while events such as social change occur only in developing countries. In particular, data from mainly descriptive studies indicate that modernization in many Asian countries has resulted in cultural tensions, socio-economic stressors, and the collapse of the traditional family system, and these factors have led some people to suicidal behavior.

However, Yufit & Bongar note that although recent stressful events may play an important role in a person's subsequent suicide, these events should be seen in a broader context that also examines the structure of the personality and the person's ability (or vulnerability) in coping with stress, failure, and loss. Moreover, Maris et al. argued that most people do not die by suicide because of anxiety and stressful life events; rather, those who die by suicide or make an attempt are particularly sensitive and vulnerable when facing a stressful event and fail to cope. They point out that all people have a pain threshold beyond which the individual cannot cope with the stress, and self-destructive behavior then becomes the last resort.

Suicide in Intensive Care Units

A study conducted in the medical intensive care unit of a university hospital in Germany between January 1993 and December 1999 showed that, because of the excellent care in the prehospital phase and in the emergency room, the number of patients requiring treatment in the intensive care unit was rather low.

The aim of a study that took place in Greece was to explore the attitudes of Greek nurses working in surgical, medical, and orthopedic wards, the Emergency department, and Intensive Care Units towards attempted suicide. Nursing staff frequently encounter suicidal patients and therefore must be aware of their attitudes towards this group of patients as part of their therapeutic role. A nurse's positive attitude towards attempted suicide can play an important role in preventing a future suicide attempt or a fatal suicide. Suokas and Lonnqvist compared the attitudes of emergency department nursing staff towards self-harming clients with those of intensive care staff and found the emergency nursing staff to be more negative towards these clients than the intensive care staff.

Suicide in Primary Care

Craig et al.,
in a survey that took place in primary care, developed a novel contextual rating of the potential of stressors to produce symptoms for "secondary gain". In the 38 weeks before symptom onset, psychologisers and somatisers were more likely to have experienced at least one stressful event.

Prevention programs have been recommended for improving the detection of depression by primary care professionals, and this may reduce suicides among women. Goodwin et al. suggest that some physical disorders may be associated with increased suicidal ideation and may also play an important role in the relationship between suicidal ideation and depression among patients in primary care. Doctors in primary care may therefore wish to undertake an in-depth evaluation of psychiatric problems, especially current suicidal ideation, among patients with certain physical illnesses.

Moreover, Maniou et al., in a primary care survey conducted in Crete, report that 4.4% of the general population taking part in the research had attempted suicide. In the same survey, 1% reported thoughts of hurting themselves, and 11.8% stated that no one deserves to live.

Conclusion

Stressful life events play an important role in suicide. Nursing and medical staff in Intensive Care Units must be aware of their attitudes towards patients who have attempted suicide. Future surveys should focus on prevention, treatment, and the development of suicide-management protocols in Primary Care.
Q: Repeating basic material in exercise class at university

I am a teaching assistant in mathematics at a university in Continental Europe. The course for which I am an assistant is a third-year course, so the students are usually expected to know the basic notions from calculus and linear algebra. However, I often notice that students are not as familiar with the basic material as I would wish them to be.$^1$

So, the natural question to ask is: What should be my teaching strategy?

There are several possibilities. Each of them has its own pros and cons, which might seem to contradict each other; but in fact they don't, because you can regard a given point as a pro or a con depending on your general attitude towards teaching.

1. Go very slowly and repeat everything.

Pros:

- They can actually learn a lot from the exercise class.
- Everything I do in the exercise class is self-contained; they find everything relevant to a certain topic concentrated on a few sheets of paper. Whence: learning the material is made easier for the students (at least in a certain way).

Cons:

- People who already know the basic material get bored easily.
- I have trouble finishing; I lose plenty of time repeating material that should be known.
- When learning the material, they lose focus on what's important and what's not. This goes hand in hand with: due to me repeating everything, the density of content per unit of time seems very high to the students, although they should already know a lot of the things I say.
- A funny aspect I have encountered is the following: if I repeat basic material, students do not seem to understand the material, but if I skip the repetition and just use the results (e.g., I say "follows from linear algebra"), they think that they understand. But in fact, they don't, because they do not really understand what happens at a particular step in an exercise.

2. Just assume that they know the material.

Pros:

- I can actually finish in time, so they get solutions for every exercise.
- The students who know the material can follow and don't get bored.
- The students should look up the material on their own; in fact, one goal of the university is to make people work independently and scientifically.$^2$

Cons:

- They don't really understand what happens during a particular step of an exercise.
- As a TA, I somehow feel bad when using this method, because I know that there are a lot of students who don't understand the material. Of course, I want them to learn as much as possible. At the latest, they should be able to apply the material in the exam. If they don't learn it fast, they never will, and they will most likely fail the exam. (See footnote 2: with this method, I don't encourage them as much as with method 1 to really learn the material.)

3. A mixture of 1. and 2.

Somehow combines a few pros and cons of each of 1. and 2.

Which method do I apply? Usually I go by method 1., because my aim is to make the students (all students! - even if this is utopian) fully understand the material. However, I am a bit sceptical because of the cons.

- Should I even care about students as in footnote 1?
- Assume that the students would be interested in the material but forgot a lot of basic notions. Does this change the answer to the main question?
- Would it be a good idea to go by method 2. and give the students a sheet of paper with the basic theorems used?

$^1$ This is actually a euphemism. Unfortunately, the students I talk about do not care about grades or even about understanding the material. They just want to pass the exam.

$^2$ ...
but this does not work very well in practice. The kind of students mentioned in the footnote above just don't care, and blame the TA for giving bad exercise classes. They don't understand that at a university not everything is served up on a plate so that they only have to eat the meal - they also have to cook. But it is a matter of the TA's attitude whether one should prepare the ingredients for them (as in a cooking class) or just give them the full recipe.

A: This is a good question, and I hope my answer is just the first among other (likely better!) ones. [I'll try not to rant, but I'm not making any promises.]

Let me start with a few general remarks.

This is a good question, and the fact that you are thinking about teaching in this way is (obviously) a very good sign. Keep reflecting, keep asking, and keep experimenting! (Try incorporating new ideas into your teaching approach, and see what happens. Reflect, discuss, experiment, reflect, discuss, ...)

In general, there's a dangerous trap in all questions like this. Namely, we need to avoid the temptation of thinking that there is a best answer. What works well is highly dependent on the individual students, the instructor, the material, the course format, et cetera. And even with all things being equal, there are still many different effective approaches. So, going along with my first point, take my answer (and any others) with a grain of salt, and be sure to develop a style that works best for you.

Now let me address some of your questions one at a time.

Should the course be too fast or too slow? Given the choice between those, I (like you) prefer to err on the side of too slow; your job is for them to learn, not for you to prevent boredom. That said, naturally the best is somewhere "just right," and if you're close enough to that sweet spot, then it wouldn't matter much whether you're a little too fast or too slow. [Opinions differ on this point, but we can all agree that the course should ideally be at just the "right" pace.]

How do we know how fast is too fast? I wondered about this question for a while, until one day I realized the answer. Although I may have some idea, I ultimately don't really know what's too fast - but the students do! To say it differently, you may be an expert in the material, but only the students can learn it. In my opinion, the best thing you can do is (1) ask plenty of questions [in class, on quizzes, on take-home assignments] so that you [and ideally the students!] can discover what the students do and do not know, and (2) let the students dictate the pace.

As an example, you might raise a question for the class and have them work on it individually or discuss it in groups. You'll be able to see how quickly they work through things, and you can walk around and help out students individually [this lets you get a finer understanding of each student's background, and it also lets you give really personalized instruction]. If you see (perhaps to your surprise) that many students are struggling with some concept that they should have known, then you can address this as a class or give some extra homework on it.

How do we teach students who don't know the prerequisites? This is tricky, and depends on many things. Let me give some scenarios to illustrate what I might do.

Scenario 1: Say you're teaching differential equations.
I would assume the students are all proficient in the mechanics of first-semester calculus (otherwise I couldn't possibly teach everything), and on the first day I would probably say something to them along the lines of: "To do well in this course, you'll need to know topics X, Y, and Z. Here is a quiz on these topics that you should already know. [they take the quiz, then perhaps we discuss the answers] I don't really care if you know this material right now, but I will give you another quiz on this material at the end of the week. If you don't ace that quiz, you should change out of this class, because I literally cannot imagine how you could possibly pass the course otherwise."

Scenario 2: Suppose you're teaching some class, and there's material from a few courses back that they have maybe forgotten [for example, the trigonometric identities used to compute integrals like $\int \cos^2(x)\,dx$; a worked reminder appears at the end of this answer]. In this situation, I would spend some time reminding the students of the facts we need, but I wouldn't cover it nearly at the level that would be expected in their previous class.

What about individual students who are falling behind? In office hours, if a student asks me to go over section 4, I usually start by asking them a question about section 3. I then go back in time until we can put our finger on what it is they don't know. Students almost never like "relearning" things that they "already know," but they need it. After you figure out what they need to work on, ask them plenty of questions and let them drive the pace entirely.
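For concreteness (my addition, not part of the original exchange), the half-remembered fact in the Scenario 2 example is the power-reduction identity, from which the integral follows in one line:

$$\cos^2(x) = \frac{1 + \cos(2x)}{2}, \qquad \text{so} \qquad \int \cos^2(x)\,dx = \frac{x}{2} + \frac{\sin(2x)}{4} + C.$$

A two-line reminder like this is often enough to unblock students without re-teaching an entire trigonometry unit.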
HARRISBURG — The Pennsylvania Department of Health is demanding $3.3 million in recouped expenses and lost savings if ordered to reopen the health centers it began shutting down in late May as part of a statewide consolidation plan.

The demand is the latest motion filed by the state to fight a legal challenge by the Service Employees International Union Healthcare Pennsylvania over the Corbett administration's plan to shut down 26 of the state's 60 public health centers. The so-called modernization plan aims to streamline public health services and shed costly leases by merging offices in rural areas. As the court battle drags on, it appears that recently closed centers, like the one in Vanport Township, where the state terminated its lease on June 30, will remain closed.

The state health centers serve primarily uninsured and underinsured residents, and offer services including vaccinations, STD and HIV testing, and investigations into disease outbreaks.

The SEIU, joined by several nurses and five Democratic lawmakers, sued the state in April over the modernization plan, arguing that a 1996 law prevents such a reduction in public health services without legislative approval. State health officials say that the goal is to make an outdated system more efficient while achieving cost savings, and that services will actually be improved and even expanded under the plan.

"These services will not be disrupted just because a building is being closed," health department spokeswoman Aimee Tysarczyk said. "Instead, we will mobilize public health services within the same county and make them more available in locations you know and use most, such as community centers or schools."

The state Supreme Court issued a temporary injunction on July 17 calling on the state to halt its consolidation plan until a full hearing determined its fate, but by then the state had already closed seven health centers and was in the process of shuttering seven more. Last week the state asked the high court to clarify the temporary injunction. In a 55-page filing, the state argued that the "more reasonable interpretation" of that order called on the state to pause plans to close the 18 facilities slated for closure.

Should the state be forced to reopen closed centers, the SEIU and fellow appellants would owe the state $3.32 million to account for move-in costs, equipment purchases, lease payments and other expenses not included in the 2013-14 budget, the state argued. If the state merely had to hold off on future closures, the state demanded the appellants post a $585,000 bond to keep 18 leases open.

"We obviously think that's kind of extreme and not practical," said Kevin Hefty, SEIU Pennsylvania Healthcare vice president for the state sector.

On Monday, the SEIU filed a 20-page response insisting the injunction meant the state must reopen the facilities it has already closed. The response stated the SEIU could not afford to post the requested bonds, and shouldn't have to. The SEIU further argued that the proposed amounts were "exaggerated and speculative," and that the state had incurred any such costs "at their own risk" amid an ongoing lawsuit.

"That the Department chose to move ahead with its plans, including negotiating the early termination of leases for the State Health Centers which are in contention, while the litigation was pending was a decision it made at its peril," wrote Bruce M. Ludwig, attorney for the SEIU.
Granting the state its bond requests would effectively force the SEIU to lose its appeal, and could also set a precedent that would "deter legitimate lawsuits seeking to vindicate important rights," Ludwig said.

Those opposed to the consolidation plan argue that residents in counties where centers have shut down will lose access to important services, and that the local areas won't be as well prepared to handle public health threats. Workers at the former Vanport Township facility helped provide vaccines during the H1N1 virus outbreak of 2009, and during the 2003 hepatitis A outbreak traced to a local Chi-Chi's restaurant.

"The public health professionals that work in the state health centers are really like the eyes and ears on the ground. They help to monitor local issues," said Tammi Stuck, who retired last year after working 35 years as a community health nurse in Fayette County. "The needs vary from county to county. It's really important to have a local presence there to determine what those needs are."

The two Vanport Township center nurses have moved to a health center in Butler County, about 35 miles northwest of the former Beaver County center on Walnut Lane near State Street.

The state's plan aims to cut back on the cost of facilities; according to Tysarczyk, 77 percent of the centers' operational funds go toward leases rather than services. "We would like to see those funds go to improving and increasing access to services, especially when many of these facilities see only a handful of walk-ins per week or even per month," Tysarczyk said.

Joe Donahue, a former school nurse consultant who lost his job in May, said those walk-in figures are misleading, as most of the centers focus on planned appointments and clinics. "I'm not saying that their efficiencies probably couldn't have been improved upon, but one of the things that concerns me is that we now have leadership in Harrisburg running the Department of Health who don't have health backgrounds," Donahue said.

The July 17 injunction by the Supreme Court also called for an expedited hearing to settle the case, but it's unclear just how quickly that ruling will move the larger lawsuit along.
Effect of Cypermethrin on the Growth of Ciliate Protozoan Paramecium caudatum

Objective: The objective of this study is to assess the effect of cypermethrin on the growth of the ciliate protozoan Paramecium caudatum.

Materials and Methods: Monoxenic cultures of P. caudatum were exposed to different doses (0.01, 0.05, 0.1, 0.15, and 0.2 µg/L) of cypermethrin, along with a control, for 24, 48, 72, and 96 h time intervals. The total numbers of live and dead cells were counted after trypan blue staining in a Neubauer hemocytometer.

Results: A marked decrease in the number of living cells with increasing concentration of cypermethrin and with increasing exposure time was recorded.

Conclusion: The results indicate that cypermethrin is toxic to P. caudatum even at low concentrations when it enters the aquatic system through runoff.

INTRODUCTION

India is the second largest producer of fish, and the value of the output was about Rs. 91,541 crore during 2012-2013, which is about 4.36% of the value of agricultural and allied sector output at current prices. Increased agriculture and allied services have solved the problem of food shortages, in part by increasing the frequency of chemical pesticide use. Indiscriminately used synthetic chemical pesticides are often detected as contaminants in different water sources; they enter the aquatic system, together with sediment, nutrients, pathogens, and salts, through runoff or leaching, which increase during rainfall. In the present study, Paramecium caudatum was used as a test organism to examine the effect of the synthetic pyrethroid insecticide cypermethrin through a series of in vitro tests. The bioassays include the study of growth in the number of organisms exposed to different concentrations of the insecticide.

Test compound

The commercial grade cypermethrin insecticide (cybergun-25, 25% EC) used in this study was manufactured by Swastik Pesticide Ltd., India.

Experimental set up

Indoor microcosms (length and width: 80 cm; depth: 40 cm) were kept in a climate room under constant conditions (temperature 15°C ± 1°C; 12 h photoperiod provided by cool fluorescent lamps). Unpolluted water from a deep well was used in the experiment, and the microcosms were replenished with water from the same source at weekly intervals. Five of the 10 microcosms were treated with cypermethrin, while five served as controls. The treatment concentrations (0.01, 0.05, 0.1, 0.15, and 0.2 µg/L of cypermethrin) were prepared by diluting a stock solution of cypermethrin in acetone, which was subsequently mixed into the water column.

Monoxenic culture of Paramecium caudatum

P. caudatum was selected as the test species for the present studies because it is easy to culture and maintain in the laboratory. A monoxenic culture of P. caudatum was prepared and subsequently subcultured every 5 days to ensure maintenance.

Cell population growth studies

For sampling, 5 evenly distributed positions were chosen in each microcosm, and depth-integrated samples were taken using plastic tubes. Aliquots of 100 µL were taken from the control and the exposed cultures at different time intervals. The samples were properly diluted in distilled water and fixed for 1 h with neutral buffered formalin, containing 10% (v/v) formalin in phosphate-buffered saline (pH 7.0), at a final concentration of 2-5%. The total number of paramecia from control and treated microcosms was recorded after 24, 48, 72, and 96 h using an optical microscope at 40× magnification and a Neubauer hemocytometer.
Dead and live specimens were differentiated using the trypan blue staining technique. Triplicates were maintained for all test concentrations. The results were analyzed statistically by applying Student's t-test to determine significance in comparison with the control (an illustrative sketch of such a comparison appears after the Conclusion).

Study on population growth

The experiment was performed to observe the harmful effect of cypermethrin on the cell population growth of P. caudatum. The effects of cypermethrin on population growth are shown in Figure 1. The cypermethrin effect can be delineated from the control group, as exposure of the organisms to 0.1 µg/L of the insecticide resulted in an increase in the number of dead cells. The overall response was a marked decrease in the number of living cells with increasing dosage of cypermethrin and with increasing time intervals. A significant increase (P < 0.05) in the number of dead cells was observed after 48 h in the tanks treated with the insecticide. In the tank containing 0.15 µg/L of the insecticide, the density of live cells was reduced significantly after 24 h, and the lethal effect was observed after 48 h of exposure. The same trend was observed for the tank containing 0.2 µg/L of the insecticide, where lethality was noted even after 24 h of treatment. The counts of live and dead cells show that the tanks containing 0.15 and 0.2 µg/L of cypermethrin had significantly higher mortality (P < 0.01) than the control.

DISCUSSION

When pollutants are released into aquatic habitats, direct (toxic) effects on aquatic biota are possible. An important contribution of microorganisms in the present era is their use as gauges of toxic stress, as agents of bioremediation, and as biomonitors in aquatic bodies that are being polluted by pesticide residues, domestic sewage, and industrial effluents. Protozoans such as Tetrahymena pyriformis, Spirostomum ambiguum, P. caudatum, and Oxytricha fallax are mostly used for laboratory research. The cytotoxicity of many different xenobiotic compounds has been evaluated using ciliates, as these are sensitive to environmental alterations and have been proposed as biological indicators of environmental pollution. Among the various end points recommended to evaluate cytotoxic effects, population growth rate has been used extensively.

Cypermethrin is a cyanophenoxybenzyl pyrethroid and is categorized as a restricted-use pesticide by the US Environmental Protection Agency because of its high toxicity to fish. The present work focused on the effects of cypermethrin on population growth, and it clearly shows that the insecticide is lethal. The increase in the number of organisms at 0.01 µg/L of cypermethrin at the 24 and 48 h time intervals, followed by the decline in the growth of paramecia at higher doses (0.1, 0.15, 0.2 µg/L) and longer time intervals (48, 72, and 96 h), demonstrates the toxic effect of the insecticide. The results agree with previous studies in that low concentrations of cypermethrin do not have any adverse effect on the population growth of paramecia, the result obtained being nearly the same as the control values.

CONCLUSION

The present study clearly emphasizes that the cypermethrin insecticide, which naturally enters the aquatic system with agricultural runoff, is lethal to P. caudatum at high concentrations. The reduction in population growth indicates that these pesticides, in high concentration, are toxic to protozoans.
Since aquatic environments serve as sinks for numerous environmental pollutants, the effects of these substances on the resident aquatic organisms can be quite serious.

Financial support and sponsorship: Nil.

Conflicts of interest: There are no conflicts of interest.
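Illustrative sketch of the statistical comparison described in the Methods: the following is a minimal example of a two-sample Student's t-test, assuming hypothetical cell counts (the paper's actual data are not reproduced here) and the Apache Commons Math library:

import org.apache.commons.math3.stat.inference.TTest;

public class GrowthComparisonSketch {
    public static void main(String[] args) {
        // Hypothetical live-cell counts from triplicate tanks (not the paper's data).
        double[] control = { 412, 398, 405 };
        double[] treated = { 301, 288, 295 }; // e.g., after 48 h of exposure

        TTest tTest = new TTest();
        double t = tTest.t(control, treated);     // t statistic
        double p = tTest.tTest(control, treated); // two-tailed p-value
        System.out.printf("t = %.3f, p = %.4f%n", t, p);
        System.out.println(p < 0.05 ? "significant at P < 0.05" : "not significant");
    }
}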
package com.aminebag.larjson.blueprint;

import com.aminebag.larjson.resource.ResourceFactory;
import com.aminebag.larjson.utils.ByteBufferUtils;
import com.aminebag.larjson.utils.TemporaryFileFactory;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.List;

/**
 * @author <NAME>
 *
 * LarJsonBlueprintWriter allows writing binary values in a LarJson blueprint. Depending on configuration and on the
 * final size of the blueprint, the latter might be stored in a file or in memory.
 */
public class LarJsonBlueprintWriter implements Closeable {

    // Maximum values encodable on 1, 2, 4 and 8 bytes respectively; the remaining high bits
    // of each width carry the flags below, which mark the width used.
    static final byte MAX_BYTE_VALUE = 0b00000000_00000000_00000000_00000000_00000000_00000000_00000000_01111111;
    static final short MAX_SHORT_VALUE = 0b00000000_00000000_00000000_00000000_00000000_00000000_00111111_11111111;
    static final int MAX_INT_VALUE = 0b00000000_00000000_00000000_00000000_00011111_11111111_11111111_11111111;
    static final long MAX_LONG_VALUE = 0b00001111_11111111_11111111_11111111_11111111_11111111_11111111_11111111L;

    static final long SHORT_FLAG = 0b00000000_00000000_00000000_00000000_00000000_00000000_10000000_00000000L;
    static final long INT_FLAG = 0b00000000_00000000_00000000_00000000_11000000_00000000_00000000_00000000L;
    static final long LONG_FLAG = 0b11100000_00000000_00000000_00000000_00000000_00000000_00000000_00000000L;

    private final ByteBuffer buffer = ByteBuffer.allocate(8192);
    private final BinaryWriter binaryWriter;
    private long position = 0L;

    public LarJsonBlueprintWriter(long maxMemoryBlueprintSize, TemporaryFileFactory temporaryFileFactory)
            throws IOException {
        this.binaryWriter = new SimpleBinaryWriter(maxMemoryBlueprintSize, temporaryFileFactory);
        this.buffer.order(ByteOrder.LITTLE_ENDIAN);
    }

    /**
     * Writes a non-negative value using a variable-length encoding: 1, 2, 4 or 8 bytes,
     * depending on the magnitude of the value.
     */
    public void put(long value) throws IOException {
        if (value < 0L) {
            throw new IllegalArgumentException("Value cannot be negative : " + value);
        } else if (value <= MAX_BYTE_VALUE) {
            ensureRemaining(Byte.BYTES);
            buffer.put((byte) value);
        } else if (value <= MAX_SHORT_VALUE) {
            ensureRemaining(Short.BYTES);
            buffer.putShort((short) (SHORT_FLAG | value));
        } else if (value <= MAX_INT_VALUE) {
            ensureRemaining(Integer.BYTES);
            buffer.putInt((int) (INT_FLAG | value));
        } else if (value <= MAX_LONG_VALUE) {
            ensureRemaining(Long.BYTES);
            buffer.putLong(LONG_FLAG | value);
        } else {
            throw new IllegalArgumentException(
                    "Value " + value + " is greater than the max value (" + MAX_LONG_VALUE + ")");
        }
    }

    /**
     * Flushes the internal buffer to the underlying writer if fewer than the requested
     * number of bytes remain, and advances the logical position.
     */
    private void ensureRemaining(int bytes) throws IOException {
        if (buffer.remaining() < bytes) {
            ByteBufferUtils.flip(buffer);
            binaryWriter.write(buffer);
            ByteBufferUtils.clear(buffer);
        }
        position += bytes;
    }

    /**
     * Writes a list of values in reverse order, followed by the fixed element width in
     * bytes (chosen to fit the given maximum value), and finally the list size.
     */
    public void putList(List<Long> values, long maxValue) throws IOException {
        if (values.size() > 0) {
            if (maxValue <= Byte.MAX_VALUE) {
                for (int i = values.size() - 1; i >= 0; i--) {
                    ensureRemaining(Byte.BYTES);
                    buffer.put(values.get(i).byteValue());
                }
                ensureRemaining(Byte.BYTES);
                buffer.put((byte) Byte.BYTES);
            } else if (maxValue <= Short.MAX_VALUE) {
                for (int i = values.size() - 1; i >= 0; i--) {
                    ensureRemaining(Short.BYTES);
                    buffer.putShort(values.get(i).shortValue());
                }
                ensureRemaining(Byte.BYTES);
                buffer.put((byte) Short.BYTES);
            } else if (maxValue <= Integer.MAX_VALUE) {
                for (int i = values.size() - 1; i >= 0; i--) {
                    ensureRemaining(Integer.BYTES);
                    buffer.putInt(values.get(i).intValue());
                }
                ensureRemaining(Byte.BYTES);
                buffer.put((byte) Integer.BYTES);
            } else {
                for (int i = values.size() - 1; i >= 0; i--) {
                    ensureRemaining(Long.BYTES);
                    buffer.putLong(values.get(i));
                }
                ensureRemaining(Byte.BYTES);
                buffer.put((byte) Long.BYTES);
            }
        }
        put(values.size());
    }

    public long position() {
        return position;
    }

    @Override
    public void close() throws IOException {
        ByteBufferUtils.flip(buffer);
        binaryWriter.write(buffer);
        ByteBufferUtils.clear(buffer);
        binaryWriter.close();
    }

    public ResourceFactory<? extends LarJsonBlueprintReader> getReaderFactory() {
        return binaryWriter.getBlueprintReaderFactory();
    }

    public Closeable getOnErrorCleaner() {
        return binaryWriter.getPostErrorCleaner();
    }
}
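A minimal usage sketch of the variable-length encoding (my illustration, not part of the project; it assumes a TemporaryFileFactory implementation is supplied by the caller, since that interface is not shown here):

import java.io.IOException;

// Hypothetical helper demonstrating how many bytes each magnitude consumes.
class BlueprintWriterUsageSketch {
    static void writeSketch(TemporaryFileFactory tmpFiles) throws IOException {
        try (LarJsonBlueprintWriter writer = new LarJsonBlueprintWriter(1 << 20, tmpFiles)) {
            writer.put(100L);            // fits in 7 bits  -> encoded on 1 byte
            writer.put(10_000L);         // fits in 14 bits -> encoded on 2 bytes (SHORT_FLAG set)
            writer.put(1_000_000L);      // fits in 29 bits -> encoded on 4 bytes (INT_FLAG set)
            writer.put(10_000_000_000L); // fits in 60 bits -> encoded on 8 bytes (LONG_FLAG set)
            System.out.println(writer.position()); // 15 bytes written in total
        }
    }
}

The flag bits in the leading positions let the matching reader recover the width of each value from its first byte, so small values dominate the blueprint at one byte apiece.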
The New England Patriots have restructured tight end Rob Gronkowski's contract to boost his potential 2018 earnings by $4.3 million, according to agent Drew Rosenhaus. Gronkowski has a base salary of $8 million and can earn up to $1 million in roster bonuses if he is active for all 16 games, with $3.3 million achievable in additional incentives. The new deal has a max value of $13.05 million this year.

The incentives will pay Gronkowski $1.1 million for hitting each of the following benchmarks (maxing out at three benchmarks for $3.3 million): playing 80 percent of the offensive snaps, making at least 70 catches, gaining at least 1,085 receiving yards or scoring at least nine touchdowns. The latter three figures are each exactly one unit more than he had in 2017 (69 catches, 1,084 yards, eight TDs).

The restructure marks the second consecutive year in which the Patriots have boosted Gronkowski's deal through incentives; he made an extra $5.5 million last year after garnering his fourth career first-team All-Pro nod. He is scheduled to make $9 million in base salary and $1 million in roster and workout bonuses in 2019, the final year of his contract.

—A day after becoming the highest-paid player in NFL history for the second time in his career, Green Bay Packers quarterback Aaron Rodgers said he explored the possibility of a uniquely structured deal before receiving a more traditional contract.

—Colin Kaepernick's collusion claim against the NFL will continue after system arbitrator Stephen B. Burbank denied the league's request to dismiss the case. The ruling means Burbank found sufficient evidence for the case to continue and possibly go to trial. The NFL declined to comment on the decision. Pro Football Talk reported, citing a source, that further discovery is now expected, giving Kaepernick and his attorney Mark Geragos more opportunities to gather information supporting their position. Eventually, the case could lead to a hearing in which owners would be required to testify.

—The Los Angeles Rams created about $7 million of 2018 cap space by restructuring the contracts of left tackle Andrew Whitworth and wide receiver Robert Woods, according to an ESPN report. The moves raise the Rams' cap space to around $10.1 million, per Spotrac, creating additional room to fit what is expected to be a record-breaking extension for defensive tackle Aaron Donald. Whitworth, 36, was scheduled to make $8 million in base salary this season, the second year of a three-year, $33.75 million deal. Per Spotrac, he had nearly $7 million of that converted into a signing bonus and prorated over the next two seasons. Woods, 26, had $4.7 million of his $5.5 million base salary for this season converted into a signing bonus, per Spotrac, stretching the cap hit over the next four years.

—Elvis Dumervil announced his retirement from the NFL after 12 seasons in a social media post. A fourth-round pick out of Louisville selected to the Pro Bowl five times, Dumervil recorded 105.5 career sacks, which ranks 26th on the all-time list since the stat became official in 1982. He led the NFL with 17 sacks in 2009, the first of two first-team All-Pro appearances.

—Arizona Cardinals wide receiver Larry Fitzgerald eulogized Sen. John McCain, telling stories of the man who became his close friend despite their differences. Fitzgerald was one of four speakers to offer eulogies at a memorial service in Phoenix for McCain, who died of brain cancer last Saturday at 81.
Fitzgerald spoke immediately before former Vice President Joe Biden, who became one of McCain’s closest friends when they served together in the U.S. Senate. —Indianapolis Colts rookie defensive end Tyquan Lewis is expected to miss the first nine weeks of the season with a foot injury, according to the Indianapolis Star. Coach Frank Reich addressed the situation earlier this week but didn’t have a timetable for Lewis’ return, saying he didn’t expect Lewis to have surgery. The Colts traded up in the 2018 NFL Draft to select Lewis, an Ohio State product, with the final pick of the second round. In his career with the Buckeyes, he had 23.5 sacks and 36.5 tackles for loss. —The Houston Texans have signed center Greg Mancz to a two-year extension, the team announced. Figures for the deal were not announced, but Sirius XM NFL reported Mancz will make $3 million per year in 2019 and 2020. He was scheduled to make $1.9 million in 2018 on his restricted free agent tender. Mancz, 26, has started 23 games over the last two seasons, primarily at center but also at right guard. He is expected to back up starter Nick Martin at the pivot this year, while providing depth behind free agent signees Senio Kelemete and Zach Fulton, who will start at left and right guard, respectively. —The Tampa Bay Buccaneers waived running back Charles Sims III from injured reserve with an injury settlement, the team announced, making him a free agent. Sims was placed on IR last Thursday, five days after injuring his knee while blocking during the opening kickoff of the Bucs’ second preseason game. Sims, who turns 28 next month, re-signed with Tampa Bay on a one-year, $1 million deal this offseason after hitting free agency. —Minnesota Vikings receiver Cayleb Jones was formally charged with felony theft, misdemeanor domestic assault, and interfering with a 911 call in a court appearance at Dakota County Court in Hastings, Minn. Jones was arrested Tuesday, the day of the alleged incident. Bail was set Thursday at $5,000 without conditions and $1,000 with conditions by Judge Richelle M. Wahi. Jones’ next court date is slated for Oct. 4. According to a police report, the incident occurred on Tuesday night at a hotel in Eagan, Minn. A woman identifying herself as Jones’ girlfriend called 911 and told authorities Jones put his hands on her. She told police he picked her up and threw her down, before later kicking her and stealing her phone as she called for help. —The Dallas Cowboys agreed to trade undrafted rookie cornerback Charvarius Ward to the Kansas City Chiefs, according to an ESPN report. Per ESPN, the deal is expected to be finalized Friday. Ward went unpicked out of Middle Tennessee State in April’s draft and signed with Dallas. He has seven tackles and two passes defensed in two appearances this preseason after posting 10 passes defensed in his final year with the Blue Raiders.
The present disclosure of invention relates generally to monolithic integrated circuits, and more specifically to a repeated macrocell module design for use within Programmable Logic Devices (PLD's). The disclosure relates even more specifically to a macrocell module design as applied to a subclass of PLD's known as Complex Programmable Logic Devices (CPLD's) and High-Density Complex Programmable Logic Devices (HCPLD's). After this disclosure is lawfully published, the owner of the present patent application has no objection to the reproduction by others of textual and graphic materials contained herein provided such reproduction is for the limited purpose of understanding the present disclosure of invention and of thereby promoting the useful arts and sciences. The owner does not however disclaim any other rights that may be lawfully associated with the disclosed materials, including but not limited to, copyrights in any computer program listings or art works or other works provided herein, and to trademark or trade dress rights that may be associated with coined terms or art works provided herein and to other otherwise-protectable subject matter included herein or otherwise derivable herefrom. If any disclosures are incorporated herein by reference and such incorporated disclosures conflict in part or whole with the present disclosure, then to the extent of conflict, and/or broader disclosure, and/or broader definition of terms, the present disclosure controls. If such incorporated disclosures conflict in part or whole with one another, then to the extent of conflict, the later-dated disclosure controls. Field-Programmable Logic Devices (FPLD's) have continuously evolved to better serve the unique needs of different end-users. From the time of introduction of simple PLD's such as the Advanced Micro Devices 22V10(trademark) Programmable Array Logic device (PAL), the art has branched out in several different directions. One evolutionary branch of FPLD's has branched out along a paradigm known as Complex PLD's or CPLD's. This paradigm is characterized by devices such as the ispMACH(trademark) family (available from Lattice Semiconductor Corp. of Oregon). Examples of CPLD circuitry are seen in U.S. Pat. No. 5,015,884 (issued May 14, 1991 to Om P. Agrawal et al.) and U.S. Pat. No. 5,151,623 (issued Sep. 29, 1992 to Om P. Agrawal et al.) as well as in other CPLD patents cited above, including U.S. Pat. No. 6,150,841 which will be specifically addressed herein. A CPLD device may be characterized as being constituted by a monolithic, integrated circuit (IC) that typically has four major features as follows. (1) A user-accessible, configuration-defining memory means, such as EPROM, EEPROM, anti-fused, fused, SRAM, or other, is provided in the CPLD device so as to be at least once-programmable by device users for defining user-provided configuration instructions. Static Random Access Memory or SRAM is, of course, a form of reprogrammable memory that can be differently programmed many times. Electrically Erasable and reProgrammable ROM or EEPROM is an example of nonvolatile reprogrammable memory. The configuration-defining memory of a CPLD device can be formed of a mixture of different kinds of memory elements if desired (e.g., SRAM and EEPROM). Typically it is of the nonvolatile, In-System reProgrammable (ISP) kind such as EEPROM. (2) Input/Output means (IO's) are provided for interconnecting internal circuit components of the CPLD device with external circuitry.
The IO""s may have fixed configurations or they may include configurable features such as variable slew-output drivers whose characteristics may be fine tuned in accordance with user-provided configuration instructions stored in the configuration-defining memory means. (3) Programmable Logic Blocks (PLB""s) are provided for carrying out user-programmed logic functions as defined by user-provided configuration instructions stored in the configuration-defining memory means. Typically, each of the many PLB""s of a CPLD has at least a Boolean sum-of-products generating circuit (e.g., an AND/OR array or an equivalent such as a NAND/NAND array) or a Boolean product-of-sums generating circuit (e.g., an OR/AND array or an equivalent such as a NOR/NOR array) that is user-configurable to define a desired Boolean function, xe2x80x94to the extent allowed by the number of product terms (PT""s) or sum terms (ST""s) that are combinable by that circuit. Each PLB may have other resources such as input signal pre-processing resources and output signal post-processing resources. The output signal post-processing resources may include result storing and/or timing adjustment resources such as clock-synchronized registers. Although the term xe2x80x98PLBxe2x80x99 was adopted by early pioneers of CPLD technology, it is not uncommon to see other names being given to the repeated portion of the CPLD that carries out user-programmed logic functions and timing adjustments to the resultant function (4) An interconnect network is generally provided for carrying signal traffic within the CPLD between various PLB""s and/or between various IO""s and/or between various IO""s and PLB""s. At least part of the interconnect network is typically user-configurable so as to allow for programmably-defined routing of signals between various PLB""s and/or IO""s in accordance with user-defined routing instructions stored in the configuration-defining memory means. In contrast to FPGA""s, which are LUT-based PLD""s (where a LUT in this context is a user-programmable Look-Up Table), gate-based CPLD""s are generally recognized in the art as having a comparatively less-expansive capability of implementing a wide variety of functions, in other words, not being able to implement all Boolean functions for a given input space as can a LUT. CPLD""s however, are expected to provide their lesser variety of logic functions with comparatively higher throughput speeds (smaller signal-propagation delays). In other words, wide functionality is sacrificed to obtain shorter, pin-to-pin signal delays. Thus pin-to-pin delay is an important measure of CPLD performance. Also, because length of signal routings through the programmable interconnect of a CPLD is often arranged so it will not vary significantly despite different signal routings, CPLD""s are generally recognized as being able to provide relatively consistent signal delays whose values often do not vary substantially in spite of how the corresponding CPLD configuring software (the partitioning, placement and routing software which configures the CPLD) behaves. Many devices in the Lattice/Vantis ispMACH(trademark) family provide such a consistent signal delay characteristic under the trade name of SpeedLocking(trademark). The more generic term, Speed-Consistency will be used interchangeably herein with the term, SpeedLocking(trademark). A newly evolving sub-branch of the growing families of CPLD devices is known as High-Density Complex Programmable Logic Devices (HCPLD""s). 
This sub-branch may be generally characterized as being constituted by monolithic IC's that each have large numbers of I/O terminals (e.g., Input/Output pins) in the range of about 32 or more (e.g., 64, 96, 128, 192, 256, 320, etc.) and/or have large numbers of result-storing macrocell units in the range of about 32 or more (e.g., 64, 128, 256, 320, 512, 1024, etc.). The process of concentrating large numbers of I/O pins and/or large numbers of macrocells into a single CPLD device raises new challenges for achieving relatively broad functionality, high speed, and Speed-Consistency (SpeedLocking(trademark)) in the face of wide varieties of configuration software. More detailed discussion regarding different HCPLD architectures (1, 2, or 3 level hierarchical interconnects) and interrelated topics (e.g., adaptability to configuration software) are provided in the above-cited U.S. Pat. No. 6,184,713. As such they will not be repeated here except to briefly note the following. Configuration software can produce different results, good or bad, depending in part on what broadness of functionalities, what routing flexibilities and what timing flexibilities are provided by the architecture of a target CPLD. The present disclosure focuses on how some minor sacrifices in broadness of functionalities can provide more timing flexibilities, where such improvements are made in repeated structures referred to herein as macrocell modules. The macrocell modules of a CPLD are typically configured at the same time that other programmable resources of the CPLD are configured. When the CPLD-configuring software is confronted with a given design problem (a supplied design specification that is to be realized by one or more CPLD's), the CPLD-configuring software typically cycles through a series of phases that are referred to commonly as 'synthesis', 'mapping', 'partitioning', 'placement', and 'routing'. Results can vary because differently designed CPLD's can have differently designed PLB's (and differently designed macrocell modules therein) with respectively different, logic-implementing capabilities, resource-utilization efficiencies, and/or signal-propagation timing control capabilities. Partitioning and routing software operations typically have to account for the maximum size and speed of circuitry that each PLB is able to implement within the specific CPLD device and to further account for pin-to-pin delay in over-all implementation of the circuit design. If all goes well in the partitioning, placement, and routing phases, the CPLD configuring software may determine that it has found a workable 'solution' comprised of a specific partitioning of the original circuit into placeable chunks, a specific set of primitive placements of the chunks into specific PLB's, and a specific set of interconnect usage decisions (routings). The software can then deem its mission to be complete and it can use the placement and routing results to generate the configuring code (e.g., the configuration bit stream) that will be used to correspondingly configure the designated CPLD. In various instances, the CPLD configuring software may find that it cannot complete its mission successfully on a first try.
It may find, for example, that the initially-chosen placement and routing strategies prevent time-critical signals from reaching their desired destinations quickly enough to satisfy timing requirements of the input logic specification. Moreover, if the CPLD does not have enough resources, the CPLD configuring software may find that it has exhausted CPLD resources (e.g., inter-block interconnect) without completing the to-be-implemented design. It is desirable, in view of this, to have a CPLD structure which features small signal propagation times for implementing speed-critical parts of the to-be-implemented circuit, and in contradiction to this first desire, to have a CPLD structure which has the ability to densely implement various logic functions such that CPLD resources (e.g., inter-block interconnect) will not be exhausted by complex designs. It is also desirable to have a CPLD whose architecture eases the partitioning, placement, and routing chores of CPLD-configuring software. Aside from speed and full function implementation, users of CPLD's also usually want a certain degree of re-design agility (flexibility). Even after an initial design is successfully implemented by a CPLD, users may wish to make slight tweaks or other changes to their original design. The re-design agility of a given CPLD architecture may include the ability to re-design certain internal circuits without changing I/O timings. Re-design agility may also include the ability to re-design certain internal circuits without changing the placement of various I/O terminals (e.g., pins). Such re-design agilities are sometimes referred to respectively as re-design Speed-Locking(trademark) and Pin-Retention (the former term is a trademark of Lattice Corp., headquartered in Hillsboro, Oreg.). The more generic terms of 're-design Speed-Consistency' and 're-design PinOut-Consistency' or 'terminal-retention' may be respectively used herein interchangeably with 're-design Speed-Locking(trademark)' and 're-design Pin-Retention'. In addition to speed, re-design agility, and full Boolean correctness, users of CPLD's typically ask for optimal emulation of an original design or a re-design in terms of good function packing density, low cost, low power usage, synchronous signal flow, and so forth. It is not a trivial matter to satisfy all these desires because often times they conflict with one another. One solution for trying to satisfy these conflicting desires is presented by the unique macrocell design of the above-cited U.S. Pat. No. 6,150,841. One or more improvements over that macrocell design are disclosed herein. Structures and methods may be provided in accordance with the present disclosure of invention for improving over the above-described macrocell design and/or providing other advancements over prior CPLD designs.
(A) More specifically, in accordance with one set of aspects of the present disclosure, techniques are provided for allowing one or more of the following:
1) Elective use of a fast, allocator-bypassing path (e.g., fast 5-PT path) in combination with in-block simple or super-allocation;
2) Elective use of an OSM-bypassing path for signals that do not need pin-consistency (pin-retention);
3) Automatic re-routing of output enable signals that correspond to output signals which are re-routed for pin-consistency purposes;
4) Global distribution of globally-usable output enable signals;
5) Elective use of two-stage steering to develop complex sum-of-clusters terms; and
6) Use of unidirectional super-allocation with stage-2 wrap-around in designs having about 20 or fewer macrocell units per logic block.
(B) In accordance with a second aspect of the present disclosure, techniques are provided for concentrating the development of complex function signals (e.g., ≦80 PT's) within singular logic blocks so that the development of such complex function signals does not consume inter-block interconnect resources. A CPLD configuring method in accordance with the present disclosure may include the machine-implemented steps of: first, identifying middle-complexity functions that are to be implemented by the CPLD, where each such middle-complexity function is achievable by combined simple or super-allocation based development in one logic block and fast-path completion in the same or a second logic block; second, identifying, among those middle-complexity functions that satisfy the first identification criteria, those that have critical timing constraints that are achievable by combined simple or super-allocation based development in one logic block and fast-path completion in the same or a second logic block; and configuring the CPLD to realize one or more of the functions identified in the first and second identification steps by simple or super-allocation based development in one logic block and fast-path completion in the same or a second logic block. Other aspects of the disclosure will become apparent from the below detailed description.
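To make the sum-of-products primitive described under feature (3) above concrete, the following Go sketch models a PLB's AND/OR array in software. It is purely illustrative: the bitmask encoding, the names, and the three-input example are assumptions made here for exposition, not anything specified in the disclosure.

package main

import "fmt"

// productTerm selects which inputs feed one AND gate: bits set in useTrue
// require that input to be 1; bits set in useComp require it to be 0.
type productTerm struct {
	useTrue uint32
	useComp uint32
}

// evalSOP models a PLB's AND/OR array: the output is the OR of every
// product term whose input conditions are all met.
func evalSOP(inputs uint32, pts []productTerm) bool {
	for _, pt := range pts {
		if inputs&pt.useTrue == pt.useTrue && ^inputs&pt.useComp == pt.useComp {
			return true
		}
	}
	return false
}

func main() {
	// f = (a AND b) OR (NOT c), with a, b, c on bits 0, 1, 2
	pts := []productTerm{
		{useTrue: 0b011}, // a AND b
		{useComp: 0b100}, // NOT c
	}
	fmt.Println(evalSOP(0b011, pts)) // true  (a=1, b=1)
	fmt.Println(evalSOP(0b100, pts)) // false (only c=1)
}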
//
//  StunClient.h
//  CDNByeKit
//
//  Created by Timmy on 2020/1/15.
//  Copyright © 2020 cdnbye. All rights reserved.
//

#import <Foundation/Foundation.h>
#import "StunResult.h"
#import "CBSocket.h"

NS_ASSUME_NONNULL_BEGIN

@interface StunClient : NSObject

- (StunResult *)queryWithLocalIp:(NSString *)localIp;
- (StunResult *)queryWithLocalIp:(NSString *)localIp stunHost:(NSString *)host stunPort:(int)port;
- (StunResult *)queryWithLocalIp:(NSString *)localIp stunHost:(NSString *)host stunPort:(int)port socket:(CBSocket *)socket;
- (instancetype)initWithLocalPort:(int)port;

@end

NS_ASSUME_NONNULL_END
“Thus man is definitely master of the woman”, states rule number one on the checklist for children in a book kept in the library at one Islamic school. It’s part of a shocking dossier of material uncovered by Ofsted inspectors on recent visits to faith-based institutions in both the private and state sector. [Image caption: While most institutions are moderate, some do not promote British values. Photo: Clara Molden/Times Newspapers Ltd] Photographs of texts in the school libraries as well as examples of pupils’ own work — which I have seen — raise serious questions about the government’s campaign to uphold so-called “British values” in the education system. Despite promising to defend equality, tolerance and mutual respect in schools as part of the drive against extremism, ministers appear to be turning a blind eye to taxpayers’ money being used to promote the idea…
Consumer prices slipped 0.1 percent last month, pulled down by sharply lower gas prices and cheaper air fares, used cars, and mobile phone plans. The figures suggest that the healthy economy is not yet creating widespread inflation pressures. That gives the Federal Reserve more leeway in deciding whether to raise interest rates. Fed Chair Jerome Powell has said the Fed can be “patient” regarding rate hikes this year.
/** * Async tasks to populate graphs/charts with user summary data. */ private static class PieChartTask extends AsyncTask<Void, Void, ArrayList<PieModel>> { WeakReference<Context> contextWeakReference; private ProgressDialog mProgressDialog; private PieChart mChart; Map<Integer, TextView> mLegendMap; // Pie chart legend to pie element Map<TextView, Integer> mIndexMap; Map<String, TextView> mViewLabels; public PieChartTask(WeakReference<Context> contextWeakReference, PieChart chart, Map<Integer, TextView> legendMap, // Pie chart legend to pie element Map<TextView, Integer> indexMap, Map<String, TextView> viewLabels) { this.contextWeakReference = contextWeakReference; this.mChart = chart; this.mLegendMap =legendMap; this.mViewLabels = viewLabels; this.mIndexMap = indexMap; } @Override protected void onPreExecute() { if (contextWeakReference != null) { Context context = contextWeakReference.get(); mProgressDialog = new ProgressDialog(context); mProgressDialog.show(); } } @Override protected ArrayList<PieModel> doInBackground(Void... voids) { if (contextWeakReference != null) { Context context = contextWeakReference.get(); PresenterContracts.DatabasePresenter dbHelper = DatabaseHelperImpl.getInstance(context); ArrayList<PieModel> slices = new ArrayList<>(); HashMap<StatusEnum.StatusType, Integer> mData = dbHelper.getStatusHistory(); String persianLabel; String statusColor; // Set chart data if (isValidDataset(mData)) { for (StatusEnum.StatusType type : mData.keySet()) { persianLabel = UtilWrapper.getStatusLabel(context, type); statusColor = String.format("#%06X", UtilWrapper.getStatusColor(context, type)); slices.add(new PieModel(persianLabel, mData.get(type), Color.parseColor(statusColor))); } return slices; } } return null; } @Override protected void onPostExecute(ArrayList<PieModel> result) { if (mProgressDialog.isShowing()) { mProgressDialog.dismiss(); } if (result == null) { mChart.clearChart(); // No focused textviews for (TextView t : mViewLabels.values()) { t.setTextColor(Color.LTGRAY); } } else { for (PieModel slice : result) { slice.setShowLabel(true); slice.setShowFormattedValue(true); slice.setFormattedValue(Utils .formatNumber(Math.round(slice.getValue()))); if (slice.getValue() > 0) { mChart.addPieSlice(slice); TextView targetTextView = mViewLabels.get(slice.getLegendLabel()); if (targetTextView != null) { mLegendMap.put(mChart.getData().indexOf(slice), targetTextView); mIndexMap.put(targetTextView, mChart.getData().indexOf(slice)); } } } mChart.setUseCustomInnerValue(true); if (!mChart.getData().isEmpty()) { mChart.setCurrentItem(0); } mChart.startAnimation(); } } // Check if at least one category has reported data. private boolean isValidDataset(HashMap<StatusEnum.StatusType, Integer> dataSet) { boolean hasData = false; for (StatusEnum.StatusType type : dataSet.keySet()) { if (dataSet.get(type) > 0) { hasData = true; break; } } return hasData; } }
use crate::bullet::{BulletDrawable, collision::Collision};
use crate::graphics::direction_movement;
use crate::shaders::Position;

pub struct Bullets {
  pub bullets: Vec<BulletDrawable>,
}

impl Bullets {
  pub fn new() -> Bullets {
    Bullets {
      bullets: Vec::new()
    }
  }

  pub fn add_bullet(&mut self, position: Position, direction: f32) {
    let movement_direction = direction_movement(direction);
    self.bullets.push(BulletDrawable::new(position, movement_direction, direction));
  }

  pub fn remove_old_bullets(&mut self) {
    self.bullets.retain(|ref mut b| b.status == Collision::Flying);
  }
}

impl specs::prelude::Component for Bullets {
  type Storage = specs::storage::VecStorage<Bullets>;
}
Microsoft Microsoft is continuing a push to turn its Kinect motion-sensing game controller into a natural user-interface device for PCs, by sharing samples of the Kinect for Windows code under an open source license. The idea is to give developers the opportunity to reuse the code and help Microsoft figure out how to improve it, Ben Lower, Kinect for Windows developer community manager, wrote in a blog post last week. The company has posted 22 samples -- including code for face tracking, its skeletal viewer, and slideshow gestures -- in C#, C++, and Visual Basic. The code is available under an Apache 2.0 open source license. In addition, Microsoft has launched a blog for the Kinect for Windows developer community to build support for programmers interested in creating applications that use motion sensing and voice for PC. In 2011, Microsoft released a software development kit for Kinect for Windows, the first step in encouraging developers to come up with new applications. A year ago, it launched a version of the controller that works with PCs, a device intended to give developers a tool to create commercial programs that use the Kinect. Companies such as United Health Group, American Express, Mattel, Telefonica, and Toyota have created computer programs that allow users to use gestures or their voices to interact with the software.
/* * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __XFS_INODE_BUF_H__ #define __XFS_INODE_BUF_H__ struct xfs_inode; struct xfs_dinode; /* * In memory representation of the XFS inode. This is held in the in-core struct * xfs_inode and represents the current on disk values but the structure is not * in on-disk format. That is, this structure is always translated to on-disk * format specific structures at the appropriate time. */ struct xfs_icdinode { __int8_t di_version; /* inode version */ __int8_t di_format; /* format of di_c data */ __uint16_t di_flushiter; /* incremented on flush */ __uint32_t di_uid; /* owner's user id */ __uint32_t di_gid; /* owner's group id */ __uint16_t di_projid_lo; /* lower part of owner's project id */ __uint16_t di_projid_hi; /* higher part of owner's project id */ xfs_fsize_t di_size; /* number of bytes in file */ xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */ xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ xfs_extnum_t di_nextents; /* number of extents in data fork */ xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ __uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */ __int8_t di_aformat; /* format of attr fork's data */ __uint32_t di_dmevmask; /* DMIG event mask */ __uint16_t di_dmstate; /* DMIG state info */ __uint16_t di_flags; /* random flags, XFS_DIFLAG_... */ __uint64_t di_flags2; /* more random flags */ __uint32_t di_cowextsize; /* basic cow extent size for file */ xfs_ictimestamp_t di_crtime; /* time created */ }; /* * Inode location information. Stored in the inode and passed to * xfs_imap_to_bp() to get a buffer and dinode for a given inode. */ struct xfs_imap { xfs_daddr_t im_blkno; /* starting BB of inode chunk */ unsigned short im_len; /* length in BBs of inode chunk */ unsigned short im_boffset; /* inode offset in block in bytes */ }; int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *, struct xfs_imap *, struct xfs_dinode **, struct xfs_buf **, uint, uint); int xfs_iread(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, uint); void xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *); void xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to, xfs_lsn_t lsn); void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from); void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, struct xfs_dinode *to); bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version); #if defined(DEBUG) void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); #else #define xfs_inobp_check(mp, bp) #endif /* DEBUG */ #endif /* __XFS_INODE_BUF_H__ */
#!/usr/bin/env python #=========================================================================== # # Produce plots for ZDR bias # #=========================================================================== import os import sys import subprocess from optparse import OptionParser import numpy as np from numpy import convolve import matplotlib.pyplot as plt from matplotlib import dates import math import datetime def main(): # globals global options global debug # parse the command line usage = "usage: %prog [options]" parser = OptionParser(usage) parser.add_option('--debug', dest='debug', default=False, action="store_true", help='Set debugging on') parser.add_option('--verbose', dest='verbose', default=False, action="store_true", help='Set verbose debugging on') parser.add_option('--cp_file', dest='cpFilePath', default='../data/pecan/spol_pecan_CP_analysis_20150524_000021.txt', help='CP results file path') parser.add_option('--bias_file', dest='biasFilePath', default='../data/pecan/spol_zdr_bias_in_snow.txt', help='File path for bias results') parser.add_option('--title', dest='title', default= \ 'SPOL daily mean ZDRM bias in ice/snow, ' + \ 'compared with VERT and CP results', help='Title for plot') parser.add_option('--width', dest='figWidthMm', default=400, help='Width of figure in mm') parser.add_option('--height', dest='figHeightMm', default=320, help='Height of figure in mm') parser.add_option('--lenMean', dest='lenMean', default=301, help='Len of moving mean filter') (options, args) = parser.parse_args() if (options.verbose == True): options.debug = True if (options.debug == True): print >>sys.stderr, "Running %prog" print >>sys.stderr, " cpFilePath: ", options.cpFilePath print >>sys.stderr, " biasFilePath: ", options.biasFilePath # read in column headers for bias results iret, biasHdrs, biasData = readColumnHeaders(options.biasFilePath) if (iret != 0): sys.exit(-1) # read in data for bias results biasData, biasTimes = readInputData(options.biasFilePath, biasHdrs, biasData) # read in column headers for CP results iret, cpHdrs, cpData = readColumnHeaders(options.cpFilePath) if (iret != 0): sys.exit(-1) # read in data for CP results cpData, cpTimes = readInputData(options.cpFilePath, cpHdrs, cpData) # render the plot doPlot(biasData, biasTimes, cpData, cpTimes) sys.exit(0) ######################################################################## # Read columm headers for the data # this is in the first line def readColumnHeaders(filePath): colHeaders = [] colData = {} fp = open(filePath, 'r') line = fp.readline() fp.close() commentIndex = line.find("#") if (commentIndex == 0): # header colHeaders = line.lstrip("# ").rstrip("\n").split() if (options.debug == True): print >>sys.stderr, "colHeaders: ", colHeaders else: print >>sys.stderr, "ERROR - readColumnHeaders" print >>sys.stderr, " First line does not start with #" return -1, colHeaders, colData for index, var in enumerate(colHeaders, start=0): colData[var] = [] return 0, colHeaders, colData ######################################################################## # Read in the data def readInputData(filePath, colHeaders, colData): # open file fp = open(filePath, 'r') lines = fp.readlines() # read in a line at a time, set colData for line in lines: commentIndex = line.find("#") if (commentIndex >= 0): continue # data data = line.strip().split() for index, var in enumerate(colHeaders, start=0): if (var == 'count' or var == 'year' or var == 'month' or var == 'day' or \ var == 'hour' or var == 'min' or var == 'sec' or \ var == 
'unix_time' or var == 'npts'): colData[var].append(int(data[index])) elif (var == 'fileName' or var.find('Time') >= 0): colData[var].append(data[index]) else: colData[var].append(float(data[index])) fp.close() # load observation times array year = colData['year'] month = colData['month'] day = colData['day'] hour = colData['hour'] minute = colData['min'] sec = colData['sec'] obsTimes = [] for ii, var in enumerate(year, start=0): thisTime = datetime.datetime(year[ii], month[ii], day[ii], hour[ii], minute[ii], sec[ii]) obsTimes.append(thisTime) return colData, obsTimes ######################################################################## # Moving average filter def movingAverage(values, window): weights = np.repeat(1.0, window)/window sma = np.convolve(values, weights, 'same') return sma ######################################################################## # Plot def doPlot(biasData, biasTimes, cpData, cpTimes): widthIn = float(options.figWidthMm) / 25.4 htIn = float(options.figHeightMm) / 25.4 fig1 = plt.figure(1, (widthIn, htIn)) ax1 = fig1.add_subplot(1,1,1,xmargin=0.0) ax1.plot(biasTimes, np.zeros(len(biasTimes)), linewidth=1, color = 'gray') fileName = options.biasFilePath titleStr = "File: " + fileName hfmt = dates.DateFormatter('%y/%m/%d') lenMeanFilter = int(options.lenMean) # set up np arrays btimes = np.array(biasTimes).astype(datetime.datetime) biasMean = np.array(biasData["ZdrBiasMean"]).astype(np.double) biasMean = movingAverage(biasMean, lenMeanFilter) biasPercent15 = np.array(biasData["ZdrBiasPercentile15"]).astype(np.double) biasPercent15 = movingAverage(biasPercent15, lenMeanFilter) biasPercent20 = np.array(biasData["ZdrBiasPercentile20"]).astype(np.double) biasPercent20 = movingAverage(biasPercent20, lenMeanFilter) biasPercent25 = np.array(biasData["ZdrBiasPercentile25"]).astype(np.double) biasPercent25 = movingAverage(biasPercent25, lenMeanFilter) biasPercent33 = np.array(biasData["ZdrBiasPercentile33"]).astype(np.double) biasPercent33 = movingAverage(biasPercent33, lenMeanFilter) validMean = np.isfinite(biasMean) validPercent15 = np.isfinite(biasPercent15) validPercent20 = np.isfinite(biasPercent20) validPercent25 = np.isfinite(biasPercent25) validPercent33 = np.isfinite(biasPercent33) #ax1.plot(btimes[validMean], biasMean[validMean], \ # label = 'ZDR Bias mean', color='red') ax1.plot(btimes[validPercent15], biasPercent15[validPercent15], \ label = 'ZDR Bias percentile 15', linewidth=1, color='red') ax1.plot(btimes[validPercent20], biasPercent20[validPercent20], \ label = 'ZDR Bias percentile 20', linewidth=1, color='blue') ax1.plot(btimes[validPercent25], biasPercent25[validPercent25], \ label = 'ZDR Bias percentile 25', linewidth=1, color='magenta') ctimes = np.array(cpTimes).astype(datetime.datetime) ZdrmVert = np.array(cpData["ZdrmVert"]).astype(np.double) validZdrmVert = np.isfinite(ZdrmVert) ax1.plot(ctimes[validZdrmVert], ZdrmVert[validZdrmVert], \ "b^", markersize=10, linewidth=1, label = 'Zdrm Vert (dB)', color = 'black') SunscanZdrm = np.array(cpData["SunscanZdrm"]).astype(np.double) validSunscanZdrm = np.isfinite(SunscanZdrm) ax1.plot(ctimes[validSunscanZdrm], SunscanZdrm[validSunscanZdrm], \ linewidth=2, label = 'Zdrm Sun/CP (dB)', color = 'green') legend1 = ax1.legend(loc='upper right', ncol=7) for label in legend1.get_texts(): label.set_fontsize('x-small') ax1.set_xlabel("Date") ax1.set_ylabel("ZDR Bias (dB)") ax1.grid(True) ax1.set_ylim([-0.4, +0.25]) hfmt = dates.DateFormatter('%y/%m/%d') ax1.xaxis.set_major_locator(dates.DayLocator()) 
ax1.xaxis.set_major_formatter(hfmt) for tick in ax1.xaxis.get_major_ticks(): tick.label.set_fontsize(8) fig1.suptitle(options.title) fig1.autofmt_xdate() plt.tight_layout() plt.subplots_adjust(top=0.96) plt.show() ######################################################################## # Run a command in a shell, wait for it to complete def runCommand(cmd): if (options.debug == True): print >>sys.stderr, "running cmd:",cmd try: retcode = subprocess.call(cmd, shell=True) if retcode < 0: print >>sys.stderr, "Child was terminated by signal: ", -retcode else: if (options.debug == True): print >>sys.stderr, "Child returned code: ", retcode except OSError, e: print >>sys.stderr, "Execution failed:", e ######################################################################## # Run - entry point if __name__ == "__main__": main()
1. Field of the Invention
The present invention relates to a bar code reader, and more particularly to a portable, small-size hand-supported bar code reader.
2. Description of the Prior Art
Some portable hand-held scanning bar code readers employ a charge-coupled device (CCD) combined with an image-reducing optical system including a spherical lens. A light beam emitted by a light source is applied to a bar code, and reflected light from the bar code is reflected by a mirror and then focused by the spherical lens onto the CCD. The CCD converts the focused light into an electric signal whose intensity varies with time depending on the information represented by the bar code. The electric signal produced by the CCD is then amplified by an amplifier, converted into a binary signal by a binary signal generator, and decoded into a decoded signal by a decoder. The decoded signal is outputted from the bar code reader through an interface. During use of such a portable hand-held scanning bar code reader, the operator is required to hold the bar code reader by hand. Since one hand of the operator is occupied by the bar code reader, the operator can only use the other hand or may have to pass the bar code reader to the other hand to do some other work. Therefore, the efficiency of the job which the operator performs while holding the bar code reader by hand is considerably low. Attempts have been made to reduce the size of portable scanning bar code readers. One small-size portable bar code reader is disclosed in Japanese laid-open utility model publication No. 58-138157. The disclosed bar code reader is a pencil-shaped bar code reader that can be mounted on an operator's finger. The pencil-shaped bar code reader is relatively long, and suffers frequent reading errors because it is required to trace bar codes in order to read them. U.S. Pat. No. 4,766,299 discloses a hand-mounted bar code reader. The disclosed hand-mounted bar code reader is however relatively large in size as its housing accommodates a conventional laser and scanning mechanism for producing a scanning laser beam. U.S. Pat. No. 3,978,318 shows a hand-operated scanner that can be worn on an operator's finger. However, the disclosed hand-operated scanner is also relatively large in size, and must be moved over a data bearing medium in order to read the data. Furthermore, the optical system of the conventional scanning bar code readers has imposed certain limitations on efforts to make them smaller in size. Inasmuch as the scanning bar code readers have an image-reducing optical system including a spherical lens, as described above, if the focal length of the optical system between the bar code and the spherical lens is to be shortened to reduce the size of the bar code reader, then it is necessary to employ a smaller spherical lens with a reduced radius of curvature. The smaller spherical lens would however be unable to provide a light intensity strong enough to detect a bar code image on the CCD. Since the spherical lens is required to focus the reflected light from the bar code into a smaller bar code image on the CCD, the spherical lens should have a certain focal length that is necessary between itself and the CCD. Therefore, it has been impossible for scanning bar code readers to be greatly reduced in size because of those optical system limitations.
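The read chain described above (CCD output, amplification, binary signal generation, decoding) can be sketched as a toy software model. The following Go snippet is an illustration under assumptions: the threshold value, the sample data, and the run-length representation are invented here and are not the patent's circuit.

package main

import "fmt"

// binarize models the binary signal generator stage: each CCD sample is
// compared against a threshold, turning the analog scanline into the
// bar/space pattern that a decoder would consume.
func binarize(samples []float64, threshold float64) []bool {
	bars := make([]bool, len(samples))
	for i, s := range samples {
		bars[i] = s < threshold // low reflectance (dark bar) reads as "true"
	}
	return bars
}

// runLengths collapses the binary stream into bar/space widths, the raw
// input to bar code decoding.
func runLengths(bits []bool) []int {
	if len(bits) == 0 {
		return nil
	}
	runs := []int{1}
	for i := 1; i < len(bits); i++ {
		if bits[i] == bits[i-1] {
			runs[len(runs)-1]++
		} else {
			runs = append(runs, 1)
		}
	}
	return runs
}

func main() {
	scanline := []float64{0.9, 0.9, 0.1, 0.1, 0.1, 0.8, 0.2, 0.2, 0.9}
	fmt.Println(runLengths(binarize(scanline, 0.5))) // [2 3 1 2 1]
}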
/** * Exception for errors where the API returned an entity of type {@code SevereCtcError}. */ public class ApiClientErrorWithSevereCtcErrorEntityException extends ApiClientErrorWithEntityException { public ApiClientErrorWithSevereCtcErrorEntityException(ApiResponse response) { super(response); } @Override public SevereCtcError getEntity() { return (SevereCtcError) super.getEntity(); } }
/* The MIT License (MIT) Copyright (c) 2013-2014 winlin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef SRS_APP_SOURCE_HPP #define SRS_APP_SOURCE_HPP /* #include <srs_app_source.hpp> */ #include <srs_core.hpp> #include <map> #include <vector> #include <string> #include <srs_app_st.hpp> #include <srs_app_reload.hpp> class SrsPlayEdge; class SrsPublishEdge; class SrsSource; class SrsMessage; class SrsOnMetaDataPacket; class SrsSharedPtrMessage; class SrsForwarder; class SrsRequest; class SrsSocket; class SrsRtmpServer; class SrsEdgeProxyContext; #ifdef SRS_AUTO_HLS class SrsHls; #endif #ifdef SRS_AUTO_DVR class SrsDvr; #endif #ifdef SRS_AUTO_TRANSCODE class SrsEncoder; #endif class SrsStream; /** * time jitter detect and correct, * to ensure the rtmp stream is monotonically. */ class SrsRtmpJitter { private: int64_t last_pkt_time; int64_t last_pkt_correct_time; public: SrsRtmpJitter(); virtual ~SrsRtmpJitter(); public: /** * detect the time jitter and correct it. */ virtual int correct(SrsSharedPtrMessage* msg, int tba, int tbv); /** * get current client time, the last packet time. */ virtual int get_time(); }; /** * the message queue for the consumer(client), forwarder. * we limit the size in seconds, drop old messages(the whole gop) if full. */ class SrsMessageQueue { private: int64_t av_start_time; int64_t av_end_time; int queue_size_ms; std::vector<SrsSharedPtrMessage*> msgs; public: SrsMessageQueue(); virtual ~SrsMessageQueue(); public: /** * set the queue size * @param queue_size the queue size in seconds. */ virtual void set_queue_size(double queue_size); public: /** * enqueue the message, the timestamp always monotonically. * @param msg, the msg to enqueue, user never free it whatever the return code. */ virtual int enqueue(SrsSharedPtrMessage* msg); /** * get packets in consumer queue. * @pmsgs SrsMessages*[], output the prt array. * @count the count in array. * @max_count the max count to dequeue, 0 to dequeue all. */ virtual int get_packets(int max_count, SrsSharedPtrMessage**& pmsgs, int& count); private: /** * remove a gop from the front. * if no iframe found, clear it. */ virtual void shrink(); virtual void clear(); }; /** * the consumer for SrsSource, that is a play client. */ class SrsConsumer { private: SrsRtmpJitter* jitter; SrsSource* source; SrsMessageQueue* queue; bool paused; public: SrsConsumer(SrsSource* _source); virtual ~SrsConsumer(); public: virtual void set_queue_size(double queue_size); public: /** * get current client time, the last packet time. 
*/ virtual int get_time(); /** * enqueue an shared ptr message. * @param tba timebase of audio. * used to calc the audio time delta if time-jitter detected. * @param tbv timebase of video. * used to calc the video time delta if time-jitter detected. */ virtual int enqueue(SrsSharedPtrMessage* msg, int tba, int tbv); /** * get packets in consumer queue. * @pmsgs SrsMessages*[], output the prt array. * @count the count in array. * @max_count the max count to dequeue, 0 to dequeue all. */ virtual int get_packets(int max_count, SrsSharedPtrMessage**& pmsgs, int& count); /** * when client send the pause message. */ virtual int on_play_client_pause(bool is_pause); }; /** * cache a gop of video/audio data, * delivery at the connect of flash player, * to enable it to fast startup. */ class SrsGopCache { private: /** * if disabled the gop cache, * the client will wait for the next keyframe for h264, * and will be black-screen. */ bool enable_gop_cache; /** * the video frame count, avoid cache for pure audio stream. */ int cached_video_count; /** * cached gop. */ std::vector<SrsSharedPtrMessage*> gop_cache; public: SrsGopCache(); virtual ~SrsGopCache(); public: virtual void set(bool enabled); /** * only for h264 codec * 1. cache the gop when got h264 video packet. * 2. clear gop when got keyframe. */ virtual int cache(SrsSharedPtrMessage* msg); virtual void clear(); virtual int dump(SrsConsumer* consumer, int tba, int tbv); /** * used for atc to get the time of gop cache, * the atc will adjust the sequence header timestamp to gop cache. */ virtual bool empty(); virtual int64_t get_start_time(); }; /** * live streaming source. */ class SrsSource : public ISrsReloadHandler { private: static std::map<std::string, SrsSource*> pool; public: /** * find stream by vhost/app/stream. * @param req the client request. * @param ppsource the matched source, if success never be NULL. * @remark stream_url should without port and schema. */ static int find(SrsRequest* req, SrsSource** ppsource); /** * when system exit, destroy the sources, * for gmc to analysis mem leaks. */ static void destroy(); private: // deep copy of client request. SrsRequest* _req; // to delivery stream to clients. std::vector<SrsConsumer*> consumers; // hls handler. #ifdef SRS_AUTO_HLS SrsHls* hls; #endif // dvr handler. #ifdef SRS_AUTO_DVR SrsDvr* dvr; #endif // transcoding handler. #ifdef SRS_AUTO_TRANSCODE SrsEncoder* encoder; #endif // edge control service SrsPlayEdge* play_edge; SrsPublishEdge* publish_edge; // gop cache for client fast startup. SrsGopCache* gop_cache; // to forward stream to other servers std::vector<SrsForwarder*> forwarders; // for aggregate message SrsStream* aggregate_stream; private: /** * the sample rate of audio in metadata. */ int sample_rate; /** * the video frame rate in metadata. */ int frame_rate; /** * can publish, true when is not streaming */ bool _can_publish; /** * atc whether atc(use absolute time and donot adjust time), * directly use msg time and donot adjust if atc is true, * otherwise, adjust msg time to start from 0 to make flash happy. */ // TODO: FIXME: to support reload atc. bool atc; private: SrsSharedPtrMessage* cache_metadata; // the cached video sequence header. SrsSharedPtrMessage* cache_sh_video; // the cached audio sequence header. SrsSharedPtrMessage* cache_sh_audio; public: /** * @param _req the client request object, * this object will deep copy it for reload. 
*/ SrsSource(SrsRequest* req); virtual ~SrsSource(); public: virtual int initialize(); // interface ISrsReloadHandler public: virtual int on_reload_vhost_atc(std::string vhost); virtual int on_reload_vhost_gop_cache(std::string vhost); virtual int on_reload_vhost_queue_length(std::string vhost); virtual int on_reload_vhost_forward(std::string vhost); virtual int on_reload_vhost_hls(std::string vhost); virtual int on_reload_vhost_dvr(std::string vhost); virtual int on_reload_vhost_transcode(std::string vhost); public: // for the SrsForwarder to callback to request the sequence headers. virtual int on_forwarder_start(SrsForwarder* forwarder); // for the SrsHls to callback to request the sequence headers. virtual int on_hls_start(); // for the SrsDvr to callback to request the sequence headers. virtual int on_dvr_request_sh(); public: virtual bool can_publish(); virtual int on_meta_data(SrsMessage* msg, SrsOnMetaDataPacket* metadata); virtual int on_audio(SrsMessage* audio); virtual int on_video(SrsMessage* video); virtual int on_aggregate(SrsMessage* msg); /** * publish stream event notify. * @param _req the request from client, the source will deep copy it, * for when reload the request of client maybe invalid. */ virtual int on_publish(); virtual void on_unpublish(); public: virtual int create_consumer(SrsConsumer*& consumer); virtual void on_consumer_destroy(SrsConsumer* consumer); virtual void set_cache(bool enabled); // internal public: // for consumer, atc feature. virtual bool is_atc(); // for edge, when play edge stream, check the state virtual int on_edge_start_play(); // for edge, when publish edge stream, check the state virtual int on_edge_start_publish(); // for edge, proxy the publish virtual int on_edge_proxy_publish(SrsMessage* msg); // for edge, proxy stop publish virtual void on_edge_proxy_unpublish(); private: virtual int create_forwarders(); virtual void destroy_forwarders(); }; #endif
package infra

import (
	"strconv"
	"strings"

	"golang.org/x/exp/constraints"
)

// ParseGroup parses a ','-separated list of integers, dropping zero values
// and duplicates.
func ParseGroup[T constraints.Integer](s string) []T {
	if s == "" {
		return []T{}
	}
	ss := strings.Split(s, ",")
	res := make([]T, 0, len(ss))
	mp := make(map[T]struct{})
	for i := 0; i < len(ss); i++ {
		v, err := strconv.ParseInt(strings.TrimSpace(ss[i]), 0, 64)
		if err != nil || v == 0 {
			continue
		}
		vv := T(v)
		if _, ok := mp[vv]; ok {
			continue
		}
		mp[vv] = struct{}{}
		res = append(res, vv)
	}
	return res
}

// JoinGroup joins the integers into a ','-separated string, dropping zero
// values and duplicates.
func JoinGroup[T constraints.Integer](vs []T) string {
	sep := ","
	switch len(vs) {
	case 0:
		return ""
	case 1:
		return strconv.FormatInt(int64(vs[0]), 10)
	}
	strElems := make([]string, 0, len(vs))
	n := 0
	mp := make(map[T]struct{})
	for i := 0; i < len(vs); i++ {
		if vs[i] == 0 {
			continue
		}
		if _, ok := mp[vs[i]]; ok {
			continue
		}
		mp[vs[i]] = struct{}{}
		v := strconv.FormatInt(int64(vs[i]), 10)
		strElems = append(strElems, v)
		n += len(v)
	}
	n += len(sep) * (len(strElems) - 1)
	var b strings.Builder
	b.Grow(n)
	b.WriteString(strElems[0])
	for _, s := range strElems[1:] {
		b.WriteString(sep)
		b.WriteString(s)
	}
	return b.String()
}
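A short usage sketch for the two helpers above; the import path is taken from the snippet's repository header and may need adjusting, and the output comments assume the behavior documented in the code.

package main

import (
	"fmt"

	"github.com/things-go/anyhow/infra" // import path assumed from the repo header
)

func main() {
	// zero values and duplicates are dropped in both directions
	ids := infra.ParseGroup[int64]("3, 0, 7, 3, 12")
	fmt.Println(ids)                  // [3 7 12]
	fmt.Println(infra.JoinGroup(ids)) // 3,7,12
}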
/*
 * @Author: EagleXiang
 * @LastEditors: EagleXiang
 * @Email: <EMAIL>
 * @Github: https://github.com/eaglexiang
 * @Date: 2019-06-02 10:49:54
 * @LastEditTime: 2019-12-07 17:17:58
 */

package tunnel

import (
	"io"
	"net"
	"time"
)

// NewVirtualConn creates a new in-memory virtual connection.
func NewVirtualConn() net.Conn {
	return &virtualConn{
		msgs: make(chan []byte, 2),
	}
}

type virtualConn struct {
	msgs chan []byte
}

func (conn virtualConn) Write(b []byte) (n int, err error) {
	conn.msgs <- b
	return len(b), nil
}

func (conn virtualConn) Read(b []byte) (n int, err error) {
	tmp, ok := <-conn.msgs
	if !ok {
		return 0, io.EOF
	}
	// note: if len(b) < len(tmp), copy truncates silently while n still
	// reports len(tmp); callers should size b generously
	copy(b, tmp)
	return len(tmp), nil
}

func (conn virtualConn) Close() (err error) {
	close(conn.msgs)
	return nil
}

func (conn virtualConn) LocalAddr() (addr net.Addr) {
	return nil
}

func (conn virtualConn) RemoteAddr() (addr net.Addr) {
	return nil
}

func (conn virtualConn) SetWriteDeadline(t time.Time) error {
	return nil
}

func (conn virtualConn) SetReadDeadline(t time.Time) error {
	return nil
}

func (conn virtualConn) SetDeadline(t time.Time) error {
	return nil
}
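A minimal test-style sketch of the intended round trip, written as if it lived in the same tunnel package; the semantics (buffered writes, io.EOF after Close) are inferred from the code above.

package tunnel

import (
	"io"
	"testing"
)

// TestVirtualConnRoundTrip sketches the expected behavior: writes are
// queued on the internal channel, reads drain them, and Close yields io.EOF.
func TestVirtualConnRoundTrip(t *testing.T) {
	conn := NewVirtualConn()

	if _, err := conn.Write([]byte("ping")); err != nil {
		t.Fatal(err)
	}
	conn.Close()

	buf := make([]byte, 16) // must be at least as large as the message (see Read above)
	n, err := conn.Read(buf)
	if err != nil || string(buf[:n]) != "ping" {
		t.Fatalf("got %q, %v", buf[:n], err)
	}
	if _, err := conn.Read(buf); err != io.EOF {
		t.Fatalf("expected io.EOF after Close, got %v", err)
	}
}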
#include<stdio.h>
#include<stdlib.h>

struct Tree{ int
A 17-year-old boy has been charged with attempted murder in a bedside court appearance, after a random attack on three people, including an elderly man now fighting for his life in hospital. The boy allegedly assaulted the man, along with two other people as they pulled up at a house in Palmerston in Canberra's north about 10:15pm on Friday. Police said when they arrived at the Tamborine Close home, the boy "engaged" police and was only restrained after several officers intervened and capsicum spray was used on him. The boy's 83-year-old victim is in hospital with critical injuries. Two other victims were also taken to hospital after the attack. Police claimed the boy had attacked the car's occupants at random. The young man, who was also taken to hospital, was this afternoon charged at his bedside with attempted murder, assault, making threats to kill, and obstructing a territory official. He did not apply for bail and was remanded in custody. Police have reiterated calls for witnesses, or anybody who saw the boy in the area around Tamborine Close on Friday evening to come forward. The case will be back in court in April.
Purchasing-Power Parity: Definition, Measurement, and Interpretation This article examines the concept of purchasing-power parity (PPP) and its implications for the equilibrium value of the Canadian exchange rate. PPP has two main applications, as a theory of exchange rate determination and as a means to compare living standards across countries. Concerning exchange rate determination, PPP is mainly useful as a reminder that monetary policy has no long-run impact on the real exchange rate, since the exchange rate can deviate persistently from its PPP value in response to real shocks. To compare living standards across countries, PPP exchange rates constructed by comparing the prices of national consumption baskets are used to translate per capita national incomes into a common currency. These rates are useful because they offset differences in national price levels to obtain comparable measures of purchasing power, but they are not an accurate measure of the equilibrium value of the exchange rate. The authors conclude that the current deviation of the Canadian exchange rate from the PPP rate does not imply that the exchange rate is undervalued, but that this deviation reflects the impact of persistent real factors, in particular, lower commodity prices.
Facebook posted its best operating profit margin in years. Can investors expect it to continue expanding? "We still think we're early in investing in the business." Those are the words of Facebook (NASDAQ:FB) CFO Dave Wehner on the company's recent fourth-quarter earnings call. To his point, Facebook increased its spending significantly in 2015, up 55% on a non-GAAP basis for the year. At the same time, fourth-quarter operating margin increased to 60% last year, up from 58% in both 2013 and 2014. Despite its focus on investing for the long-term growth of the company, Facebook's 60% operating margin is much higher than that of comparable businesses such as Alphabet (NASDAQ:GOOG) (NASDAQ:GOOGL). Even the mature Google segment generated a non-GAAP operating margin of just 31% during 2015. Can Facebook investors expect it to continue to make twice as much operating income per dollar of revenue as Google? Wehner was clear that he doesn't think the company is done investing. Despite spending over $20 billion on the acquisitions of WhatsApp and Oculus just a couple of years ago, Wehner told analysts "we're really investing in new areas today where we see a long-term opportunity for revenue growth." Research and development expenses grew more in absolute dollars than all of Facebook's other operating costs combined in 2015. Note that this figure includes a full year of stock-based compensation for WhatsApp and Oculus. On a non-GAAP basis, which doesn't include the increase in stock-based compensation to the WhatsApp and Oculus teams, R&D is the company's fastest-growing expense (percentage-wise). Investors shouldn't expect that to change anytime soon. But that's not the biggest thing that could drive down Facebook's operating margin. Atop the list of Google's operating expenses is its cost of revenues. Cost of revenues totaled about 38% of Alphabet's total consolidated revenue in 2015. Comparatively, Facebook's cost of revenues totaled just 16% last year. Google's cost of revenues includes all the same things as Facebook's -- data center operations, employee compensation, credit card transaction fees, and so on. But there are also some large expenses that Facebook doesn't have to worry about. Google pays traffic acquisition costs to various Web browser developers to make sure its search engine pops up when someone types a search query into the navigation bar. Traffic acquisition costs also include the revenue shared with network partners that place Google ads on their websites. Last year, Google paid over $14 billion in traffic acquisition costs, about 19% of Google's total revenue for the year and half of its total costs of revenue. Additionally, Google pays content acquisition costs for YouTube videos and digital downloads on Google Play. YouTube shares a percentage of ad revenue with its creators, and more recently it started commissioning original content for distribution on its platform. Google takes a 30% cut of digital download sales through Google Play, sending 70% to publishers. One other expense Google pays that Facebook doesn't is its revenue share with mobile carriers. Google pays carriers a share of ad revenue to pre-install its apps such as Gmail, Search, and Google Maps. Earlier this year, Facebook extended its ad network, the Facebook Audience Network, to include mobile websites in addition to its existing ability to advertise in other mobile apps. The Audience Network was first introduced in the spring of 2014, but it holds a lot of growth potential for Facebook.
Facebook shares a majority of the ad revenue from its Audience Network with the app developer or publisher. Last month, Facebook announced the Audience Network reached a $1 billion run rate in the fourth quarter. That revenue, however, only carries an operating margin of around 30% due to the revenue split with publishers. Google, for example, only keeps about 32% of its Network Members' ad revenue after its revenue split. While $1 billion is still a small part of Facebook's expected revenue of $25.5 billion in 2016, it's enough to have an impact of more than one percentage point on operating margin. As the Audience Network grows, that impact will increase.

Similarly, Facebook began rolling out Instant Articles earlier this year. The feature takes articles from various publishers and hosts them on Facebook's servers to improve load times. If a publisher taps Facebook to place ads in those articles, Facebook splits the revenue 70/30 in favor of the publisher. As Facebook users watch more video content on its platform, the company is experimenting with ways to share revenue with creators. While this will theoretically improve the quality of video content on Facebook, it also increases Facebook's cost of revenues.

All told, Facebook investors shouldn't expect operating margins to remain this high forever, but all of these efforts provide incremental revenue opportunities, and those dollars should trickle down to the bottom line. How much of that cash ultimately makes it there depends on the company's plans to continue investing in long-term opportunities for revenue growth, which may or may not provide operating margins as high as its core business. Margin profiles aside, Google's operating income was nearly four times higher than Facebook's last year. So it certainly seems worthwhile for Facebook to extend into lower-margin businesses.
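The "more than one percentage point" claim above can be checked with quick back-of-the-envelope arithmetic using the figures cited in the article (the roughly 60% core margin, the $1 billion Audience Network run rate at about a 30% margin, and the $25.5 billion revenue estimate for 2016). The sketch below is illustrative, not a model of Facebook's actual accounting.

total_revenue = 25.5   # expected 2016 revenue, in billions of dollars
an_revenue = 1.0       # Audience Network run rate, in billions
core_margin = 0.60     # approximate operating margin of the core business
an_margin = 0.30       # approximate Audience Network margin after the publisher split

core_income = (total_revenue - an_revenue) * core_margin
an_income = an_revenue * an_margin
blended_margin = (core_income + an_income) / total_revenue

print(f"blended operating margin: {blended_margin:.1%}")                           # ~58.8%
print(f"dilution vs. core margin: {(core_margin - blended_margin) * 100:.1f} pp")  # ~1.2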
def pull():
    cmd(['fetch'])
    out = cmd(['pull'])
    return out
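The snippet depends on a cmd helper that is not shown; a plausible minimal definition, assuming it simply shells out to git and returns the output, would be:

import subprocess

def cmd(args):
    # Hypothetical helper (not in the source): run `git <args>` and
    # return the decoded standard output.
    return subprocess.check_output(['git'] + args, text=True)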
import numpy as np
import matplotlib.pyplot as plt

def figure1_plot(data1, data2):
    labels = [2001, 2006, 2011, 2016]
    x = np.arange(len(labels))
    width = 0.5
    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15, 6), tight_layout=True)
    bar1a = ax0.bar(x, data1, width)
    bar1b = ax1.bar(x, data2, width)
    autolabel(bar1a, ax0)
    autolabel(bar1b, ax1)
    ax0.set_title('Number of municipalities')
    ax0.set_xticks(x)
    ax0.set_xticklabels(labels)
    ax0.grid(True, axis='y')
    ax1.set_title('Number of Council Candidates')
    ax1.set_xticks(x)
    ax1.set_xticklabels(labels)
    ax1.grid(True, axis='y')  # target the second axes directly
    fig.tight_layout()
    plt.show()
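autolabel is called but never defined in this excerpt; the sketch below is a common matplotlib idiom it likely resembles (an assumption, not the author's code), annotating each bar with its height:

def autolabel(bars, ax):
    # Write each bar's height just above the top of the bar.
    for bar in bars:
        height = bar.get_height()
        ax.annotate(f'{height:g}',
                    xy=(bar.get_x() + bar.get_width() / 2, height),
                    xytext=(0, 3), textcoords='offset points',
                    ha='center', va='bottom')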
OBJECTIVE: To evaluate the antimicrobial effects of erythromycin microspheres against Mycoplasma pneumoniae in rats. METHODS: With erythromycin lactobionate as the positive control, erythromycin microspheres at 3 non-toxic doses (0.1, 0.5, and 1.2 g.kg(-1).d(-1)) were administered intragastrically for 6 consecutive days in Wistar rats with Mycoplasma pneumoniae infection. The general condition and lung index of the rats were observed and measured to assess the therapeutic effects of the treatments against Mycoplasma pneumoniae infection. RESULTS: The erythromycin microspheres at 0.1, 0.5, and 1.2 g.kg(-1).d(-1) significantly alleviated the symptoms of the rats infected with Mycoplasma pneumoniae and reduced the pulmonary index of the infected rats from 1.75 to 1.45, 1.38 and 1.25, respectively (P < 0.01). An obvious dose-effect relationship was noted between the dose of the erythromycin microspheres and the improvement of the tissue pathologies caused by the infection. CONCLUSION: Erythromycin microspheres possess strong activity against Mycoplasma pneumoniae in rats.
On Christmas Day, 15-year-old David finds out that his boyfriend, Jonathan, has taken another lover. The discovery pushes him to the brink of depression, driving him to think of ways to win Jonathan back at all costs. He has invited Jonathan to see him on this day for the last time.

A documentary that explores the challenges that a life in music can bring.

Three Yemeni teenage girls enter an entrepreneurship competition but along the way encounter the hardships of a country marked by a broken educational system, joblessness and a threatening Al-Qaeda presence.
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hudi.avro; import org.apache.avro.AvroRuntimeException; import org.apache.avro.Schema; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import static org.apache.hudi.common.util.ValidationUtils.checkState; public class AvroSchemaUtils { private AvroSchemaUtils() {} /** * Appends provided new fields at the end of the given schema * * NOTE: No deduplication is made, this method simply appends fields at the end of the list * of the source schema as is */ public static Schema appendFieldsToSchema(Schema schema, List<Schema.Field> newFields) { List<Schema.Field> fields = schema.getFields().stream() .map(field -> new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal())) .collect(Collectors.toList()); fields.addAll(newFields); Schema newSchema = Schema.createRecord(schema.getName(), schema.getDoc(), schema.getNamespace(), schema.isError()); newSchema.setFields(fields); return newSchema; } /** * Passed in {@code Union} schema and will try to resolve the field with the {@code fieldSchemaFullName} * w/in the union returning its corresponding schema * * @param schema target schema to be inspected * @param fieldSchemaFullName target field-name to be looked up w/in the union * @return schema of the field w/in the union identified by the {@code fieldSchemaFullName} */ public static Schema resolveUnionSchema(Schema schema, String fieldSchemaFullName) { if (schema.getType() != Schema.Type.UNION) { return schema; } List<Schema> innerTypes = schema.getTypes(); Schema nonNullType = innerTypes.stream() .filter(it -> it.getType() != Schema.Type.NULL && Objects.equals(it.getFullName(), fieldSchemaFullName)) .findFirst() .orElse(null); if (nonNullType == null) { throw new AvroRuntimeException( String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema)); } return nonNullType; } /** * Resolves typical Avro's nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)}, * decomposing union and returning the target non-null type */ public static Schema resolveNullableSchema(Schema schema) { if (schema.getType() != Schema.Type.UNION) { return schema; } List<Schema> innerTypes = schema.getTypes(); Schema nonNullType = innerTypes.stream() .filter(it -> it.getType() != Schema.Type.NULL) .findFirst() .orElse(null); if (innerTypes.size() != 2 || nonNullType == null) { throw new AvroRuntimeException( String.format("Unsupported Avro UNION type %s: Only UNION of a null type and a non-null type is supported", schema)); } return nonNullType; } /** * Creates schema following Avro's typical nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)}, * wrapping around provided target non-null type */ 
public static Schema createNullableSchema(Schema.Type avroType) { checkState(avroType != Schema.Type.NULL); return Schema.createUnion(Schema.create(Schema.Type.NULL), Schema.create(avroType)); } }
Brian Austin Green and his wife Megan Fox may be inseparable... but at one point things weren’t looking too good. In a recent interview, Green revealed that the two had split up and called their engagement off back in 2009 because he didn’t want Megan to feel pressured by a relationship. Well... we’re glad you guys were able to work it out. Nice to see a happy couple these days...
Joining the long grain veneer sheets together is a little bit more difficult. These sheets need to be strong along the grain (which runs from front to back on the deck), and adding a joint that is perpendicular to the grain will create a weak point in the veneer sheet. There are a couple of ways that we can help prevent the joint on these layers from weakening: 1) Cut the veneer on an angle. If you create a joint that runs diagonally through the veneer sheet as opposed to right across the width, it will help disperse the pressure on any one spot of the joint. It will still be weaker than if there was no joint, but once glued up between the other layers, it will help make for a stronger joint. It also creates a longer edge for gluing, which will help the two sides bond together. 2) Stagger your joints in between each layer. If you have all of your joints in the same spot for every layer of the board, it will inevitably be weaker in that area. You can see in the photos that the joint is in a different spot along the length of each sheet of veneer, and we also flip the angle of each one before pressing. This will drastically help the strength of your deck after it has been pressed. To create the diagonal cut on each sheet, we cut them on a miter saw. We set the angle at 31.6 degrees, which is a preset notch on our saw, and cut each sheet per layer to match our 60" design. Some of the joints needed a little extra sanding to fit tightly, but this method proved to be pretty quick and accurate. Once all of the sheets were paired up at 60" long, we glued the diagonal edge and taped them together the same way as the cross grain sheets.
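To see how much extra glue edge the 31.6-degree miter buys, here is a quick geometric check; the sheet width is a made-up number, since the post doesn't state it.

import math

width = 9.0        # hypothetical veneer sheet width, in inches
miter_deg = 31.6   # the preset angle used on the saw

# A cut mitered at this angle crosses the sheet along a hypotenuse of
# length width / cos(angle), so the glue edge is longer than a square cut.
edge = width / math.cos(math.radians(miter_deg))
print(f"square-cut edge: {width:.2f} in")
print(f"mitered glue edge: {edge:.2f} in ({edge / width:.2f}x longer)")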
Zein, shellac and other materials have been used as encapsulating agents for high intensity sweeteners. The term "high intensity sweeteners" is defined herein to include sugarless sweeteners having a degree or intensity of sweetness which is many times greater than sucrose. High intensity sweeteners include, but are not necessarily limited to, aspartame, sucralose, salts of acesulfame, alitame, glycyrrhizin, saccharin and its salts, cyclamic acid and its salts, dihydrochalcones, thaumatin, monellin and the like, alone or in combination. High intensity sweeteners benefit from or require encapsulation in order to achieve a gradual and controlled release of sweetener during chewing and/or to promote their stability in chewing gum. For example, certain artificial dipeptide sweeteners such as aspartame (L-aspartyl-L-phenylanaline methyl ester) have been found to be excellent sugar substitutes in chewing gum. However, the stability of artificial dipeptide sweeteners depends on exposure to moisture, time, temperature and pH. Under unfavorable conditions, aspartame spontaneously converts to diketopiperazine with proportional loss of sweetness. Aspartame also degrades when exposed to aldehyde-type flavoring agents. In order to maintain the stability and sweetness of aspartame in chewing gum, it is necessary to minimize the exposure of aspartame to moisture, aldehyde-type flavors, and certain pH conditions. Zein, shellac and many other materials have been used to encapsulate aspartame and other high intensity sweeteners, in order to control the stabilities and rates of release of the sweeteners. For instance, U.S. Pat. No. 4,673,577 discloses the use of shellac, individually, to encapsulate high intensity sweeteners. U.S. Pat. No. 4,863,745 discloses the use of zein to coat high intensity sweeteners which have already been once coated with another material. The previous coating can be shellac. U.S. Pat. No. 4,931,295 discloses the coating of aspartame and other high intensity sweeteners with a mixture of zein and hydroxypropylmethyl cellulose. U.S. Pat. No. 4,384,004 discloses several coating materials for aspartame, including zein. PCT Publication WO 90/06061 discloses the use of equal amounts of zein and shellac mixed together, to coat alitame. European Patent Application No. 0 320 523 discloses the use of zein, shellac and other materials to agglomerate high intensity sweetener particles in a solvent. U.S. Pat. Nos. 4,122,195 and 4,139,639 disclose the fixation of aspartame in reaction product of a compound containing a polyvalent metal ion, with an ungelatinized starch acid-ester of a substituted dicarboxylic acid. U.S. Pat. No. 4,386,106 discloses an encapsulant for a flavoring material, composed of gelatin, a natural gum and a plasticizer. U.S. Pat. No. 4,485,118 discloses the use of shellac to encapsulate a flavoring material. U.S. Pat. No. 4,590,075 discloses an elastomer system for encapsulating flavor and sweetening agents. U.S. Pat. No. 4,722,845 discloses the encapsulation of dipeptide and amino acid sweeteners in a mixture of fat and polyethylene wax. U.S. Pat. No. 3,116,206 discloses generally the use of zein as an encapsulant.
In the vast area of piezo film applications, it is necessary to have a means of detecting the charge or voltage produced by a piezo or pyro event. An electrode is needed on each side of the film for any application. Most such applications require the electrodes on opposite surfaces of the film to be electrically isolated from each other. Thus, care must be observed when depositing the electrodes on film surfaces to insure that metal migration does not occur over the edges of the film to result in possible shorting out of the electrodes. It is sometimes necessary however to electrically connect an electrode on one film surface with an electrode on the opposing film surface or to an electrode on the surface of another film to control capacitance or impedance; to effect common electrical connection; to move a conductor from one film surface to another surface; to permit ease of lead attachment for termination, and the like. The present invention provides a facile and convenient method for electrically connecting electrodes on opposing surfaces of a piezoelectric film by forming at least one hole or channel through the film as well as the lead to be electrically connected or linked to another surface. The hole may be formed by pin, punch, or laser, for example. Then, by depositing conductive ink over the hole by silk-screening, for example, a portion of the ink will adhere to the hole sidewalls while penetrating the hole to effect the electrical connection with the pierced electrode. If it is desired to electrically move an electrode from the surface of one film to the surface of another film, it is only necessary to align the films, pierce the film whose surface is to be linked to the already deposited electrode, and silk-screen conductive ink over the pierced hole or channel. The invention is not intended to be limited to silk-screening processes for depositing a conductive ink over the hole, but may employ electrostatic printing and xerography techniques, spraying, painting, ink jet printing, carbon particle deposition, and the like.
/* Copyright (c) 2016 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Author: <NAME> */ #pragma once #include "library/vm/vm.h" namespace lean { /* ordering inductive datatype is defined as inductive cmp_result := | lt | eq | gt The following function converts lt -> -1 eq -> 0 gt -> 1 */ inline int ordering_to_int(vm_obj const & o) { return static_cast<int>(cidx(o)) - 1; } /* Convert an integer into a ordering value */ inline vm_obj int_to_ordering(int i) { if (i < 0) return mk_vm_simple(0); else if (i == 0) return mk_vm_simple(1); else return mk_vm_simple(2); } }
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: save_heatmap.py
# Created Date: Friday January 15th 2021
# Author: <NAME>
# Email: <EMAIL>
# Last Modified: Friday, 15th January 2021 10:23:13 am
# Modified By: <NAME>
# Copyright (c) 2021 Shanghai Jiao Tong University
#############################################################

import os

import seaborn as sns
import matplotlib.pyplot as plt
import cv2
import numpy as np


def SaveHeatmap(heatmaps, path, row=-1, dpi=72):
    """
    Render each map of a B x 1 x H x W tensor as a heatmap and tile
    the results into a single image saved at `path`.
    """
    batch_size = heatmaps.shape[0]
    temp_path = ".temp/"
    if not os.path.exists(temp_path):
        os.makedirs(temp_path)
    final_img = None
    # Work out the grid shape: a single row by default, otherwise enough
    # columns to hold the whole batch in `row` rows.
    if row < 1:
        col = batch_size
        row = 1
    else:
        col = batch_size // row
        if row * col < batch_size:
            col += 1
    row_i = 0
    col_i = 0
    for i in range(batch_size):
        img_path = os.path.join(temp_path, 'temp_batch_{}.png'.format(i))
        sns.heatmap(heatmaps[i, 0, :, :], vmin=0, vmax=heatmaps[i, 0, :, :].max(), cbar=False)
        plt.savefig(img_path, dpi=dpi, bbox_inches='tight', pad_inches=0)
        plt.clf()  # reset the figure so heatmaps do not pile up across iterations
        img = cv2.imread(img_path)
        if i == 0:
            H, W, C = img.shape
            final_img = np.zeros((H * row, W * col, C), dtype=np.uint8)
        final_img[H * row_i:H * (row_i + 1), W * col_i:W * (col_i + 1), :] = img
        col_i += 1
        if col_i >= col:
            col_i = 0
            row_i += 1
    cv2.imwrite(path, final_img)


if __name__ == "__main__":
    random_map = np.random.randn(16, 1, 10, 10)
    SaveHeatmap(random_map, "./wocao.png", 1)
import { deepMix } from '@antv/util';
import { BiaxOption, GeometryConfig, BiaxGeometry, LineConfig, AxisType } from './types';
import {
  DEFAULT_LINE_CONFIG,
  DEFAULT_COLUMN_CONFIG,
  DEFAULT_YAXIS_CONFIG,
  DEFAULT_OPTION,
  DEFAULT_LINE_COLOR,
} from './constant';

/**
 * Get the GeometryConfig
 * @param geometryConfig
 * @param axis
 */
export function getGeometryConfig(geometryConfig: GeometryConfig, axis: AxisType): GeometryConfig {
  const defaultStyle = {
    style: {
      stroke: DEFAULT_LINE_COLOR[axis],
    },
  };
  if (isLine(geometryConfig)) {
    return deepMix(defaultStyle, DEFAULT_LINE_CONFIG, geometryConfig || {});
  }
  if (isColumn(geometryConfig)) {
    return deepMix(defaultStyle, DEFAULT_COLUMN_CONFIG, geometryConfig || {});
  }
  return Object.assign({}, defaultStyle, DEFAULT_LINE_CONFIG);
}

/**
 * Get the Option
 * @param options
 */
export function getOption(options: BiaxOption): BiaxOption {
  const { yAxis = [], geometryConfigs = [] } = options;
  // TODO: optimize this a bit
  const mixYAxis = [
    yAxis[0] !== false ? deepMix({}, DEFAULT_YAXIS_CONFIG, yAxis[0]) : false,
    yAxis[1] !== false ? deepMix({}, DEFAULT_YAXIS_CONFIG, yAxis[1]) : false,
  ];
  return deepMix({}, DEFAULT_OPTION, options, {
    yAxis: mixYAxis,
    geometryConfigs: [
      getGeometryConfig(geometryConfigs[0], AxisType.Left),
      getGeometryConfig(geometryConfigs[1], AxisType.Right),
    ],
  });
}

/**
 * Determine from the GeometryConfig whether the geometry is a line
 */
export function isLine(geometryConfig: GeometryConfig): geometryConfig is LineConfig {
  return geometryConfig && geometryConfig.geometry && geometryConfig.geometry === BiaxGeometry.Line;
}

/**
 * Determine from the GeometryConfig whether the geometry is a column
 */
export function isColumn(geometryConfig: GeometryConfig): geometryConfig is LineConfig {
  return geometryConfig && geometryConfig.geometry && geometryConfig.geometry === BiaxGeometry.Column;
}
#ifndef __SERIAL_PORT__ #define __SERIAL_PORT__ #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/ioctl.h> #include <fcntl.h> #include <termios.h> #include <cerrno> class SerialPort { public: SerialPort(); SerialPort(const char*,int,int,int,int); ~SerialPort(); bool Open(const char*); bool SetBaudRate(int); bool SetParity(int,int,int); void Close(); void Write(unsigned char*,int); void Read(unsigned char*,int); int Read(unsigned char*); private: const char *path; int device; int baud_rate; int databits; int stopbits; int parity; }; #endif
<filename>src/routes/directory_api.rs use actix_web::{web, HttpRequest, HttpResponse}; use image::image_dimensions; use std::collections::BTreeMap; use std::ffi::OsString; use std::fs::{read_to_string, File}; use std::io::BufReader; use std::path::Path; use crate::{ error::Result, model::{ config::AppConfig, directory::{AlbumInfo, DirectoryAlbum, DirectoryFile, DirectoryListing, ImageDimensions}, }, }; pub fn get_exif_data(path: &Path) -> Option<BTreeMap<String, String>> { let file = File::open(path).ok()?; let mut bufreader = BufReader::new(&file); let exifreader = exif::Reader::new(); let exif = exifreader.read_from_container(&mut bufreader).ok()?; let mut exif_map = BTreeMap::new(); for f in exif.fields() { let key = format!("{}", f.tag); exif_map.insert(key, f.display_value().with_unit(&exif).to_string()); } Some(exif_map) } pub fn get_image_dimensions(path: &Path) -> Option<ImageDimensions> { image_dimensions(path).ok().map(|x| ImageDimensions { width: x.0, height: x.1, }) } pub async fn dir_listing( _req: HttpRequest, path: web::Path<String>, config: web::Data<AppConfig>, ) -> Result<HttpResponse> { let listing = get_dir_listing(path.into_inner(), config.get_ref())?; Ok(HttpResponse::Ok().json(listing)) } fn get_album_data(dir: &Path) -> Option<AlbumInfo> { // TODO: Just push to the dir path, don't have to iterate // Look for _picatch.album.toml let album_conf = dir .read_dir() .ok()? .find(|entry| { entry .as_ref() .map(|e| e.file_name() == OsString::from("_picatch.album.toml")) .unwrap_or(false) })? .ok()?; let info: AlbumInfo = toml::from_str(&read_to_string(album_conf.path()).ok()?).ok()?; Some(info) } pub fn get_dir_listing(path: String, config: &AppConfig) -> Result<DirectoryListing> { let base = Path::new(&*config.original_photos_dir); // replacement should end in "/" to remove the "/" at beginning of relative_path // If relative_path starts with "/", it will replace base. let relative_path = path.replace("/api/photos/", ""); let album_path = base.join(relative_path); let mut albums = Vec::new(); let mut files = Vec::new(); // album_path.is_dir for entry in album_path.read_dir()? { let entry = match entry { Ok(e) => e, Err(e) => { warn!("Failed to read file in directory {}", e); continue; } }; // Ignore dotfiles if let Some(name) = entry.file_name().to_str() { if name.starts_with('.') { continue; } } // if file is a directory, add '/' to the end of the name if let Ok(metadata) = entry.metadata() { if metadata.is_dir() { let info = get_album_data(&entry.path()); let dir_item = DirectoryAlbum { name: format!("{}/", entry.file_name().to_string_lossy().to_string()), info, }; albums.push(dir_item); } else { let file_path = entry.path(); let extension = match file_path .extension() .map(|ext| ext.to_string_lossy().to_lowercase()) { Some(e) => e, None => continue, }; if extension != "jpg" && extension != "jpeg" { continue; } let dir_item = DirectoryFile { name: entry.file_name().to_string_lossy().to_string(), exif: get_exif_data(&file_path), dimensions: get_image_dimensions(&file_path), }; files.push(dir_item); } } else { continue; } } // Sort by filename, doesn't need to be stable files.sort_unstable_by(|a, b| a.name.cmp(&b.name)); albums.sort_unstable_by(|a, b| a.name.cmp(&b.name)); Ok(DirectoryListing { current: album_path.to_string_lossy().to_string(), files, albums, }) }
from r_uintindex import idx

print(idx(["buckle", "my", "shoe"], 2))
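idx is imported from r_uintindex, which isn't shown here; judging from the call, it presumably returns the element at the given non-negative index, along the lines of this guess:

def idx(items, i):
    # Assumed behavior: plain indexing with an unsigned (non-negative) index.
    assert i >= 0
    return items[i]

print(idx(["buckle", "my", "shoe"], 2))  # -> shoe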
More than 58,000 children in the county have been vaccinated against flu with a nasal spray as part of a pilot programme. The childhood flu immunisations team from Leicestershire Partnership NHS Trust tested the new method on children aged five to 12, as part of the second year of a national NHS England scheme. The team visited 435 schools and units across Leicestershire between October and December 2014. The vaccine was offered to the parents and guardians of 97,555 healthy school children, and had an uptake of 59.1%.

Suzanne Leatherland, project lead for Leicestershire Partnership NHS Trust, said: “The dedication and commitment of the 120-strong team has been outstanding throughout the programme and credit and thanks should be shared with each member of staff. We worked really hard this year to ensure that all parents and guardians felt well informed about the nasal flu vaccine, with clear information sent in advance to seek their consent.

“We consulted with local faith leaders to help communicate the extra information in leaflets around the use of gelatine in the vaccine.”
Barcelona, Spain - Hans-Georg Gadamer, one of the greatest philosophers of the 20th century, died ten years ago, at the age of 102. As the last representative of the great German philosophical tradition of Leibniz, Hegel and Husserl, he is remembered all over the world with conferences, publications and tributes. This is a man who not only witnessed the sinking of the Titanic and the terrorist attacks of 9/11, but also wrote one of the last texts that could be considered a classic in the true meaning of the word: Truth and Method. This book, which he published at the age of 60, has been translated into a dozen languages. It outlined a new philosophical position that responded to our time by evading solutions that were hierarchically ordered in an absolute transcendental system: hermeneutics, the philosophy of interpretation. Gadamer was not simply an academic who managed to attract a number of followers, but a true philosopher whose interlocutors were such distinguished thinkers as Jean Grondin, Gianni Vattimo, Jacques Derrida, Jürgen Habermas and Richard Rorty.

When Gadamer turned 100 on February 11, 2000, my philosophy teacher told me to drop everything to travel to Heidelberg, where the last living German master was being honoured by many of the world's philosophers, intellectuals and politicians, including the president of Germany. It was incredible to see a philosopher who worked together with Paul Natorp, Nicolai Hartmann, Martin Heidegger, Hannah Arendt and Theodore Adorno signing volumes of his complete works and shaking everyone's hand as if they were all friends. But what must we remember about Gadamer today?

As with so many great philosophers, Gadamer was also a convinced traditionalist who believed that one of the unfortunate widespread characteristics of our age is that it has lost touch with the interpretation of the great texts of Western culture. He was convinced that only by re-establishing ties with the classics could humanity save itself from permanent annihilation caused by techno-scientific progress. Although Gadamer never induced anyone to denigrate science, he was concerned with the exaggerated fascination that idolising it engenders - as that which can be methodologically analysed is only a tiny part of our experience. Truly knowing does not simply mean certifying and controlling, but also interpreting and dialoguing, that is, critically engaging with the truths and methods that artificially sustain our beliefs.

Human beings, for Gadamer, are creatures who must continually interpret their world, since they are not neutral, independent or objective observers, but rather existential finite interpreters, always expressing linguistically their relation to the world. If the realm of language was so important for the German master, it's because it is impossible for us to know ourselves once and for all; self-understanding is a never-ending process, an activity that must be repeated, a task always still to be performed. Thus Gadamer's most famous dictum, "Being that can be understood is language," was meant primarily to underscore a crucial drawback that still today determines the limitations of many contemporary philosophers: ignorance of the other. "The soul of hermeneutics," Gadamer always said, "consists in the possibility that the other might be right."
This is why the concept of dialogue, that is, the necessity to "understand other people", was so important for him; after all, he lived through a violent century of wars, during which nobody seemed to be listening or recognising others. Probably this is what moved Gadamer in the first place to pursue and develop the hermeneutic tradition, which has always been concerned with the interpretations of others, that is, with pursuing a conversation with our tradition.

In this decade since Gadamer's death, hermeneutics has expanded internationally to the point of becoming not only one of the most respected representatives of continental philosophy, but also the greatest enemy of analytic philosophy, a philosophy fascinated precisely with what the German master feared most: science's unfettered methodological development. Although analytic philosophy continues to control many philosophical departments in the United States and the United Kingdom by allying itself with private scientific corporations, Gadamer gave us the tools to respond to this technocratic age - by inviting us to respect and learn from others' interpretations of classic texts and authors. Although it is now ten years since Heidelberg gave sanctuary to the father of hermeneutics, hermeneutics keeps him alive by warning us of the political dangers of a technocratic culture and its submission to scientific methods.

Santiago Zabala is ICREA Research Professor of Philosophy at the University of Barcelona. His books include The Hermeneutic Nature of Analytic Philosophy (2008), The Remains of Being (2009), and, most recently, Hermeneutic Communism (2011, co-authored with G Vattimo), all published by Columbia University Press.
/**
 * Increment the height of the scroller.
 *
 * @param dY how many pixels to increment.
 * @return false if the scroller height cannot be incremented any
 *         more, i.e. the <code>lastHeight</code> value has been reached.
 */
private boolean incHeight(int dY) {
    int y = scroller.getOffsetHeight();
    y += dY;
    if (y <= lastHeight) {
        scroller.setHeight("" + y + "px");
        return true;
    }
    if (hasBeenResized) {
        scroller.setHeight("" + lastHeight + "px");
    } else {
        scroller.setHeight("");
    }
    return false;
}
Optimizing Content Placement and Delivery in Wireless Distributed Cache Systems Through Belief Propagation

Wireless distributed cache system (WDCS) is a promising technique to offload traffic and improve energy efficiency in content-centric networks. In this paper, the content sharing problem is considered by minimizing the average energy consumption for content caching and delivery in device-to-device (D2D) enabled cellular networks. The problem is formulated as a joint optimization problem of multiple interactive variables, which is NP-hard. In order to solve this problem, we first decompose the original problem into three subproblems: content helper (CH) selection, content caching, and content delivery. Specifically, in CH selection, we propose a PageRank-based distributed algorithm to estimate the centrality of mobile users (MUs) by jointly analyzing their social relationships and channel conditions. Based on the selected CHs, the content caching and content delivery are formulated as two weighted matching problems. Two belief propagation (BP) based distributed algorithms, taking content popularity and channel states into account, are then proposed to solve the aforementioned problems. Furthermore, based on the correlation of the three subproblems, a heuristic-based alternating iterative optimization method (HAIOM) is proposed to improve the average energy-consumption performance of content delivery in the WDCS. Simulation results show that by jointly optimizing CH selection, content caching, and content delivery, the proposed scheme is capable of reducing the average energy consumption and the average content delivery delay, as well as increasing the caching hit ratio.
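The abstract doesn't spell out the algorithms, but the PageRank-based centrality step can be pictured as a standard power iteration over a weight matrix blending social ties and channel quality. Everything below (the blending rule, the damping factor, the toy matrices) is illustrative, not the paper's actual method.

import numpy as np

def helper_centrality(social, channel, alpha=0.5, damping=0.85, iters=100):
    # Blend social-relationship and channel-condition weights (assumed form).
    W = alpha * social + (1 - alpha) * channel
    P = W / W.sum(axis=0, keepdims=True)          # column-stochastic transition matrix
    n = W.shape[0]
    r = np.full(n, 1.0 / n)                       # start from a uniform rank vector
    for _ in range(iters):
        r = (1 - damping) / n + damping * P @ r   # PageRank power iteration
    return r

# Toy 3-user example with made-up symmetric weights.
social = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float)
channel = np.array([[0, 0.9, 0.2], [0.9, 0, 0.5], [0.2, 0.5, 0]], dtype=float)
print(helper_centrality(social, channel))  # higher score -> stronger CH candidate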
# # Copyright (c) 2016, 2019, Oracle and/or its affiliates. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are # permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of # conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of # conditions and the following disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to # endorse or promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED # OF THE POSSIBILITY OF SUCH DAMAGE. # import re import shutil import mx, mx_benchmark, mx_sulong, mx_buildtools import os import mx_subst from os.path import join, exists from mx_benchmark import VmRegistry, java_vm_registry, Vm, GuestVm, VmBenchmarkSuite def _benchmarksDirectory(): return join(os.path.abspath(join(mx.suite('sulong').dir, os.pardir, os.pardir)), 'sulong-benchmarks') _env_flags = [] if 'CPPFLAGS' in os.environ: _env_flags = os.environ['CPPFLAGS'].split(' ') class SulongBenchmarkRule(mx_benchmark.StdOutRule): def __init__(self, pattern, replacement): super(SulongBenchmarkRule, self).__init__( pattern=pattern, replacement=replacement) def parseResults(self, text): def _parse_results_gen(): for d in super(SulongBenchmarkRule, self).parseResults(text): line = d.pop('line') for iteration, value in enumerate(line.split(',')): r = d.copy() r['score'] = value.strip() r['iteration'] = str(iteration) yield r return (x for x in _parse_results_gen()) class SulongBenchmarkSuite(VmBenchmarkSuite): def __init__(self, *args, **kwargs): super(SulongBenchmarkSuite, self).__init__(*args, **kwargs) self.bench_to_exec = {} def group(self): return 'Graal' def subgroup(self): return 'sulong' def name(self): return 'csuite' def run(self, benchnames, bmSuiteArgs): vm = self.get_vm_registry().get_vm_from_suite_args(bmSuiteArgs) assert isinstance(vm, CExecutionEnvironmentMixin) # compile benchmarks # save current Directory currentDir = os.getcwd() for bench in benchnames: try: # benchmark dir path = self.workingDirectory(benchnames, bmSuiteArgs) # create directory for executable of this vm if os.path.exists(path): shutil.rmtree(path) os.makedirs(path) os.chdir(path) env = os.environ.copy() env['VPATH'] = '..' env = vm.prepare_env(env) out = vm.out_file() cmdline = ['make', '-f', '../Makefile', out] if mx._opts.verbose: # The Makefiles should have logic to disable the @ sign # so that all executed commands are visible. 
cmdline += ["MX_VERBOSE=y"] mx.run(cmdline, env=env) self.bench_to_exec[bench] = os.path.abspath(out) finally: # reset current Directory os.chdir(currentDir) return super(SulongBenchmarkSuite, self).run(benchnames, bmSuiteArgs) def benchmarkList(self, bmSuiteArgs): benchDir = _benchmarksDirectory() if not exists(benchDir): mx.abort('Benchmarks directory {} is missing'.format(benchDir)) return [f for f in os.listdir(benchDir) if os.path.isdir(join(benchDir, f)) and os.path.isfile(join(join(benchDir, f), 'Makefile'))] def failurePatterns(self): return [ re.compile(r'error:'), re.compile(r'Exception') ] def successPatterns(self): return [re.compile(r'^(### )?([a-zA-Z0-9\.\-_]+): +([0-9]+(?:\.[0-9]+)?)', re.MULTILINE)] def rules(self, out, benchmarks, bmSuiteArgs): return [ SulongBenchmarkRule( r'^first [\d]+ warmup iterations (?P<benchmark>[\S]+):(?P<line>([ ,]+(?:\d+(?:\.\d+)?))+)', { "benchmark": ("<benchmark>", str), "metric.name": "warmup", "metric.type": "numeric", "metric.value": ("<score>", float), "metric.score-function": "id", "metric.better": "lower", "metric.iteration": ("<iteration>", int), }), SulongBenchmarkRule( r'^last [\d]+ iterations (?P<benchmark>[\S]+):(?P<line>([ ,]+(?:\d+(?:\.\d+)?))+)', { "benchmark": ("<benchmark>", str), "metric.name": "time", "metric.type": "numeric", "metric.value": ("<score>", float), "metric.score-function": "id", "metric.better": "lower", "metric.iteration": ("<iteration>", int), }), ] def workingDirectory(self, benchmarks, bmSuiteArgs): if len(benchmarks) != 1: mx.abort( "Please run a specific benchmark (mx benchmark csuite:<benchmark-name>) or all the benchmarks (mx benchmark csuite:*)") vm = self.get_vm_registry().get_vm_from_suite_args(bmSuiteArgs) assert isinstance(vm, CExecutionEnvironmentMixin) return join(_benchmarksDirectory(), benchmarks[0], vm.bin_dir()) def createCommandLineArgs(self, benchmarks, bmSuiteArgs): if len(benchmarks) != 1: mx.abort("Please run a specific benchmark (mx benchmark csuite:<benchmark-name>) or all the benchmarks (mx benchmark csuite:*)") vmArgs = self.vmArgs(bmSuiteArgs) runArgs = self.runArgs(bmSuiteArgs) return vmArgs + [self.bench_to_exec[benchmarks[0]]] + runArgs def get_vm_registry(self): return native_vm_registry class CExecutionEnvironmentMixin(object): def out_file(self): return 'bench' def bin_dir(self): return '{}-{}'.format(self.name(), self.config_name()) def prepare_env(self, env): return env class GccLikeVm(CExecutionEnvironmentMixin, Vm): def __init__(self, config_name, options): self._config_name = config_name self.options = options def config_name(self): return self._config_name def c_compiler(self): return self.compiler_name() def cpp_compiler(self): return self.compiler_name() + "++" def compiler_name(self): mx.nyi('compiler_name', self) def c_compiler_exe(self): mx.nyi('c_compiler_exe', self) def run(self, cwd, args): myStdOut = mx.OutputCapture() retCode = mx.run(args, out=mx.TeeOutputCapture(myStdOut), cwd=cwd) return [retCode, myStdOut.data] def prepare_env(self, env): env['CFLAGS'] = ' '.join(self.options + _env_flags + ['-lm', '-lgmp']) env['CC'] = self.c_compiler_exe() return env class GccVm(GccLikeVm): def __init__(self, config_name, options): super(GccVm, self).__init__(config_name, options) def name(self): return "gcc" def compiler_name(self): return "gcc" def c_compiler_exe(self): return "gcc" class ClangVm(GccLikeVm): def __init__(self, config_name, options): super(ClangVm, self).__init__(config_name, options) def name(self): return "clang" def compiler_name(self): 
mx_sulong.ensureLLVMBinariesExist() return mx_sulong.findLLVMProgram(mx_buildtools.ClangCompiler.CLANG) def c_compiler_exe(self): return mx_buildtools.ClangCompiler.CLANG class SulongVm(CExecutionEnvironmentMixin, GuestVm): def config_name(self): return "default" def name(self): return "sulong" def run(self, cwd, args): bench_file = args[-1] launcher_args = self.launcher_args(args[:-1]) + [bench_file] if hasattr(self.host_vm(), 'run_launcher'): result = self.host_vm().run_launcher('lli', launcher_args, cwd) else: def _filter_properties(args): props = [] remaining_args = [] vm_prefix = "--vm.D" for arg in args: if arg.startswith(vm_prefix): props.append('-D' + arg[len(vm_prefix):]) else: remaining_args.append(arg) return props, remaining_args props, launcher_args = _filter_properties(launcher_args) sulongCmdLine = self.launcher_vm_args() + \ props + \ ['-XX:-UseJVMCIClassLoader', "com.oracle.truffle.llvm.launcher.LLVMLauncher"] result = self.host_vm().run(cwd, sulongCmdLine + launcher_args) return result def prepare_env(self, env): env['CFLAGS'] = ' '.join(_env_flags + ['-lm', '-lgmp']) env['LLVM_COMPILER'] = mx_buildtools.ClangCompiler.CLANG env['CLANG'] = mx_buildtools.ClangCompiler.CLANG env['OPT_FLAGS'] = ' '.join(self.opt_phases()) return env def out_file(self): return 'bench.opt.bc' def opt_phases(self): return [ '-mem2reg', '-globalopt', '-simplifycfg', '-constprop', '-instcombine', '-dse', '-loop-simplify', '-reassociate', '-licm', '-gvn', ] def launcher_vm_args(self): return mx_sulong.getClasspathOptions() + \ [mx_subst.path_substitutions.substitute('-Dpolyglot.llvm.libraryPath=<path:SULONG_LIBS>')] def launcher_args(self, args): launcher_args = [ '--vm.Dgraal.TruffleInliningMaxCallerSize=10000', '--vm.Dgraal.TruffleCompilationExceptionsAreFatal=true', '--llvm.libraries=libgmp.so.10'] + args return launcher_args def hosting_registry(self): return java_vm_registry _suite = mx.suite("sulong") native_vm_registry = VmRegistry("Native", known_host_registries=[java_vm_registry]) native_vm_registry.add_vm(GccVm('O0', ['-O0']), _suite) native_vm_registry.add_vm(ClangVm('O0', ['-O0']), _suite) native_vm_registry.add_vm(GccVm('O1', ['-O1']), _suite) native_vm_registry.add_vm(ClangVm('O1', ['-O1']), _suite) native_vm_registry.add_vm(GccVm('O2', ['-O2']), _suite) native_vm_registry.add_vm(ClangVm('O2', ['-O2']), _suite) native_vm_registry.add_vm(GccVm('O3', ['-O3']), _suite) native_vm_registry.add_vm(ClangVm('O3', ['-O3']), _suite) native_vm_registry.add_vm(SulongVm(), _suite, 10)
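For reference, this suite documents its own invocation in the abort messages above: run a single benchmark with mx benchmark csuite:<benchmark-name>, or the whole suite with mx benchmark csuite:*, optionally selecting one of the VM configurations registered at the bottom of the file (the gcc/clang O0-O3 variants or the sulong VM).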
/**
 * Saves the current stage.
 * @return true if the validation and save were successful
 */
private boolean saveStage() {
    if (validate()) {
        Stage newStage;
        if (isEditMode) {
            newStage = stage;
        } else {
            newStage = new Stage();
        }
        newStage.setStageName(stageNameEditText.getText().toString().replaceFirst("^ *", ""));

        // The boolean flags can be set directly from the radio-button state.
        newStage.setCompleted(yesComplete.isChecked());
        newStage.setWaitingForResponse(yesWaiting.isChecked());
        newStage.setSuccessful(yesSuccessful.isChecked());

        // A date field counts as filled in when it contains a "/" separator.
        newStage.setDateOfDeadline(deadlineDateEditText.getText().toString().contains("/")
                ? deadlineDateEditText.getText().toString() : null);
        newStage.setDateOfStart(startDateEditText.getText().toString().contains("/")
                ? startDateEditText.getText().toString() : null);
        newStage.setDateOfCompletion(completionDateEditText.getText().toString().contains("/")
                ? completionDateEditText.getText().toString() : null);
        newStage.setDateOfReply(replyDateEditText.getText().toString().contains("/")
                ? replyDateEditText.getText().toString() : null);

        newStage.setNotes(notesStageEditText.getText().toString().replaceFirst("^ *", ""));

        if (isEditMode) {
            mDataSource.updateStage(newStage);
        } else {
            mDataSource.createStage(newStage, parentApplicationID);
        }

        Intent intent = new Intent(getApplicationContext(), StageInformationActivity.class);
        intent.putExtra(StageTable.COLUMN_ID, newStage.getStageID());
        intent.putExtra(ApplicationTable.APPLICATION_ID, parentApplicationID);
        intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
        startActivity(intent);
        return true;
    } else {
        Toast.makeText(this, getResources().getString(R.string.pleaseFillForm), Toast.LENGTH_SHORT).show();
        return false;
    }
}
Outcome after surgical intervention in children with chronic inflammatory bowel disease.

A retrospective review of 32 children and adolescents (18 males and 14 females) with chronic inflammatory bowel disease (CIBD) requiring surgery was undertaken. These patients were followed between 1979 and 1992. Their age range was from 4 to 17 years at the time of diagnosis (mean age, 11 years). The interval between the time of diagnosis and surgery ranged from 2 months to 11 years (mean, 3.7 years). Ten patients had ulcerative colitis and 22 had Crohn's disease. These patients represented 12 per cent of patients with CIBD seen at the Gastroenterology Clinic of Children's Hospital of Michigan during these 13 years. Indications for surgery included failure of medical treatment (seven patients), localized disease with significant side effects of therapy (nine), partial or complete obstruction (five), growth retardation (six), perforation (two), abscess and fistula (three). The extent of disease was as follows: panenteric, 2 patients; enteric, 2 patients; ileocecal, 15 patients; and colonic, 13 patients. In 15 patients (47%) surgery led to complete relief of symptoms for a minimum of 1 year after surgery. Seven patients (22%) had recurrence of symptoms that were controlled by medical treatment. Two patients required a second surgery and additional medical and nutritional treatment. All six patients having surgery for growth retardation showed catch-up growth in weight and height. We conclude that surgery can decrease morbidity and improve quality of life in CIBD patients. Best results are obtained in patients with localized disease.
The development of HR support for alternative international assignments. From liminal position to institutional support for short-term assignments, international business travel and virtual assignments

Purpose: The purpose of this paper is to explain how and why HR practitioners perceive the need to develop international HRM practices to support short-term assignments, international business travel and virtual assignments for internationally operating organizations.
Design/methodology/approach: The authors interviewed 29 HR practitioners from multinationals located in the Netherlands.
Findings: Alternative international assignments seem not to belong to the traditional expatriate jobs, nor to regular domestic jobs, and show a liminal character. However, over the last few years we have gradually seen a more mature classification of the Short-term Assignment, International Business Traveler and Virtual Assignment categories and more active use of these categories in policymaking by organizations; this reflects a transition of these three categories from a liminal position to a more institutionalized position.
Research limitations/implications: For this research, only international HRM practitioners were interviewed. Future studies should include a broader group of stakeholders.
Practical implications: International HRM departments should take a more proactive role regarding alternative forms of international assignees. Furthermore, HR professionals may develop training and coaching and consider rewards and benefits that could provide allowances for specific working conditions that are part of international work.
Originality/value: This study is among the first to relate the framework of institutional logic and liminality to explain the why of HR support for alternative international assignees.
<filename>BearEngineFramework/src/sandbox.cpp #include<iostream> #include<window\window.h> #include<core\bounding_box.h> #include<core\vector2.h> #include<graphics\graphics.h> #include<graphics/renderers/particle_source.h> #include<graphics/renderers/renderer.h> #include<memory/resource_manager.h> #include<core/random.h> #include<graphics/animated_sprite.h> #include<math.h> using namespace bear; constexpr auto WIDTH = 1920; constexpr auto HEIGHT = 1080; #define CREATE_TEXTURE(name, path, format) bear::ResourceManager::Instance()->CreateTexture(name, path, format) int main() { bear::window::Window myWindow(WIDTH, HEIGHT, "Let's go", true); myWindow.setVSync(true); if (!graphics::Graphics::init(WIDTH, HEIGHT)) std::cout << "Graphics failed to init send help\n"; // Create the framebuffer shader graphics::Shader* fbShader = ResourceManager::Instance()->CreateShaderFromFile("fbShader", "shaders\\fb_vertex.txt", "shaders\\fb_fragment2.txt", ""); fbShader->enable(); fbShader->setUniformInteger("texFramebuffer", 0); // Create the framebuffer graphics::Framebuffer* fb1 = new graphics::Framebuffer(WIDTH, HEIGHT); fb1->setShader("fbShader"); // Create the batch renderer //graphics::BatchRenderer _renderer; //_renderer.init(); graphics::View view; view.setPosition({ 500,500 }); core::Vector2f _x = { -500,-500 }; graphics::Renderer slow_fuck; slow_fuck.init(); //slow_fuck.setFramebuffer(fb1); // Create the particle renderer //graphics::ParticleSource pr; //pr.init(); CREATE_TEXTURE("fire", "shaders\\fire.png", graphics::image_format::RGBA); CREATE_TEXTURE("cat", "shaders\\cat.png", graphics::image_format::RGBA); CREATE_TEXTURE("floor", "shaders\\floor.png", graphics::image_format::RGBA); std::vector<graphics::Renderable> renderable_list; const int SIZE = 32; for (int i = 0; i <= 24; i++) { for (int j = 0; j <= 24; j++) { renderable_list.push_back(graphics::Renderable()); renderable_list.back().m_TextureName = "floor"; renderable_list.back().m_Transform.m_Size = core::Vector2f(SIZE, SIZE); renderable_list.back().m_Transform.m_Position = core::Vector2f(i*SIZE, j*SIZE); } } graphics::Renderable renderable; renderable.m_Layer = 10; renderable.setTextureNameWData("fire"); graphics::Renderable cat; cat.m_Layer = 11; cat.setTextureNameWData("cat"); core::Clock fpsTimer; unsigned int fps = 0; fpsTimer.reset(); fpsTimer.start(); int counter = 0; while (myWindow.isOpen()) { counter++; if (fpsTimer.getTicks() >= 1000) { fpsTimer.reset(); std::cout << "Measured FPS: " << fps << std::endl; fps = 0; } float dt = myWindow.getDeltaTime(); // Get the delta time for the last frame for (Event event : myWindow.getRegisteredEvents()) { // Process the events here if (event.type == EventType::WindowReiszed) { graphics::Graphics::window_resize_callback(event.size.x, event.size.y); //graphics::Graphics::set_uniform_size(event.size.x, event.size.y); float temp = (WIDTH / event.size.x); graphics::Graphics::set_zoom(temp); } } if (myWindow.isKeyDown(Key::D)) //_x.x -= 1 * dt; view.translate(core::Vector2f(-1 * dt, 0)); if (myWindow.isKeyDown(Key::A)) //_x.x += 1 * dt; view.translate(core::Vector2f(1 * dt, 0)); if (myWindow.isKeyDown(Key::S)) //_x.y += 1 * dt; view.translate(core::Vector2f(0, -1 * dt)); if (myWindow.isKeyDown(Key::W)) //_x.y -= 1 * dt; view.translate(core::Vector2f(0, 1 * dt)); if (myWindow.isKeyDown(Key::X)) graphics::Graphics::set_zoom(0.5f); if (myWindow.isKeyDown(Key::Z)) graphics::Graphics::set_zoom(1.0f); if (myWindow.isKeyDown(Key::C)) graphics::Graphics::set_zoom(1.5f); counter += 1; float mouse_x = 
myWindow.getMousePosition().x; mouse_x /= WIDTH; fbShader->setUniformFloat("vx_offset", mouse_x); //pr.update(dt); // =================================== RENDERING BEGINS HERE ===========================================0 myWindow.clear(); // Here is where the window is cleared and we can now render to the fresh window // Rendering begin //_renderer.begin(); core::Vector2f pos = view.getPosition(); pos.lerp(_x, 0.000005*dt); pos.x = round(pos.x); pos.y = round(pos.y); view.setPosition(pos); // Slow rendering BITCH slow_fuck.begin(); //slow_fuck.submit(renderable); //slow_fuck.submit(cat); for (graphics::Renderable &r : renderable_list) { slow_fuck.submit(r); } slow_fuck.flush(view); myWindow.display(); fps++; } graphics::Graphics::exit(); return 0; }
Shared Mental Models and Shared Displays: An Empirical Evaluation of Team Performance

This study experimentally tested the use of shared mental models and shared displays as a means of enhancing team situation awareness (SA). Teams were tested using a simulation that incorporated features of a distributed team architecture. As hypothesized, the presence of shared displays and shared mental models improved team performance. However, the mechanism whereby the shared displays aided performance was not as direct as expected: teams were initially slower when given a shared display, yet after the shared display was removed, their performance exceeded all other conditions. The combination of non-shared displays and no mental model was highly detrimental to performance; teams who experienced this condition first were never able to develop very good performance. Overall, we found that effective team performance could be enhanced by providing teams with sufficient information to build a shared mental model of each other's tasks and goals, either through direct instruction or through provision of shared displays. It is believed that the shared displays helped to build shared mental models, which boosted later task performance.
// Copyright 2022 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.skyframe.serialization; import com.google.common.collect.ImmutableSet; import com.google.devtools.build.lib.skyframe.IgnoredPackagePrefixesValue; import com.google.devtools.build.lib.skyframe.serialization.testutils.SerializationTester; import com.google.devtools.build.lib.vfs.PathFragment; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** Tests for IgnoredPackagePrefixesValueCodec. */ @RunWith(JUnit4.class) public class IgnoredPackagePrefixesValueCodecTest { @Test public void testCodec() throws Exception { new SerializationTester( IgnoredPackagePrefixesValue.of(ImmutableSet.<PathFragment>of()), IgnoredPackagePrefixesValue.of(ImmutableSet.of(PathFragment.create("/foo"))), IgnoredPackagePrefixesValue.of( ImmutableSet.of(PathFragment.create("/foo"), PathFragment.create("/bar/moo")))) .runTests(); } }
/** * Save a page to the disk * @param pagePath: the path to save the file */ public void saveData(String pagePath) throws FileNotFoundException { PrintWriter printWriter = new PrintWriter(new FileOutputStream(pagePath), true); for (int i = 0; i < current; i++) { String[] record = data[i]; printWriter.println(String.join(",", record) + "," + deleted[i]); } printWriter.close(); }
As England breeze past Czech Republic in a five-goal thrashing, here are four things to take from the opening Euro 2020 qualifier.

Before England’s brilliant victory in Spain last October, Sterling had scored two goals in his first 45 appearances for his country. But a brilliant hat-trick made it five in three games, having also netted twice in Seville, and he went off to a deserved standing ovation. The forward also won the penalty from which Harry Kane scored. Sterling is a genuine candidate for this season’s Player of the Year award thanks to his Manchester City performances and he is transferring that brilliant form to England. Gareth Southgate has always known he had a world-class player on his hands in the form of Sterling and the rest of Europe will be worried to see that he is now bringing out the best of him on the international scene at the start of Euro 2020 qualifying. Now aged 24, Sterling looks more confident than ever for his country and he nearly had a hand in another goal when he ran at the Czech defence before releasing the ball to Kane, whose cross should have been turned in by Jadon Sancho. Despite several good performances, there was only one candidate for the man of the match award and that is hugely encouraging. The way in which he celebrated his hat-trick also underlined how much it all meant to Sterling.

It was striking that there was not a single Manchester United player in England’s matchday squad at Wembley. Marcus Rashford and Luke Shaw had pulled out through injury, while Jesse Lingard had told Gareth Southgate he would not be fit enough for the Czech Republic and Montenegro games before the midfielder played against Wolverhampton Wanderers in the FA Cup. It would have been unthinkable to contemplate an England squad with no United players not so long ago, but that is no longer the case. All three United players will be part of Southgate’s thoughts and plans for this summer’s Nations League, but Lingard in particular may now face a battle to get his starting place back. Southgate had admitted he was surprised to see the 26-year-old face Wolves and spoke to him afterwards to clarify the situation. Regardless of whether he was satisfied with the explanation, Lingard must somehow convince the England manager he is worthy of a place ahead of the likes of Jadon Sancho, Dele Alli and Ross Barkley. That is no easy task.

James Milner might give him a run for his money, but Harry Kane must be the best penalty taker in English football right now. When he puts the ball on the spot, he simply does not miss and that was the case again at Wembley. The penalty he scored at the end of the first half against Czech Republic was his sixth in succession for England. Kane’s performance was also notable for the way he dropped deep to create opportunities for his team-mates. It was from the captain’s superb pass to split the Czech defence that Jadon Sancho got free on the right and set up Sterling to score. He also delivered the cross from which Sancho squandered an excellent chance. Like Tottenham Hotspur, England’s biggest worry must be how they will ever cope if Kane gets injured at a major tournament. But, also like Spurs, the Three Lions must feel incredibly grateful to have one of the best strikers in the world to count on.

It’s been a whirlwind few weeks for Declan Rice, but he can now officially call himself an England player after making his debut as a 63rd-minute replacement for Dele Alli.
Any apprehension that an old social media post referencing the IRA would be held against him by the Wembley crowd would have been quickly calmed by the reception he received when he first warmed up down the touchline. Ross Barkley went on ahead of him when Eric Dier was forced off early through injury, but Rice’s time came with just under half-an-hour to go and, again, he was applauded by the England supporters. With Dier surely now a doubt to face Montenegro in Podgorica on Monday night, Rice may well be handed a first England start.
Proteomic Analysis Reveals Aberrant O-GlcNAcylation of Extracellular Proteins from Breast Cancer Cell Secretion. BACKGROUND O-GlcNAcylation is a unique intracellular protein modification; however, few extracellular O-GlcNAc-modified proteins have been discovered. We have previously demonstrated that many cellular proteins were aberrant in O-GlcNAcylation in breast cancer tissues. In the present study, therefore, we investigated whether O-GlcNAc-modified proteins were abnormally secreted from breast cancer cells. MATERIALS AND METHODS Intracellular and extracellular proteins were prepared from cell lysates of breast cancer cells (MCF-7 and MDA-MB-231) and normal breast cells (HMEC) and from their serum-free media (SFM), respectively. O-GlcNAcylation level was examined by immunoblotting. O-GlcNAc-modified proteins were identified using two-dimensional gel electrophoresis and liquid chromatography-tandem mass spectrometry. RESULTS O-GlcNAcylation level was significantly increased in the extracellular compartment of both types of cancer cells compared to normal cells. Interestingly, O-GlcNAc patterns differed between intracellular and extracellular proteins. Proteomic analysis revealed that many O-GlcNAc spots in MCF-7 secretions were abnormally increased in comparison to those in HMEC secretions. Among these, transitional endoplasmic reticulum ATPase (TER ATPase) and heat-shock 70 kDa protein (HSP70) were confirmed to be O-GlcNAc-modified. The levels of O-GlcNAc-HSP70 and O-GlcNAc-TER ATPase were higher in SFM from MCF-7 cells than in that from HMEC. CONCLUSION O-GlcNAcomic study of the extracellular compartments reveals aberrant O-GlcNAc-secreted proteins, which may be of interest as potential biomarkers in breast cancer.
GEMC1 is a critical regulator of multiciliated cell differentiation The generation of multiciliated cells (MCCs) is required for the proper function of many tissues, including the respiratory tract, brain, and germline. Defects in MCC development have been demonstrated to cause a subclass of mucociliary clearance disorders termed reduced generation of multiple motile cilia (RGMC). To date, only two genes, Multicilin (MCIDAS) and cyclin O (CCNO), have been identified in this disorder in humans. Here, we describe mice lacking GEMC1 (GMNC), a protein with a similar domain organization to Multicilin that has been implicated in DNA replication control. We have found that GEMC1-deficient mice are growth impaired, develop hydrocephaly with a high penetrance, and are infertile, due to defects in the formation of MCCs in the brain, respiratory tract, and germline. Our data demonstrate that GEMC1 is a critical regulator of MCC differentiation and a candidate gene for human RGMC or related disorders.
import { Component, OnInit } from '@angular/core';
import { AccountService } from '../_service';
import { User, Report } from '../_models';
import { MatTableDataSource } from '@angular/material/table';
import { SelectionModel } from '@angular/cdk/collections';
import { ActivatedRoute, Router } from '@angular/router';

// Current user cached in local storage; falls back to an empty object when absent.
let user = JSON.parse(localStorage.getItem('user')) || {};

@Component({
    selector: 'app-report',
    templateUrl: 'user-report-list.component.html',
    styleUrls: ['./home.component.scss']
})
export class UserReportListComponent implements OnInit {
    createReport: boolean = false;
    displayedReport: string[] = ['id', 'username', 'details', 'date'];
    displayedReport2: string[] = ['id', 'username', 'details', 'date', 'approval'];
    report: string;
    loading: boolean = false;
    loadingMyReport: boolean = false;
    selection = new SelectionModel<Report>(true, []);
    dataSource: MatTableDataSource<Report>;
    dataSource2: MatTableDataSource<Report>;

    constructor(
        private accountService: AccountService,
        private route: ActivatedRoute,
        private router: Router,
    ) { }

    ngOnInit() {
        this.getReport();
        this.getMyReport();
    }

    createNewReport() {
        this.createReport = true;
    }

    // Load the list of reports awaiting approval.
    getReport() {
        this.accountService.getApproveList().subscribe(res => {
            console.log(res);
            this.dataSource = new MatTableDataSource(res);
            this.loading = true;
        });
    }

    // Load the current user's own reports, looked up by user id.
    getMyReport() {
        this.accountService.getMyReportList(user.id).subscribe(res => {
            console.log(res);
            this.dataSource2 = new MatTableDataSource(res);
            this.loadingMyReport = true;
        });
    }

    newreport() {
        this.router.navigate(['home/report']);
    }

    submitNewReport() {
        console.log(this.accountService.getUser.username);
        this.accountService.submitReport(this.accountService.getUser.username, this.report)
            .subscribe(
                data => {
                    console.log(data);
                },
                error => { }
            );
        console.log('submit!');
    }

    clear_data() {
        this.selection.clear();
        // this.selectedDataList.length = 0;
    }

    selectRow(row) {
        console.log(row);
        this.router.navigate(['home/reportDetails', { data: row.id }]);
    }
}
CBS News Poll analysis by the CBS News Polling Unit: Sarah Dutton, Jennifer De Pinto, Fred Backus and Anthony Salvanto.

(CBS News) Most Americans are unfamiliar with Supreme Court Chief Justice John Roberts or have no opinion of him. However, in the wake of Roberts' decision to side with liberals on the court and uphold President Obama's health care law, a new CBS News/New York Times poll shows that the conservative justice is more popular among liberals than conservatives.

Among Americans overall, 73 percent have no opinion or are undecided about Roberts, according to the poll, conducted July 11-16. Fourteen percent have a favorable view of him, while 12 percent have an unfavorable view. Among self-identified conservatives, just nine percent have a favorable view of Roberts, who was appointed to the high court by President George W. Bush. Twice as many, 18 percent, have an unfavorable view. Among liberals, 13 percent have a positive view of Roberts while 8 percent have an unfavorable view. Twenty percent of moderates approve of the chief justice.

Overall views of the Supreme Court have declined slightly in the aftermath of the health care ruling. Among Americans overall, voters are now split, with 41 percent who approve of the court and 41 percent who disapprove. Another 18 percent have no opinion or don't know. In May of this year, 44 percent approved while 36 percent disapproved. Among Republicans and conservatives, more now disapprove of the job the Supreme Court is doing (52 percent) than did so before the landmark ruling (35 percent). The views of Democrats and independents changed little.

The public is divided on the Supreme Court decision itself: 46 percent think the decision to uphold most of the law was a good thing, while 41 percent think it was bad. While views on the decision may be mixed, more than half of Americans (53 percent) think the court's decision was based mostly on personal and political views, rather than legal analysis. Majorities of Republicans and independents hold this view, but Democrats are split. Still, the court's ruling does not appear to have changed overall opinions of the health care law very much: more still disapprove than approve of the law. Overall, 50 percent of voters said they disapprove of the law, 36 percent approve. As to what Congress should do about the health care law, a third of Americans say it should be kept as is or expanded, 27 percent think only the individual mandate should be repealed, while 34 percent would like Congress to repeal the entire law.

Politically, 55 percent of voters say the court's ruling will make no difference in their vote for president this November. Among those for whom it matters, 28 percent say the ruling makes them more likely to vote for Romney, while 13 percent say it makes them more likely to back Mr. Obama. The issue of the court's ruling on health care is more of a motivator for Romney supporters than it is for those who are backing the president. When it comes to another policy debate - whether or not to extend President Bush's 2001 and 2003 tax cuts - the poll found that 49 percent of Americans agree with Mr. Obama that they should be extended for households earning less than $250,000. Another 27 percent would like to see the tax cuts extended for everyone, while 17 percent want them to expire for all. Americans' views were largely the same when CBS News asked this question in a November 2010 poll.
There are differences by party, with most Democrats and nearly half of independents supporting the president's proposal, while a plurality of Republicans want the tax cuts continued for everyone.

Phone numbers were dialed from samples of both standard land-line and cell phones. The error due to sampling for results based on the entire sample could be plus or minus three percentage points. The error for subgroups may be higher. This poll release conforms to the Standards of Disclosure of the National Council on Public Polls.
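The three-point sampling-error figure is consistent with the standard 95% confidence formula for a simple random sample. The poll's exact sample size is not stated here, but assuming roughly 1,000 respondents and the most conservative proportion (p = 0.5):

MOE = z × √(p(1 − p)/n) = 1.96 × √(0.25/1000) ≈ 0.031, i.e., about ±3 percentage points.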
Internet Explorer 2

History

IE replicated many of the quirks of Netscape Navigator and allowed importing bookmarks from it. In May 1996, FTP Software announced it was providing Microsoft with various technology for Internet Explorer 2.0 for Windows 3.1, including a PPP network, a 16-bit email client, and other technology.

Availability

Internet Explorer version 2 was released in beta in October 1995, only two months after version 1 came out in Microsoft Plus! for 95 that August. It was released for Windows 95 and Windows NT 3.5 in November 1995 and was bundled with NT 4.0 in July 1996. The beta for Mac on PowerPC came out in January, and the finalized version in April for 68k and PowerPC. Version 2 was also the first release for Windows 3.1 and Macintosh System 7.0.1 (PPC or 68k), although the Mac version was not released until January 1996 for PPC, and April for 68k. Version 2 was included in Windows 95 OEM Service Release 1 (OSR 1) and Microsoft's Internet Starter Kit for Windows 95 in early 1996. It launched with twelve languages, including English, but this expanded to 24, 20, and 9 for Windows 95, Windows 3.1, and Mac, respectively, by April 1996. The 2.0i version supported double-byte character sets to display Chinese, Japanese, or Korean characters in web pages. Version 2.1 for the Mac came out in August 1996, the same month version 2 for Windows was superseded by Microsoft Internet Explorer 3, which was heavily changed from version 2. There were 16-bit and 32-bit versions, depending on the OS.

Mac version

The Mac version, especially version 2.1, was praised for being economical with resources and for new features. Internet Explorer supported the embedding of a number of multimedia formats into web pages, including AVI- and QuickTime-formatted video and AIFF-, MIDI-, and WAV-formatted audio. The non-beta final version was released three months later, on April 23, 1996. Version 2.1 fixed bugs and improved stability, but also added a few features, such as support for the NPAPI (the first version of Internet Explorer on any platform to do so) and support for QuickTime VR. AOL 3.0 for Macintosh used the IE 2.1 rendering engine in its built-in web browser. The various 16- and 32-bit versions largely depended on the OS, although Windows NT would use the 16-bit versions for Windows 3.1.

"Netscape has enjoyed a virtual monopoly of the browser market (about 90% according to some estimates), and this has allowed it to consolidate its position still further by introducing unofficial or 'extended' HTML tags. As a result, the Web is littered with pages that only work effectively if viewed in Navigator. By the time other browsers catch up, Netscape has made even more additions." — Jack Weber, MacUser (1996)
Kwaku Aning

Early life and education

Aning was born in 1946 in Ghana. He was educated at Accra Academy, a high school in Accra, Ghana. Aning completed a bachelor's degree in Mechanical Engineering in 1968 from the University of Science and Technology, graduating with first class honours as one of the first four people to obtain first class honours in the Engineering Sciences from the University; included in this accomplishment was his Accra Academy mate Samuel Gyasi, who later became a professor at the University of California, Berkeley. That same year, he entered Princeton University, receiving a master's degree in Solid State Physics in 1971. Aning obtained a doctoral degree in metallurgy from Columbia University in 1976.

Career

Aning started his working career as a Technical Advisor to the UN Conference on Science and Technology for Development. In January 1980, Aning became a Senior Scientific Affairs Officer of the UN Centre for Science and Technology, remaining in this position for the next twelve years, in a role with a special focus on the science and technology development of developing countries. Aning moved on to the UN Peacekeeping Department, working as a Regional Election Officer from March 1992 to June 1994. Between August 1998 and January 2000, Aning served as a Senior Officer in the Office of the Secretary-General. In February 2000, Kwaku Aning joined the International Atomic Energy Agency as the Representative of the Director General of the agency to the United Nations in New York. Aning is the Chairman of the Nuclear Power Institute, GAEC. Aning is also Chairman of the Governing Board, Ghana Atomic Energy Commission.

Family and personal life

Aning married Arafua Apaloo-Aning, a political activist and keen gardener.
package com.yahoo.research.robme.anthelion.framework;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;

import com.yahoo.research.robme.anthelion.models.AnthHost;
import com.yahoo.research.robme.anthelion.models.AnthURL;
import com.yahoo.research.robme.anthelion.models.ClassificationMode;
import com.yahoo.research.robme.anthelion.models.banditfunction.DomainValueFunction;

/**
 * Main class of the Anthelion module. This class includes all the necessary
 * components to manipulate an incoming {@link AnthURL} list, reorder the list,
 * and push it to a given {@link AnthURL} output list. Within the process, the
 * URLs are attached to their hosts, and a bandit approach selects the next
 * host from which URLs should be crawled. Within the domain entity, a
 * {@link PriorityQueue} is filled with URLs based on the score calculated by
 * the {@link AnthOnlineClassifier}.
 *
 * @author <NAME> (<EMAIL>)
 */
public class AnthProcessor {

    private BufferedWriter logFileWriter;
    private boolean writeLog;
    protected Queue<AnthURL> inputList;
    protected Queue<AnthURL> outputList;
    private boolean useStaticClassifier = false;
    protected ConcurrentHashMap<String, AnthHost> knownDomains = new ConcurrentHashMap<String, AnthHost>();
    private Thread banditThread;
    private Thread pusherThread;
    private Thread pullerThread;
    private Thread statusThread;
    private UrlPusher pusher;
    private UrlPuller puller;
    private StatusPrinter statusViewer;
    private AnthBandit bandit;
    protected AnthOnlineClassifier onlineLearner;
    protected ArrayBlockingQueue<AnthHost> queuedDomains;

    public AnthProcessor(Queue<AnthURL> inputList, Queue<AnthURL> outputList, Properties prop) {
        // delegate to the full constructor so this instance is actually initialized
        this(inputList, outputList, prop, null);
    }

    /**
     * Use to initialize the crawler or add additional feedback. Will
     * immediately start the learning process.
     *
     * @param list of {@link AnthURL}s which should be used to learn.
     */
    public void initiateClassifier(List<AnthURL> list) {
        onlineLearner.initialize(list);
    }

    public AnthProcessor(Queue<AnthURL> inputList, Queue<AnthURL> outputList, Properties prop, String logFile) {
        if (prop.getProperty("classifier.static") != null) {
            useStaticClassifier = Boolean.parseBoolean(prop.getProperty("classifier.static"));
        }
        if (prop.get("bandit.lambdadecay") == null) {
            bandit = new AnthBandit(Double.parseDouble(prop.getProperty("bandit.lambda")),
                    Integer.parseInt(prop.getProperty("domain.known.min")),
                    Integer.parseInt(prop.getProperty("domain.queue.offertime")), this);
        } else {
            bandit = new AnthBandit(Double.parseDouble(prop.getProperty("bandit.lambda")),
                    Integer.parseInt(prop.getProperty("domain.known.min")),
                    Integer.parseInt(prop.getProperty("domain.queue.offertime")), this,
                    Boolean.parseBoolean(prop.getProperty("bandit.lambdadecay")),
                    Integer.parseInt(prop.getProperty("bandit.lambdadecayvalue")));
        }
        pusher = new UrlPusher(this,
                ClassificationMode.valueOf(prop.getProperty("classifier.mode")) == ClassificationMode.OnDemand);
        try {
            puller = new UrlPuller(this,
                    Integer.parseInt(prop.getProperty("inputlist.size")),
                    ClassificationMode.valueOf(prop.getProperty("classifier.mode")) == ClassificationMode.OnExplore,
                    (DomainValueFunction) Class.forName(prop.getProperty("domain.value.function")).newInstance());
        } catch (NumberFormatException | InstantiationException | IllegalAccessException
                | ClassNotFoundException e1) {
            System.out.println("Could not instantiate the configured DomainValueFunction");
            e1.printStackTrace();
            System.exit(0);
        }
        if (logFile != null) {
            try {
                this.logFileWriter = new BufferedWriter(new FileWriter(new File(logFile)));
                writeLog = true;
            } catch (IOException e) {
                System.out.println("Could not create logWriter");
                writeLog = false;
            }
        } else {
            // just for readability
            writeLog = false;
        }
        statusViewer = new StatusPrinter();
        // setting references
        this.inputList = inputList;
        this.outputList = outputList;
        // initializing
        queuedDomains = new ArrayBlockingQueue<AnthHost>(Integer.parseInt(prop.getProperty("domain.queue.size")));
        banditThread = new Thread(bandit, "Anthelion Bandit");
        pusherThread = new Thread(pusher, "Anthelion URL Pusher");
        pullerThread = new Thread(puller, "Anthelion URL Puller");
        statusThread = new Thread(statusViewer, "Anthelion Status Viewer");
        onlineLearner = new AnthOnlineClassifier(prop.getProperty("classifier.name"),
                prop.getProperty("classifier.options"),
                Integer.parseInt(prop.getProperty("classifier.hashtricksize")),
                Integer.parseInt(prop.getProperty("classifier.learn.batchsize")));
    }

    /**
     * Start the threads.
     */
    public void start() {
        statusThread.start();
        System.out.println("Started Status Viewer");
        pullerThread.start();
        System.out.println("Started Url Puller");
        banditThread.start();
        System.out.println("Started Bandit");
        pusherThread.start();
        System.out.println("Started Url Pusher");
    }

    /**
     * Stop the threads.
     */
    public void stop() {
        puller.switchOf();
        System.out.println("Switched off Url Puller");
        bandit.switchOf();
        System.out.println("Switched off Bandit");
        pusher.switchOf();
        System.out.println("Switched off Url Pusher");
        statusViewer.switchOf();
        System.out.println("Switched off Status Viewer");
    }

    /**
     * Check the status of the threads.
     */
    public void getStatus() {
        System.out.println("Status Url Puller: " + pullerThread.getState());
        System.out.println("Status Bandit: " + banditThread.getState());
        System.out.println("Status Url Pusher: " + pusherThread.getState());
    }

    /**
     * Switch off the URL Puller to empty the internal queue.
     */
    public void runEmpty() {
        puller.switchOf();
        System.out.println("Switched off Url Puller");
    }

    /**
     * Push crawled feedback back into the system.
     *
     * @param url URL String - as we assume the data structure used by the
     *            crawler is different from the internal data structure.
     * @param sem if the URL included structured data or not.
     * @throws URISyntaxException if the URL String is not valid based on the
     *             {@link URI} specifications.
     */
    public void addFeedback(String url, boolean sem) throws URISyntaxException {
        // we need to give feedback to the classifier and the domains itself
        URI uri = new URI(url);
        addFeedback(uri, sem);
    }

    public void addFeedback(URI uri, boolean sem) {
        AnthHost domain = knownDomains.get((uri.getHost() != null ? uri.getHost() : AnthURL.UNKNOWNHOST));
        if (domain == null) {
            return;
        }
        AnthURL aurl = domain.returnFeedback(uri, sem);
        if (aurl == null) {
            // There is one way this could happen - if the URL was in the
            // process twice. If the time between the two appearances is great
            // enough, Anthelion will also push the URL multiple times as
            // feedback.
            return;
        }
        // learn / update classifier if not static
        if (!useStaticClassifier) {
            onlineLearner.pushFeedback(aurl);
        }
    }

    private class StatusPrinter implements Runnable {

        protected boolean run;

        public StatusPrinter() {
            if (writeLog) {
                writeLogFileHeader();
            }
        }

        public void switchOf() {
            run = false;
        }

        private void writeLogFileHeader() {
            if (writeLog) {
                String header = "LogTime\tInputListSize\tUrlsPulled\tGoodUrlsPulled\tOutputListSize\tPushedUrls\tPusherProcessingTime\tPushedGoodUrls\tPushedBadUrls\tKnownDomains\tDomainsInQueue\tReadyUrls\tReadyDomains\tUrlsWaitingForFeedback\tBanditProcessingTime\tBanditArmsPulled\tBanditGoodArmsPulled\tBanditBadArmsPulled\tClassifierRight\tClassifierTotal\tClassifiedRightsPushed\n";
                try {
                    logFileWriter.write(header);
                    logFileWriter.flush();
                } catch (IOException e) {
                    System.out.println("LogFile is not ready to be written.");
                    writeLog = false;
                }
            } else {
                System.out.println("LogFile is not ready to be written.");
            }
        }

        @Override
        public void run() {
            run = true;
            while (run) {
                Iterator<Map.Entry<String, AnthHost>> iter = knownDomains.entrySet().iterator();
                int rdyDomains = 0;
                int readyUrls = 0;
                int feedback = 0;
                int processed = 0;
                int right = 0;
                int banditGood = 0;
                int banditBad = 0;
                while (iter.hasNext()) {
                    Map.Entry<String, AnthHost> pairs = (Map.Entry<String, AnthHost>) iter.next();
                    if (pairs.getValue().rdyToEnqueue()) {
                        rdyDomains++;
                    }
                    readyUrls += pairs.getValue().getReadyUrlsSize();
                    feedback += pairs.getValue().getAwaitingFeedbackSize();
                    processed += pairs.getValue().visitedUrls;
                    right += pairs.getValue().predictedRight;
                    banditGood += pairs.getValue().goodUrls;
                    banditBad += pairs.getValue().badUrls;
                }
                StringBuilder sb = new StringBuilder();
                if (writeLog) {
                    // date
                    sb.append(new Date());
                    sb.append("\t");
                    // size of input list
                    sb.append(inputList.size());
                    sb.append("\t");
                    // number of pulled urls
                    sb.append(puller.pulledUrl);
                    sb.append("\t");
                    // number of good pulled urls
                    sb.append(puller.goodPulledUrl);
                    sb.append("\t");
                    // size of output list
                    sb.append(outputList.size());
                    sb.append("\t");
                    // number of pushed URLs
                    sb.append(pusher.pushedUrl);
                    sb.append("\t");
                    // processing time of pusher
                    sb.append(pusher.processingTime);
                    sb.append("\t");
                    // good pushed
                    sb.append(pusher.good);
                    sb.append("\t");
                    // bad pushed
                    sb.append(pusher.bad);
                    sb.append("\t");
                    // number of known domains
                    sb.append(knownDomains.size());
                    sb.append("\t");
                    // queue size of domains to be chosen from
                    sb.append(queuedDomains.size());
                    sb.append("\t");
                    // number of urls ready to be taken by pusher
                    sb.append(readyUrls);
                    sb.append("\t");
                    // ready domains
                    sb.append(rdyDomains);
                    sb.append("\t");
                    // waiting for feedback
                    sb.append(feedback);
                    sb.append("\t");
                    // bandit processing time
                    sb.append(bandit.processingTime);
                    sb.append("\t");
                    // number of arms pulled by bandit
                    sb.append(bandit.armsPulled);
                    sb.append("\t");
                    sb.append(banditGood);
                    sb.append("\t");
                    sb.append(banditBad);
                    sb.append("\t");
                    // right classified
                    sb.append(right);
                    sb.append("\t");
                    // classified in total
                    sb.append(processed);
                    sb.append("\t");
                    // predicted right pushed
                    sb.append(pusher.predictedRight);
                    sb.append("\n");
                    try {
                        logFileWriter.write(sb.toString());
                        logFileWriter.flush();
                    } catch (IOException e) {
                        System.out.println("Could not write log. Switch to console.");
                        writeLog = false;
                    }
                } else {
                    // status about the queues
                    sb.append("\n----------------------------------------------------\n");
                    sb.append(new Date());
                    sb.append(" Status Viewer says: ");
                    sb.append("\nInput queue: ");
                    sb.append(inputList.size());
                    sb.append("\nPulled URLs (good): ");
                    sb.append(puller.pulledUrl);
                    sb.append(" (");
                    sb.append(puller.goodPulledUrl);
                    sb.append(")\nOutput queue: ");
                    sb.append(outputList.size());
                    sb.append("\nPushed URLs: ");
                    sb.append(pusher.pushedUrl);
                    sb.append("\nKnown Domains: ");
                    sb.append(knownDomains.size());
                    sb.append("\nQueued Domains: ");
                    sb.append(queuedDomains.size());
                    sb.append("\nQueued Urls: ");
                    sb.append(readyUrls);
                    sb.append(" with ");
                    sb.append(rdyDomains);
                    sb.append(" domains being ready.");
                    sb.append("\nWaiting for feedback from URLs: ");
                    sb.append(feedback);
                    sb.append("\nBandit Processing time (ms): ");
                    sb.append((double) bandit.processingTime / bandit.armsPulled);
                    sb.append(" after pulling ");
                    sb.append(bandit.armsPulled);
                    sb.append(" arms.");
                    // processing rate
                    sb.append("\nAvg Time to pull new URL from queue (ms): ");
                    sb.append((double) pusher.processingTime / pusher.pushedUrl);
                    sb.append("\nCrawling Ratio (good/bad): ");
                    long good = pusher.good;
                    long bad = pusher.bad;
                    sb.append((double) good / bad);
                    sb.append(" (");
                    sb.append(good);
                    sb.append("/");
                    sb.append(bad);
                    sb.append(")");
                    // accuracy
                    sb.append("\nClassifier Accuracy: ");
                    sb.append((double) right / processed);
                    sb.append(" (");
                    sb.append(right);
                    sb.append("/");
                    sb.append(processed);
                    sb.append(")");
                    sb.append("\n");
                    System.out.println(sb.toString());
                }
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
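A minimal wiring sketch for the class above, assuming only the property keys that the constructor actually reads; every value is illustrative, and the two keys marked as placeholders must point at real implementations in your setup.

import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import com.yahoo.research.robme.anthelion.framework.AnthProcessor;
import com.yahoo.research.robme.anthelion.models.AnthURL;

public class AnthProcessorDemo {
    public static void main(String[] args) {
        // Queues shared between the crawler and the processor.
        Queue<AnthURL> input = new ConcurrentLinkedQueue<AnthURL>();
        Queue<AnthURL> output = new ConcurrentLinkedQueue<AnthURL>();

        // Every key below is read in the AnthProcessor constructor; values are made up.
        Properties prop = new Properties();
        prop.setProperty("bandit.lambda", "0.5");
        prop.setProperty("domain.known.min", "5");
        prop.setProperty("domain.queue.offertime", "500");
        prop.setProperty("domain.queue.size", "100");
        prop.setProperty("inputlist.size", "1000");
        prop.setProperty("classifier.mode", "OnDemand"); // or "OnExplore"
        prop.setProperty("classifier.name", "...");       // placeholder: classifier implementation name
        prop.setProperty("classifier.options", "");
        prop.setProperty("classifier.hashtricksize", "22");
        prop.setProperty("classifier.learn.batchsize", "100");
        prop.setProperty("domain.value.function", "..."); // placeholder: fully qualified DomainValueFunction class

        AnthProcessor processor = new AnthProcessor(input, output, prop, "anthelion.log");
        processor.start();
        // ... feed URLs into input, drain output, and report crawl results
        // back via processor.addFeedback(url, hasStructuredData) ...
        processor.stop();
    }
}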
package android.support.v7.widget;

import android.content.Context;
import android.graphics.drawable.Drawable;
import android.support.v4.view.GravityCompat;
import android.support.v4.view.ViewCompat;
import android.support.v7.appcompat.R;
import android.support.v7.widget.LinearLayoutCompat.LayoutParams;
import android.util.AttributeSet;
import android.view.View;
import android.view.View.MeasureSpec;
import android.view.ViewGroup;

public class AlertDialogLayout extends LinearLayoutCompat {

    public AlertDialogLayout(Context context) {
        super(context);
    }

    public AlertDialogLayout(Context context, AttributeSet attributeSet) {
        super(context, attributeSet);
    }

    protected void onMeasure(int i, int i2) {
        if (!tryOnMeasure(i, i2)) {
            super.onMeasure(i, i2);
        }
    }

    private boolean tryOnMeasure(int i, int i2) {
        int id;
        int measuredHeight;
        int i3;
        int i4;
        int combineMeasuredStates;
        int i5;
        View view = null;
        View view2 = null;
        int childCount = getChildCount();
        int i6 = 0;
        View view3 = null;
        while (i6 < childCount) {
            View childAt = getChildAt(i6);
            if (childAt.getVisibility() == 8) {
                childAt = view2;
                view2 = view;
            } else {
                id = childAt.getId();
                if (id == R.id.topPanel) {
                    View view4 = view2;
                    view2 = childAt;
                    childAt = view4;
                } else if (id == R.id.buttonPanel) {
                    view2 = view;
                } else if (id != R.id.contentPanel && id != R.id.customPanel) {
                    return false;
                } else {
                    if (view3 != null) {
                        return false;
                    }
                    view3 = childAt;
                    childAt = view2;
                    view2 = view;
                }
            }
            i6++;
            view = view2;
            view2 = childAt;
        }
        int mode = MeasureSpec.getMode(i2);
        int size = MeasureSpec.getSize(i2);
        int mode2 = MeasureSpec.getMode(i);
        id = 0;
        i6 = getPaddingBottom() + getPaddingTop();
        if (view != null) {
            view.measure(i, 0);
            i6 += view.getMeasuredHeight();
            id = ViewCompat.combineMeasuredStates(0, ViewCompat.getMeasuredState(view));
        }
        int i7 = 0;
        if (view2 != null) {
            view2.measure(i, 0);
            i7 = resolveMinimumHeight(view2);
            measuredHeight = view2.getMeasuredHeight() - i7;
            i6 += i7;
            id = ViewCompat.combineMeasuredStates(id, ViewCompat.getMeasuredState(view2));
            i3 = measuredHeight;
        } else {
            i3 = 0;
        }
        if (view3 != null) {
            if (mode == 0) {
                measuredHeight = 0;
            } else {
                measuredHeight = MeasureSpec.makeMeasureSpec(Math.max(0, size - i6), mode);
            }
            view3.measure(i, measuredHeight);
            measuredHeight = view3.getMeasuredHeight();
            i6 += measuredHeight;
            id = ViewCompat.combineMeasuredStates(id, ViewCompat.getMeasuredState(view3));
            i4 = measuredHeight;
        } else {
            i4 = 0;
        }
        measuredHeight = size - i6;
        if (view2 != null) {
            i6 -= i7;
            i3 = Math.min(measuredHeight, i3);
            if (i3 > 0) {
                measuredHeight -= i3;
                i7 += i3;
            }
            view2.measure(i, MeasureSpec.makeMeasureSpec(i7, 1073741824));
            i7 = view2.getMeasuredHeight() + i6;
            combineMeasuredStates = ViewCompat.combineMeasuredStates(id, ViewCompat.getMeasuredState(view2));
            int i8 = measuredHeight;
            measuredHeight = i7;
            i7 = i8;
        } else {
            i7 = measuredHeight;
            combineMeasuredStates = id;
            measuredHeight = i6;
        }
        if (view3 == null || i7 <= 0) {
            i5 = measuredHeight;
            measuredHeight = combineMeasuredStates;
        } else {
            measuredHeight -= i4;
            view3.measure(i, MeasureSpec.makeMeasureSpec(i7 + i4, mode));
            int i8 = measuredHeight + view3.getMeasuredHeight();
            measuredHeight = ViewCompat.combineMeasuredStates(combineMeasuredStates, ViewCompat.getMeasuredState(view3));
            i5 = i8;
        }
        combineMeasuredStates = 0;
        for (i7 = 0; i7 < childCount; i7++) {
            View childAt2 = getChildAt(i7);
            if (childAt2.getVisibility() != 8) {
                combineMeasuredStates = Math.max(combineMeasuredStates, childAt2.getMeasuredWidth());
            }
        }
        setMeasuredDimension(
                ViewCompat.resolveSizeAndState(combineMeasuredStates + (getPaddingLeft() + getPaddingRight()), i, measuredHeight),
                ViewCompat.resolveSizeAndState(i5, i2, 0));
        if (mode2 != 1073741824) {
            forceUniformWidth(childCount, i2);
        }
        return true;
    }

    private void forceUniformWidth(int i, int i2) {
        int makeMeasureSpec = MeasureSpec.makeMeasureSpec(getMeasuredWidth(), 1073741824);
        for (int i3 = 0; i3 < i; i3++) {
            View childAt = getChildAt(i3);
            if (childAt.getVisibility() != 8) {
                LayoutParams layoutParams = (LayoutParams) childAt.getLayoutParams();
                if (layoutParams.width == -1) {
                    int i4 = layoutParams.height;
                    layoutParams.height = childAt.getMeasuredHeight();
                    measureChildWithMargins(childAt, makeMeasureSpec, 0, i2, 0);
                    layoutParams.height = i4;
                }
            }
        }
    }

    private static int resolveMinimumHeight(View view) {
        int minimumHeight = ViewCompat.getMinimumHeight(view);
        if (minimumHeight > 0) {
            return minimumHeight;
        }
        if (view instanceof ViewGroup) {
            ViewGroup viewGroup = (ViewGroup) view;
            if (viewGroup.getChildCount() == 1) {
                return resolveMinimumHeight(viewGroup.getChildAt(0));
            }
        }
        return 0;
    }

    protected void onLayout(boolean z, int i, int i2, int i3, int i4) {
        int i5;
        int paddingLeft = getPaddingLeft();
        int i6 = i3 - i;
        int paddingRight = i6 - getPaddingRight();
        int paddingRight2 = (i6 - paddingLeft) - getPaddingRight();
        i6 = getMeasuredHeight();
        int childCount = getChildCount();
        int gravity = getGravity();
        int i7 = gravity & 8388615;
        switch (gravity & 112) {
            case 16:
                i6 = (((i4 - i2) - i6) / 2) + getPaddingTop();
                break;
            case 80:
                i6 = ((getPaddingTop() + i4) - i2) - i6;
                break;
            default:
                i6 = getPaddingTop();
                break;
        }
        Drawable dividerDrawable = getDividerDrawable();
        if (dividerDrawable == null) {
            i5 = 0;
        } else {
            i5 = dividerDrawable.getIntrinsicHeight();
        }
        int i8 = i6;
        for (int i9 = 0; i9 < childCount; i9++) {
            View childAt = getChildAt(i9);
            if (!(childAt == null || childAt.getVisibility() == 8)) {
                int i10;
                int measuredWidth = childAt.getMeasuredWidth();
                int measuredHeight = childAt.getMeasuredHeight();
                LayoutParams layoutParams = (LayoutParams) childAt.getLayoutParams();
                i6 = layoutParams.gravity;
                if (i6 < 0) {
                    i6 = i7;
                }
                switch (GravityCompat.getAbsoluteGravity(i6, ViewCompat.getLayoutDirection(this)) & 7) {
                    case 1:
                        i10 = ((((paddingRight2 - measuredWidth) / 2) + paddingLeft) + layoutParams.leftMargin) - layoutParams.rightMargin;
                        break;
                    case 5:
                        i10 = (paddingRight - measuredWidth) - layoutParams.rightMargin;
                        break;
                    default:
                        i10 = paddingLeft + layoutParams.leftMargin;
                        break;
                }
                if (hasDividerBeforeChildAt(i9)) {
                    i6 = i8 + i5;
                } else {
                    i6 = i8;
                }
                i8 = layoutParams.topMargin + i6;
                setChildFrame(childAt, i10, i8, measuredWidth, measuredHeight);
                i8 += layoutParams.bottomMargin + measuredHeight;
            }
        }
    }

    private void setChildFrame(View view, int i, int i2, int i3, int i4) {
        view.layout(i, i2, i + i3, i2 + i4);
    }
}
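The height-budgeting logic in tryOnMeasure leans entirely on MeasureSpec packing: a measure spec is just a mode stored in the top two bits of an int alongside a size. A self-contained sketch of that packing, mirroring the constants of android.view.View.MeasureSpec (shown here for illustration only, not part of the class above):

public final class MeasureSpecDemo {
    // Same layout as android.view.View.MeasureSpec.
    static final int MODE_SHIFT = 30;
    static final int MODE_MASK = 0x3 << MODE_SHIFT;
    static final int UNSPECIFIED = 0 << MODE_SHIFT;
    static final int EXACTLY = 1 << MODE_SHIFT;     // 1073741824, the literal seen above
    static final int AT_MOST = 2 << MODE_SHIFT;

    static int makeMeasureSpec(int size, int mode) {
        return (size & ~MODE_MASK) | (mode & MODE_MASK);
    }

    public static void main(String[] args) {
        // Budget 400px for the scrolling content panel, as a cap rather than a fixed size.
        int spec = makeMeasureSpec(400, AT_MOST);
        System.out.println("size=" + (spec & ~MODE_MASK)
                + " exact=" + ((spec & MODE_MASK) == EXACTLY));
    }
}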
Performance characteristics of Y-Ba-Cu-O microwave superconducting detectors

Identifies a high-Tc temperature-transition-edge, nonbolometric detector response in microstrip line device structures fabricated from thin films of Y1Ba2Cu3O7-x deposited on MgO single-crystal substrates. Such detectors have an intrinsically fast response and in principle are capable of operating up to frequencies represented by the superconducting energy gap (terahertz region), if such an energy-gap concept is applicable to these unique materials. The detector output, a dc voltage across the device, is a function of bias current and temperature, with an optimum temperature in the vicinity of the point at which zero resistance is reached (To). The mechanism for the detector action is believed to be related to the switching of the superconductor from its superconducting state to its normal state, driven by the input radiation, while the superconductor is held at a suitable current bias point and temperature. Films made by both the laser ablation process and chemical spray pyrolysis form detector elements. However, the films formed by these two techniques are vastly different morphologically. The extent to which film morphology influences detector performance has been examined in an effort to address the so-called weak-link question. The noise equivalent power (NEP) of a laser-ablated detector has been estimated to be below -80 dBm for an operating condition of 70 K, a frequency of 8 GHz, and modulation at 60 kHz. The measurement limitation is extrinsic noise, and it is believed that these devices are extremely low noise, far better than is possible with conventional Schottky diodes/p-n junctions, or Josephson-like SIS structures for that matter. Various other performance characteristics are presented, along with a suggested NSN model for electrical conduction.
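As a unit check on that noise floor (a conversion, not a figure from the paper): -80 dBm referenced to 1 mW corresponds to

P = 1 mW × 10^(-80/10) = 10^(-8) mW = 10^(-11) W = 10 pW.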
Salmon (Salmo salar) Side Streams as a Bioresource to Obtain Potential Antioxidant Peptides after Applying Pressurized Liquid Extraction (PLE)

The pressurized liquid extraction (PLE) technique was used to obtain protein extracts with antioxidant capacity from salmon muscle remains, heads, viscera, skin, and tailfins. A protein recovery percentage of ≈28% was obtained for all samples except for viscera, which was ≈92%. These values represented an increase of 1.5-4.8-fold compared to stirring extraction (control). Different SDS-PAGE profiles in control and PLE extracts revealed that extraction conditions affected the protein molecular weight distribution of the obtained extracts. Both TEAC (Trolox equivalent antioxidant capacity) and ORAC (oxygen radical antioxidant capacity) assays showed an outstanding antioxidant activity for the viscera PLE extract. Through liquid chromatography coupled with electrospray ionization triple time-of-flight (nanoESI qQTOF) mass spectrometry, 137 and 67 peptides were identified in PLE and control extracts from salmon viscera, respectively. None of these peptides was found among the antioxidant peptides inputted in the BIOPEP-UWM database. However, bioinformatics analysis showed several antioxidant small peptides encrypted in amino acid sequences of viscera extracts, especially GPP (glycine-proline-proline) and GAA (glycine-alanine-alanine) for PLE extracts. Further research on the relationship between antioxidant activity and specific peptides from salmon viscera PLE extracts is required. In addition, the salmon side streams studied presented non-toxic levels of As, Hg, Cd, and Pb, as well as the absence of mycotoxins or related metabolites. Overall, these results confirm the feasible use of farmed salmon processing side streams as alternative sources of protein and bioactive compounds for human consumption.

Introduction

Salmon consumption has tripled since the 1980s, mainly because it is considered a healthy food due to its contents of polyunsaturated fatty acids, quality proteins, vitamins, and minerals. The versatility of commercialized salmon products (i.e., fresh, frozen, smoked, fillet, canned, sushi, ready meals) is also related to a wide distribution, as well as an increased interest aroused by consumers and the food industry. At the same time, the salmon aquaculture sector has grown worldwide. In Europe, Atlantic salmon (Salmo salar) is currently the most important farmed species in volume and value, exceeding 1.3 million tons and 5 billion EUR in 2017. Since salmon has a great fillet yield, it is one of the most highly processed fishes. As a result, 50% of complete fresh salmon has been estimated to correspond to side stream materials. Therefore, a large amount of discards are available to develop high-added-value products, including those intended for human consumption. In this context, the nutritional characterization of several salmon processing side streams revealed that they are rich in protein (10-20%) and fat (20-30%).

Total Antioxidant Capacity

The results of total antioxidant capacity, determined using the Trolox equivalent antioxidant capacity (TEAC) and oxygen radical antioxidant capacity (ORAC) methods in control and PLE extracts of salmon side streams, are shown in Figure 1.
TEAC values in PLE extracts were 734 ± 38, 472 ± 7, 3739 ± 209, 147 ± 37, and 704 ± 42 µM Trolox Equivalents (Eq) for muscle, head, viscera, skin, and tailfins, respectively, whereas TEAC values in the corresponding control extracts were 776 ± 32, 322 ± 18, 778 ± 26, 206 ± 12, and 324 ± 22 µM Trolox Eq. Regarding the ORAC assay, the values of total antioxidant capacity were higher in PLE extracts than in control extracts for all samples. ORAC values (µM Trolox Eq) in PLE extracts were 4586 ± 241 (muscle), 3567 ± 63 (heads), 7772 ± 1174 (viscera), 1244 ± 94 (skin), and 2620 ± 78 (tailfins), whereas control ORAC values were 3005 ± 217, 797 ± 73, 2451 ± 139, 599 ± 19, and 736 ± 39, respectively. Therefore, PLE-assisted extraction improved the antioxidant capacity (ORAC) compared to conventional extraction for all salmon side streams. The increases were 1.5-, 4.5-, 3.2-, 2-, and 3.6-fold for muscle, head, viscera, skin, and tailfins, respectively. As for TEAC, the antioxidant capacity of PLE extracts also increased compared to the controls for head (1.5), viscera (4.8), and tails (2.2), whereas the muscle and skin values remained without significant changes. The highest antiradical activity was observed in PLE extracts of viscera for both antioxidant assays. These results are slightly different to those obtained for PLE extracts of sea bass and sea bream by-products, in which muscle PLE extracts showed the highest values of antioxidant capacity determined by both TEAC and ORAC methods. The antioxidant capacity of viscera PLE extracts from sea bass and sea bream was similar to that of head PLE extracts. These differences may be due to the fact that sea bass and sea bream are more closely related species than either is to salmon.
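The fold increases just quoted follow directly from the paired means; spelled out for the ORAC assay:

muscle: 4586/3005 ≈ 1.5; heads: 3567/797 ≈ 4.5; viscera: 7772/2451 ≈ 3.2; skin: 1244/599 ≈ 2.1; tailfins: 2620/736 ≈ 3.6.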
On the other hand, the different antioxidant capacity exhibited by the protein extracts obtained is probably related to both the size and the amino acid composition of the protein fragments of each salmon side stream. Several authors have suggested that hydrophobic amino acids could contribute to the total antioxidant activity of protein fragments. In this way, glycine and glutamic acid have been reported as the most abundant polar amino acids in salmon heads, skin, and viscera. Hydrophobic amino acids such as alanine, proline, leucine, and valine were also found in relevant quantities. In addition, the molecular weight of fish peptides (0.5-1.5 kDa) has been associated with antioxidant properties. According to this, the outstanding antioxidant capacity shown by PLE viscera extracts could mean the presence of bioactive peptides with some of the aforementioned amino acids in their sequence.

Protein Recovery Percentage

The results of protein recovery in control and PLE extracts from salmon side streams are shown in Figure 2. The percentage of protein recovery in PLE extracts of salmon muscle, head, viscera, skin, and tailfins was 26.65 ± 1.57, 27.50 ± 3.83, 92.03 ± 4.80, 29.39 ± 0.05, and 28.29 ± 3.66, respectively, whereas those of their corresponding control extracts were 23.51 ± 0.31, 18.57 ± 1.14, 56.76 ± 1.87, 18.41 ± 0.64, and 5.82 ± 0.63. Therefore, PLE improved the protein recovery for all side streams. The improvement in protein recovery was close to 1.5-fold for heads, viscera, and skin extracts. The tailfin extracts experienced a 5-fold increase with the PLE technique, whereas salmon muscle results were similar for both conventional stirring and PLE extraction. The best protein recovery was observed in viscera, consistent with previously observed protein recoveries in extracts of sea bass and sea bream side streams after applying PLE-assisted extraction. Few food matrices or related side streams have been used for protein extraction by means of PLE. For instance, different seaweeds, as well as seeds from red pepper, showed protein recovery percentages of about 5% and 50%, respectively.
Figure 2. Percentage of protein recovery in control and PLE extracts from salmon muscle, heads, viscera, skin, and tailfin. PLE: pressurized liquid extraction. Results are expressed as mean ± standard deviation (n = 2). Different lowercase letters in bars indicate statistically significant differences (p < 0.05) among samples.

Protein Molecular Weight Distribution

The protein molecular weight distribution of salmon side stream extracts, obtained both through conventional stirring and PLE-assisted extraction, was provided by means of SDS-PAGE (Figure 3A). As can be seen in the images, the extracts presented different electrophoretic profiles. In general, these differences appeared to be related to both the type of side stream and the type of extraction process. In order to obtain the molecular weight of each band and also to group the areas of the bands by kDa ranges, the images of the gels were analyzed using the ImageJ and GraphPad Prism programs (Figure 3B).
For muscle leftovers, clear bands from 9 to 108 kDa were observed in control and PLE extracts, which could be due to the fact that both extraction processes were carried out at room temperature. However, the differences in the width of the bands revealed that PLE extracts presented a greater amount of total protein fragments for all molecular weight groups. This behavior is in agreement with those previously reported for sea bass and sea bream muscle remains subjected to the same PLE and shaking extraction conditions. Protein fragments of head control extracts showed several bands from 10 to 108 kDa, whereas the highest protein molecular weight for head PLE extracts was 96 kDa. In addition, bands of 20-50 kDa in head control extracts were not found in head PLE extracts. In contrast, control and PLE extracts from salmon viscera exhibited the same protein molecular weight distribution (≤7-73 kDa) and few slight bands. The range of values was similar to that shown by sea bass and sea bream viscera extracts (8-61 kDa). Both skin and tailfin extracts presented wider molecular weight ranges (≈6-140 kDa) than muscle, heads, and viscera extracts. Furthermore, for both samples, several protein bands in control extracts did not appear in PLE extracts. According to the gel image analysis, bands in the 25-50 and 75-125 kDa ranges from control skin extracts were not present in the corresponding PLE extracts. Similarly, the 10-30 kDa protein fragments in tailfin control extracts were not found in those of PLE. The protein molecular weight distribution of discards from Australian Atlantic salmon was evaluated previously. The head and skin protein fragments were in the range of 25-250 kDa, whereas most of the viscera were below 10 kDa. Based on these results, sodium dodecyl sulfate polyacrylamide gel electrophoresis (SDS-PAGE) revealed different protein profiles between the matrices studied. In addition, differences observed among control and PLE extracts for each side stream have shown that PLE-assisted extraction influenced the size of protein fragments obtained in the extracts. It should be noted that this electrophoretic technique provides additional information as to the total protein content. However, it does not allow the retention of peptides in the gel, which could be relevant to correlate the presence of peptides with the antioxidant capacity shown by the extracts.

Identification of Peptides in Viscera Extracts

As previously described, the salmon viscera extracts obtained through PLE-assisted extraction proved to be the most interesting sample in terms of in vitro antioxidant capacity. Their TEAC and ORAC values not only stand out against the other salmon byproducts studied here, but also in comparison with previously investigated PLE protein extracts from sea bass and sea bream viscera. For this reason, PLE protein extracts from salmon viscera were selected for the identification of antioxidant peptides. Control viscera extracts were also screened in order to compare peptides extracted through PLE and under stirring conditions. Only peptides with a confidence percentage ≥ 90% have been reported. A total of 137 peptides were identified in the PLE viscera extracts (Table 1). In contrast, 67 peptides were identified in the viscera control extracts (Table 2).
Despite using the same viscera sample, only five peptides matched in both extracts (color marked in both tables). These data show that the extraction conditions used for PLE-assisted extraction influence the peptides obtained from salmon viscera. A common method currently used to speculate about peptide function is through an amino acid homology alignment against a database of known functional peptide sequences. The antioxidant activity of the identified peptides was thus predicted using the BIOPEP-UWM database, which is a bioinformatics tool for searching among bioactive peptides, mainly derived from foods. None of the peptides identified in salmon viscera extracts were found among the antioxidant peptides inputted in the BIOPEP-UWM database. Therefore, a new search based on the profiles of the potential biological activity of peptides was performed. BIOPEP-UWM analysis results exhibited several antioxidant small peptides encrypted in amino acid sequences of PLE (Table 3) and control (Table 4) viscera extracts, with some of them known to be derived from marine species. Throughout the entire structure of the peptides, 19 different sequences with antioxidant activity were found in the PLE extract, whereas there were 12 in the control extract. Most of these potential antioxidant peptides were di- and tri-peptides. The sequence GPP was found in 15 peptides of the PLE extract, followed by GAA, which was found in five peptides. These sequences could be responsible for antioxidant activity, since antioxidant peptides from marine resources have been described to contain hydrophobic amino acids such as glycine (G), proline (P), and alanine (A). Furthermore, salmon antioxidant peptides from the pectoral fin (FLNEFLHV) and trimmings (GGPAGPAV, GPVA, PP, GP) have been reported. Several antioxidant peptide sequences from the viscera of sardinella (LHT, LARL, GGE), black pomfret (AMT6GLEA), and mackerel (ACFL) have also been identified. In addition to specific amino acids, peptides derived from fish sources, especially in the range of 0.5-1.5 kDa, have been assumed to be a key factor in terms of antioxidant activity. The molecular weight of peptides in control viscera extracts ranged from 0.63 to 2.44 kDa (Table 4), whereas for viscera PLE extracts, the molecular weight of peptides was 0.67-2.60 kDa (Table 2). However, there was a greater amount of small peptides in the PLE extract. As can be seen in Figure 4, a higher intensity of analytes with shorter retention times was observed for the viscera PLE extract, which in the case of peptides usually corresponds to more polar and/or smaller compounds. According to these results, both the specific amino acid sequences encrypted in the identified peptides and a molecular weight below 1.5 kDa could be related to the antioxidant capacity exhibited by the PLE extract obtained from salmon viscera.

Determination of Heavy Metals and Mycotoxins in Salmon Side Streams

The concentrations of As, Hg, Cd, and Pb in salmon muscle, heads, viscera, skin, and tailfins are shown in Table 5. Mean concentration ranges, expressed as µg/g of wet weight (ww), were 0.4186-0.6922, 0.0095-0.0408, 0.0004-0.0104, and 0.0071-0.0859 for As, Hg, Cd, and Pb, respectively. For all salmon side streams, the most abundant element was As, whereas the lowest concentration was observed for Cd. There is a lack of information in the literature on heavy metal contents in salmon discards. For instance, one study reported liver Hg accumulations in four wild species of Pacific salmon. The results (0.120-0.192 µg/g, ww) were higher than those found in the present study for viscera samples, which include more organs than the liver. The contents of As, Hg, Cd, and Pb in several fish side streams of sea bass, sea bream, and meagre have also been described. The arsenic levels in the viscera (1.867-2.587 µg/g, ww) of these fish species were higher than those in the salmon viscera. The data available on toxic elements in fish usually refer to edible muscle due to the potential health risk for consumers. In this sense, levels of Cd and Pb in 21 samples of smoked salmon from a Polish market were determined. The results were on the order of 0.0040-0.0196 µg/g (ww) for Cd and 0.0109-0.1559 µg/g (ww) for Pb, both of which are considered safe for consumers. In addition, As, Hg, Cd, and Pb contents in fresh salmon muscle were evaluated. It should be noted that the limits for heavy metals in fish side streams are not currently regulated. Therefore, the safety assessment could be based on the limit values established for edible muscles of fish (µg/g): 13.5 for As, 0.5 for Hg, 0.05 for Cd, and 0.30 for Pb. According to this, the toxic elements analyzed in all salmon side streams in this study are below the limits set by authorities and could be considered safe for consumers in terms of As, Hg, Cd, and Pb content. Nostbakken et al. showed a trend towards a decrease in As and Hg content in farmed Atlantic salmon, which was related to the decline in the use of fish meal and fish oil in commercial fish feed. However, the replacement of marine ingredients by others of plant origin can lead to the presence of contaminants such as mycotoxins in both aquafeeds and fish tissues. In this way, Bernhoft et al. conducted a toxicokinetic study of deoxynivalenol (DON) and ochratoxin A (OTA) mycotoxins in farmed salmon fed with contaminated feeds for 8 weeks.
Nostbakken et al. showed a trend towards a decrease in As and Hg content in farmed Atlantic salmon, which was related to the decline in the use of fish meal and fish oil in commercial fish feed. However, the replacement of marine ingredients with others of plant origin can lead to the presence of contaminants such as mycotoxins in both aquafeeds and fish tissues. In this regard, Bernhoft et al. conducted a toxicokinetic study of the mycotoxins deoxynivalenol (DON) and ochratoxin A (OTA) in farmed salmon fed contaminated feeds for 8 weeks. The authors observed an even distribution of DON in the liver, kidney, brain, skin, and muscle, whereas OTA was distributed mainly in the liver and kidney. On this basis, the possible occurrence of mycotoxins in the muscle, head, viscera, skin, and tailfin of farmed salmon was investigated in the present study. Using a simultaneous multi-mycotoxin evaluation based on a non-targeted screening approach, no mycotoxins or related metabolites were identified in the salmon side streams. These results agree with those of Nácher-Mestre et al. on the carry-over of common and emerging mycotoxins from feeds to edible parts of farmed Atlantic salmon fed high plant-based diets. Likewise, no aflatoxins, fumonisins, enniatins, or ochratoxin A have been detected in smoked salmon and raw salmon sushi commercial products.

Raw Material and Sample Preparation

Whole salmon (Salmo salar) from Norwegian aquaculture were purchased in a local market in Valencia (Spain) during different weeks of June 2019. They were immediately transported to the laboratories of the University of Valencia under refrigerated conditions. Individual salmon were dissected to simulate fish processing for human consumption. Muscle leftovers, complete heads, viscera, flesh-free skin, and tailfins were then placed separately in aluminum containers and frozen at −80 °C for 48 h. Next, they were freeze-dried (FreeZone 2.5, Labconco, USA) for 72 h and kept in a desiccator until reaching a constant weight. Water content was then determined gravimetrically. The moisture percentages were 67.61% ± 1.04%, 61.66% ± 2.52%, 52.31% ± 1.98%, 45.04% ± 1.60%, and 45.63% ± 0.71% for muscle remains, heads, viscera, skin, and tailfins, respectively. Similar values for salmon head, viscera, and skin were reported by Aspevik et al. and He et al. Each type of sample was ground in an analytical mill (A11 basic, IKA® Werke, Staufen, Germany) and stored at −25 °C until the extraction process and the determination of possible food contaminants.

Pressurized Liquid Extraction (PLE) Process

Antioxidant protein extracts from salmon side stream materials were obtained using an ASE 200 accelerated solvent extractor (Dionex, Sunnyvale, CA, USA) equipped with a solvent controller. Dried samples were mixed with diatomaceous earth before being loaded into 22-mL stainless steel cells fitted with a glass fiber filter at the outlet end. The standard operating parameters were as follows: preheating period (1 min), heating period (5 min), flush volume (60%), and nitrogen purge (145 psi for 1 min). The extractions were performed at a pressure of 1500 psi with distilled water as the solvent. The pH, temperature, and time conditions for PLE-assisted extraction were selected based on a previous optimization of the extraction of antioxidant protein extracts from sea bass side streams: pH 7, 20 °C, 5 min for muscle; pH 4, 60 °C, 15 min for heads; pH 7, 50 °C, 15 min for viscera; pH 7, 55 °C, 5 min for skin; and pH 7, 60 °C, 15 min for tailfins. For all samples, control extractions were carried out in parallel by stirring for 30 min with distilled water at room temperature. Both types of extraction were performed at least in duplicate. The extracts obtained were homogenized individually, divided into several replicates, and stored at −25 °C for subsequent analyses. Protein recovery, protein molecular weight distribution, and total antioxidant capacity were evaluated and compared (PLE vs. control extracts).
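For reference, the stream-specific PLE settings listed above can be collected into a small lookup table, as in the sketch below. The structure and names are illustrative assumptions, not code from the study; only the pH/temperature/time values, the pressure, and the solvent come from the text.

# Stream-specific PLE conditions from the text: (pH, temperature in °C, time in min)
PLE_CONDITIONS = {
    "muscle":   (7, 20, 5),
    "heads":    (4, 60, 15),
    "viscera":  (7, 50, 15),
    "skin":     (7, 55, 5),
    "tailfins": (7, 60, 15),
}
COMMON = {"pressure_psi": 1500, "solvent": "distilled water"}

def conditions_for(stream):
    """Return the full extraction settings for one side stream."""
    ph, temp_c, time_min = PLE_CONDITIONS[stream]
    return dict(pH=ph, temperature_C=temp_c, time_min=time_min, **COMMON)

print(conditions_for("viscera"))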
Trolox Equivalent Antioxidant Capacity Assay (TEAC)

The TEAC assay measures the inhibition of the radical cation ABTS•+ by antioxidant compounds, compared with the activity of a reference antioxidant standard (Trolox). The spectrophotometric method proposed by de la Fuente et al. was used. ABTS reagent (7 mM) and K2S2O8 (140 mM) were mixed and kept at room temperature in darkness for 16 h to generate the ABTS•+ stock solution. This was then diluted in ethanol to an absorbance of 0.700 ± 0.020 at 734 nm and 30 °C to obtain the ABTS•+ working solution. Each fish extract was diluted so as to achieve approximately 50% inhibition of absorbance. A range of Trolox standard solutions (0-300 µM) was prepared. The absorbance of 2 mL of ABTS•+ working solution was taken as the initial point of the reaction (A0). Then, 100 µL of diluted extract or Trolox standard were added immediately. After 3 min of reaction, the absorbance was measured and considered the final point (Af). All measurements were conducted in a thermostatted UV-vis spectrophotometer. The percentage of absorbance inhibition was calculated as [1 − (Af/A0)] × 100 and compared to the Trolox standard curve. The results were expressed as µM Trolox Equivalents.

Oxygen Radical Absorbance Capacity Assay (ORAC)

The ORAC assay measures the scavenging of peroxyl radicals generated from AAPH by antioxidant compounds. The fluorometric method described by de la Fuente et al. was applied. Sodium fluorescein (0.015 mg/mL), AAPH solution (120 mg/mL), and Trolox standard solution (100 µM) were prepared in phosphate buffer (75 mM, pH 7). Adequately diluted extracts were required. The final reaction consisted of 50 µL of diluted extract, Trolox standard, or phosphate buffer (blank), 50 µL of fluorescein, and 25 µL of AAPH, incubated at 37 °C in a Multilabel Plate Counter VICTOR3 1420 (PerkinElmer, Turku, Finland). Fluorescence filters for an excitation wavelength of 485 nm and an emission wavelength of 535 nm were selected. The fluorescence was recorded every 5 min over 60 min, by which time it had fallen to less than 5% of its initial value. The difference in the area under the fluorescence decay curve (AUC) between the sample and the blank was computed, and the results were expressed as µM Trolox Equivalents.

Determination of Protein Recovery

The total nitrogen content in the salmon side stream materials and in the extracts obtained by conventional stirring and by PLE-assisted extraction was determined using the Kjeldahl method. The total protein content was calculated from the total nitrogen values using the protein-nitrogen conversion factor (6.25) for fish and fish side streams. Protein recovery was then calculated as (protein in extract/protein in side stream) × 100.
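The three calculations just described (TEAC inhibition, ORAC net AUC, and protein recovery) reduce to short formulas; the following Python sketch shows them with placeholder numbers, purely to make the arithmetic explicit. None of the values are measured data.

import numpy as np

def teac_inhibition(a0, af):
    """TEAC: percentage inhibition of ABTS absorbance, [1 - Af/A0] x 100."""
    return (1.0 - af / a0) * 100.0

def orac_net_auc(t_min, f_sample, f_blank):
    """ORAC: difference in area under the fluorescence decay curves (AUC)."""
    return np.trapz(f_sample, t_min) - np.trapz(f_blank, t_min)

def protein_recovery(protein_extract, protein_side_stream):
    """Protein recovery (%) = (protein in extract / protein in side stream) x 100."""
    return 100.0 * protein_extract / protein_side_stream

print(teac_inhibition(a0=0.700, af=0.350))   # ~50% inhibition target
t = np.arange(0, 65, 5)                      # readings every 5 min over 60 min
print(orac_net_auc(t, np.exp(-0.03 * t), np.exp(-0.08 * t)))
print(protein_recovery(4.6, 5.0))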
Molecular Weight Distribution of Protein Fragments

SDS-PAGE was used to investigate the protein molecular weight distribution of both the control (stirring) and optimal (PLE) extracts from salmon side stream materials. Acetone was added to the extracts at a 4:1 ratio (v/v) and the mixture was vortexed. For protein precipitation, the mixture was centrifuged at 11,000 rpm and 4 °C for 10 min. The supernatant was then removed, and the pellet was dissolved in distilled water. Afterwards, equal volumes of SDS-PAGE sample buffer solution (62.5 mM Tris-HCl (pH 6.8), 2% SDS, 20% glycerol, 0.01% bromophenol blue, and 50 mM dithiothreitol) and protein solution were mixed and heated in a thermoblock (95 °C, 5 min). Next, 10 µL were loaded onto 8-16% Mini-PROTEAN® TGX™ Precast gels (Bio-Rad). The electrophoresis was performed using a Mini-PROTEAN® Tetra Cell (Bio-Rad) at a constant voltage of 80 V for 120 min. The running buffer consisted of Trizma® base (25 mM), glycine (192 mM), and SDS (0.1%). The gels obtained were stained with Coomassie brilliant blue R-250 (0.125%) and destained in a water:methanol:acetic acid (70:20:10) solution until the background was as clear as possible. To estimate the molecular weight of the protein bands in the electrophoretic gels, a molecular weight standard (5-250 kDa, Precision Plus Protein™, Bio-Rad) was used. The gel images were also evaluated using ImageJ® software, a public-domain digital image processing program developed at the National Institutes of Health (NIH). For better visualization of the protein bands, background subtraction and 8-bit format were selected.

Determination of Heavy Metals in Salmon Side Stream Materials

To correct matrix-induced signal fluctuations and instrumental drift, internal standard solutions of 72Ge, 103Rh, and 193Ir (ISC Science) at 20 µg/g were used. For the quantification of As, Cd, and Pb, standard calibration curves from 0 to 1000 µg/L were used, whereas for Hg a standard calibration curve from 0 to 100 µg/L was utilized. Limits of detection (LODs) were calculated according to the equation LOD = 3sB/a, where "3sB" is 3 times the standard deviation at zero concentration and "a" is the slope of the calibration curve. The LOD values obtained for As, Hg, Cd, and Pb were 0.012, 0.0015, 0.004, and 0.0015 µg/L, respectively. The concentrations of heavy metals in the digested blank (distilled water) were subtracted from the sample values. The results were expressed as µg of element per g of side stream material in wet weight. To confirm the accuracy of the method, the fish protein powder DORM-3 was used as the Certified Reference Material for trace metals; it was prepared and analyzed simultaneously with the salmon samples. The recovery percentages were 98%, 86%, 76%, and 77% for As, Hg, Cd, and Pb, respectively.
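The LOD formula above is easy to check numerically. The sketch below computes LOD = 3·sB/a from a set of replicate blank signals and a calibration slope; the inputs are hypothetical, not the study's calibration data.

import numpy as np

def limit_of_detection(blank_signals, slope):
    """LOD = 3 * (standard deviation of blank signals) / calibration slope."""
    return 3.0 * np.std(blank_signals, ddof=1) / slope

blanks = np.array([120.0, 118.0, 123.0, 119.0, 121.0])  # blank signal counts (hypothetical)
print(f"LOD = {limit_of_detection(blanks, slope=450.0):.4f} µg/L")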
Analysis of Mycotoxins in Salmon Side Stream Materials

High-performance liquid chromatography coupled with electrospray ionization-quadrupole-time-of-flight mass spectrometry (LC-ESI-qTOF-MS) was employed to investigate the occurrence of mycotoxins in salmon side stream materials. An Agilent 1200-LC system (Agilent Technologies, Palo Alto, CA, USA) equipped with a Gemini® NX-C18 column (3 µm, 150 × 2 mm ID) (Phenomenex), as well as a vacuum degasser, binary pump, and autosampler, was used to achieve the chromatographic separations. The mobile phases consisted of acidified (0.1% formic acid) water (A) and acetonitrile (B). A gradient program of 50% B (0-6 min), 100% B (7-12 min), and 50% B (13-20 min) was applied. Samples (5 µL) were injected at a flow rate of 0.2 mL/min. Mass spectrometry (MS) analysis was carried out using a 6540 Agilent Ultra-High-Definition Accurate-Mass q-TOF-MS coupled to the HPLC, equipped with an Agilent Dual Jet Stream electrospray ionization (Dual AJS ESI) interface in positive and negative ionization modes. The operational conditions were as follows: nebulizer pressure (50 psi); capillary voltage (3500 V); fragmentor voltage (160 V); scan range (m/z 50-1500); drying gas temperature (370 °C); and nitrogen drying gas flow (12.0 L/min). Automatic MS/MS experiments were performed at the following collision energy values: m/z 100, 30 eV; m/z 500, 35 eV; m/z 1000, 40 eV; and m/z 1500, 45 eV. MassHunter Workstation software was used for data acquisition and integration. The QuEChERS procedure previously reported by de la Fuente et al. was applied to extract mycotoxins from fish discards. Approximately 3 g of salmon sample were mixed with 30 mL of acidified water (2% formic acid) in an orbital shaker (IKA KS 260) for 30 min. Then, 10 mL of acetonitrile were added and the mixture was stirred for a further 30 min. Next, 8 g of MgSO4 and 2 g of NaCl were added, and the mixture was vortexed for 30 s and centrifuged at 4000 rpm for 10 min. Afterward, 0.1 g of octadecyl C18 sorbent and 0.3 g of MgSO4 were mixed with 2 mL of supernatant. Additional shaking and centrifugation were performed under the same conditions as above. The supernatant was then filtered (13 mm/0.22 µm nylon filter) and 20 µL were injected into the LC-ESI-qTOF-MS system.

Statistical Analysis

Experimental data were subjected to one-way analysis of variance (ANOVA) to determine significant differences among samples. Tukey's honestly significant difference (HSD) multiple range test, at a significance level of p < 0.05, was applied. Statistical analyses were performed with Statgraphics Centurion XVI.I software (Statpoint Technologies, Inc., The Plains, VA, USA).

Conclusions

The pressurized liquid extraction (PLE) technique allowed us to obtain, for the first time, protein extracts with in vitro antioxidant capacity from Atlantic salmon processing side streams. PLE-assisted extraction influenced the size of the protein fragments obtained, since extracts from muscle leftovers, heads, viscera, skin, and tailfins showed different SDS-PAGE profiles. Both the highest protein recovery (92%) and the highest antioxidant capacity were observed for the viscera PLE extract. Since 40% of the peptides identified in the PLE extract contained small peptide sequences with known antioxidant activity, salmon viscera could be considered an interesting source of antioxidant peptides. Further research on the relationship between antioxidant activity and specific peptides from the salmon viscera PLE extract is required. The levels of toxic metals (As, Hg, Cd, and Pb) and the absence of mycotoxins in salmon processing side streams not only add to the limited data in the literature on these contaminants in farmed fish, but also provide information on their safety as candidates for use in the food industry.

Funding: This research was funded by BBI-JU through the H2020 Project AQUABIOPRO-FIT "Aquaculture and agriculture biomass side stream proteins and bioactives for feed, fitness, and health promoting nutritional supplements" (Grant number 790956).

Informed Consent Statement: Not applicable.

Data Availability Statement: Not applicable.
package com.csigroup.security;

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.security.authentication.AuthenticationProvider;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.AuthenticationException;
import org.springframework.security.core.authority.SimpleGrantedAuthority;
import org.springframework.security.core.userdetails.User;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.stereotype.Service;

import com.csigroup.entity.AuthMUser;
import com.csigroup.entity.AuthMUserRole;
import com.csigroup.exception.CSIAuthException;
import com.csigroup.service.UserAuthenicationServiceDao;
import com.csigroup.util.MessageBuddle;

/**
 * Custom Spring Security {@link AuthenticationProvider} that validates
 * credentials against the application's user store and maps the user's
 * persisted roles to granted authorities.
 */
@Service
public class CSIAuthenicationProvider implements AuthenticationProvider {

	@Autowired
	UserAuthenicationServiceDao userService;

	@Autowired
	@Qualifier("MessageBuddle")
	MessageBuddle buddle;

	@Autowired
	PasswordEncoder encoder;

	@Override
	public Authentication authenticate(Authentication authentication) throws AuthenticationException {
		List<SimpleGrantedAuthority> authorities = new ArrayList<>();

		// Reject blank usernames before hitting the database.
		if (StringUtils.isBlank(authentication.getName())) {
			throw new CSIAuthException(buddle.getEnglishMessage("msg.error.blankuser"));
		}

		// Look up the user record; fail if the account does not exist.
		AuthMUser userM = userService.findByUser(authentication.getName());
		if (userM == null) {
			throw new CSIAuthException(buddle.getEnglishMessage("msg.error.usernotfound"));
		}

		// Verify the supplied password against the stored (encoded) one.
		if (!encoder.matches(authentication.getCredentials().toString(), userM.getPassWord())) {
			throw new CSIAuthException(buddle.getEnglishMessage("msg.error.passwordnotmatch"));
		}

		// Map each persisted role to a Spring Security authority.
		List<AuthMUserRole> role = userM.getAuthUserRoles();
		for (AuthMUserRole c : role) {
			authorities.add(new SimpleGrantedAuthority(c.getRoleShortName()));
		}

		User user = new User(authentication.getName(), authentication.getCredentials().toString(), authorities);
		return new UsernamePasswordAuthenticationToken(user, authentication.getCredentials().toString(), authorities);
	}

	@Override
	public boolean supports(Class<?> authentication) {
		return true;
	}
}
The 12 boys and their soccer coach found in a partially flooded cave in northern Thailand after 10 days are mostly in stable medical condition and have received high-protein liquid food, officials said Tuesday. It is not yet known when they will be able to go home: flooding and other factors could make their extraction dangerous, and experts have said it could be safer to simply supply them where they are for now. Thailand's rainy season typically lasts through October. In the 5-minute navy video, the boys are quiet as they sit on their haunches, legs bent in front of them. "You are very strong," one of the rescuers says in English. Someone asks what day it is, and the rescuer responds, "Monday. Monday. You have been here — 10 days." One boy, noticing the camera and hearing unfamiliar words, says in Thai, "Oh, they want to take a picture; tell him we're hungry. I haven't had anything to eat." Then the boy breaks into simple English, saying, "Eat, eat, eat," to which another voice responds in Thai that he already told that to the rescuer. Narongsak said officials had met and agreed on the need to "ensure 100 percent safety for the boys when we bring them out." "These are challenging conditions and there's a lot of consideration for safety as well as, the environment outside is contributing to the environment inside," said U.S. Air Force Capt. Jessica Tait, part of a 30-member U.S. military team assisting in the search, referring to the rain that has been flooding the cave. "So I'd say, yeah, it's an accurate statement that it's challenging."
package com.zeligsoft.domain.omg.dds.api.QOS.impl;

import com.zeligsoft.domain.omg.dds.api.QOS.presentationQosPolicy;
import com.zeligsoft.domain.omg.dds.api.QOS.PresentationQosPolicyAccessScopeKind;
import com.zeligsoft.base.zdl.util.ZDLUtil;

/**
 * Wrapper implementation for the DDS presentation QoS policy. Property values
 * are read from, and written to, the underlying EMF model element via ZDLUtil.
 */
public class presentationQosPolicyZImpl extends qosPolicyZImpl implements presentationQosPolicy {

	// Lazily created wrapper for the access_scope enumeration value.
	protected PresentationQosPolicyAccessScopeKind _access_scope;

	public presentationQosPolicyZImpl(org.eclipse.emf.ecore.EObject element) {
		super(element);
	}

	@Override
	public Boolean getOrdered_access() {
		final Object rawValue = ZDLUtil.getValue(eObject(),
				"DDS::QOS::presentationQosPolicy", "ordered_access");
		return (Boolean) rawValue;
	}

	@Override
	public void setOrdered_access(Boolean val) {
		ZDLUtil.setValue(element, "DDS::QOS::presentationQosPolicy",
				"ordered_access", val);
	}

	@Override
	public PresentationQosPolicyAccessScopeKind getAccess_scope() {
		final Object rawValue = ZDLUtil.getValue(eObject(),
				"DDS::QOS::presentationQosPolicy", "access_scope");
		// Wrap the raw EMF enumeration literal once and cache it.
		if (_access_scope == null) {
			if (rawValue instanceof org.eclipse.emf.ecore.EObject) {
				_access_scope = PresentationQosPolicyAccessScopeKind
						.create((org.eclipse.emf.ecore.EObject) rawValue);
			}
		}
		return _access_scope;
	}

	@Override
	public void setAccess_scope(PresentationQosPolicyAccessScopeKind val) {
		ZDLUtil.setValue(element, "DDS::QOS::presentationQosPolicy",
				"access_scope", val.eObject(element));
	}

	@Override
	public Boolean getCoherent_access() {
		final Object rawValue = ZDLUtil.getValue(eObject(),
				"DDS::QOS::presentationQosPolicy", "coherent_access");
		return (Boolean) rawValue;
	}

	@Override
	public void setCoherent_access(Boolean val) {
		ZDLUtil.setValue(element, "DDS::QOS::presentationQosPolicy",
				"coherent_access", val);
	}
}
// EnableUI, given a file system containing the static assets, adds a route for // the user interface. func (r *router) EnableUI(ui http.FileSystem) { fileServer := http.FileServer(ui) r.NotFound(func(w http.ResponseWriter, req *http.Request) { if _, err := ui.Open(req.URL.Path); err != nil { req.URL.Path = "" } fileServer.ServeHTTP(w, req) }) }
#include "namaster.h" #include "ctest.h" #include "utils.h" #include "nmt_test_utils.h" #include <chealpix.h> CTEST(nmt,fsk_mapcpy) { int ii; nmt_flatsky_info *fsk=nmt_flatsky_info_alloc(100,100,M_PI/180,M_PI/180); double *mp1=my_malloc(fsk->npix*sizeof(double)); double *mp2=my_malloc(fsk->npix*sizeof(double)); for(ii=0;ii<fsk->npix;ii++) { mp1[ii]=2.; mp2[ii]=0.5; } fs_mapcpy(fsk,mp1,mp2); for(ii=0;ii<fsk->npix;ii++) ASSERT_DBL_NEAR_TOL(mp1[ii],mp2[ii],1E-10); free(mp1); free(mp2); nmt_flatsky_info_free(fsk); } CTEST(nmt,fsk_read_bad) { int nx,ny; flouble lx,ly; flouble *map; set_error_policy(THROW_ON_ERROR); //Test non-existent file try{ map=fs_read_flat_map("none.fits",&nx,&ny,&lx,&ly,0); } ASSERT_NOT_EQUAL(0,nmt_exception_status); //Test incorrect file format try{ map=fs_read_flat_map("test/benchmarks/msk.fits",&nx,&ny,&lx,&ly,1); } ASSERT_NOT_EQUAL(0,nmt_exception_status); try{ map=fs_read_flat_map("test/benchmarks/msk.fits",&nx,&ny,&lx,&ly,0); } ASSERT_NOT_EQUAL(0,nmt_exception_status); //Test inexistent field try{ map=fs_read_flat_map("test/benchmarks/msk_flat.fits",&nx,&ny,&lx,&ly,1); } ASSERT_NOT_EQUAL(0,nmt_exception_status); set_error_policy(EXIT_ON_ERROR); } CTEST(nmt,fsk_read_good) { int nx,ny; flouble lx,ly; flouble *map; //Test successful read map=fs_read_flat_map("test/benchmarks/msk_flat.fits",&nx,&ny,&lx,&ly,0); ASSERT_EQUAL(NX_TEST,nx); ASSERT_EQUAL(NY_TEST,ny); ASSERT_DBL_NEAR_TOL(DX_TEST*NX_TEST*M_PI/180.,lx,1E-10); ASSERT_DBL_NEAR_TOL(DY_TEST*NY_TEST*M_PI/180.,ly,1E-10); ASSERT_NOT_NULL(map); ASSERT_DBL_NEAR_TOL(0.,map[0],1E-10); ASSERT_DBL_NEAR_TOL(6.064284705880828E-01,map[50 +nx*20 ],1E-10); ASSERT_DBL_NEAR_TOL(9.999850684720466E-01,map[nx/2+nx*(ny/2)],1E-10); free(map); } CTEST(nmt,fsk_synalm) { int ii; int nbpw=30; int nmaps=2; int ncls=nmaps*nmaps; nmt_flatsky_info *fsk=nmt_flatsky_info_alloc(141,311,M_PI/180,M_PI/180); double lmax_x=fsk->nx*M_PI/fsk->lx; double lmax_y=fsk->ny*M_PI/fsk->ly; double lmax=sqrt(lmax_x*lmax_x+lmax_y*lmax_y); double lpivot=lmax/6.; double alpha_pivot=1.; double *larr=my_malloc((nbpw+1)*sizeof(double)); double **cells=my_malloc(ncls*sizeof(double *)); long *npixls=my_calloc(nbpw,sizeof(long)); for(ii=0;ii<ncls;ii++) cells[ii]=my_calloc(nbpw+1,sizeof(double)); for(ii=0;ii<=nbpw;ii++) { double ll=ii*lmax/nbpw; larr[ii]=ll; cells[0][ii]=pow((2*lpivot)/(ll+lpivot),alpha_pivot); cells[3][ii]=pow((2*lpivot)/(ll+lpivot),alpha_pivot); } nmt_binning_scheme_flat *bpw=nmt_bins_flat_create(nbpw,larr,&(larr[1])); double dkx=2*M_PI/fsk->lx,dky=2*M_PI/fsk->ly; for(ii=0;ii<fsk->ny;ii++) { int jj; double ky; int ibin=0; if(2*ii<=fsk->ny) ky=ii*dky; else ky=-(fsk->ny-ii)*dky; for(jj=0;jj<fsk->nx;jj++) { double kx,kmod; if(2*jj<=fsk->nx) kx=jj*dkx; else kx=-(fsk->nx-jj)*dkx; kmod=sqrt(kx*kx+ky*ky); ibin=nmt_bins_flat_search_fast(bpw,kmod,ibin);//(int)(kmod/lmax*nbpw); if((ibin>=0) && (ibin<nbpw)) npixls[ibin]++; } } nmt_k_function **clf=my_malloc(ncls*sizeof(nmt_k_function *)); nmt_k_function **clf_pass=my_malloc(3*sizeof(nmt_k_function *)); nmt_k_function **bmf=my_malloc(nmaps*sizeof(nmt_k_function *)); clf[0]=nmt_k_function_alloc(nbpw+1,larr,cells[0],1.,0.,0); clf[1]=nmt_k_function_alloc(nbpw+1,larr,cells[1],0.,0.,0); clf[2]=nmt_k_function_alloc(nbpw+1,larr,cells[2],0.,0.,0); clf[3]=nmt_k_function_alloc(nbpw+1,larr,cells[3],1.,0.,0); clf_pass[0]=clf[0]; clf_pass[1]=clf[1]; clf_pass[2]=clf[3]; for(ii=0;ii<nmaps;ii++) bmf[ii]=nmt_k_function_alloc(nbpw+1,larr,NULL,1.,1.,1); fcomplex 
**alms=fs_synalm(fsk->nx,fsk->ny,fsk->lx,fsk->ly,nmaps,clf_pass,bmf,1234); fs_alm2cl(fsk,bpw,alms,alms,1,1,cells,1.,-1.,1.,-1.); for(ii=0;ii<nbpw;ii++) { int im1; double l=0.5*(larr[ii]+larr[ii+1]); for(im1=0;im1<nmaps;im1++) { int im2; for(im2=0;im2<nmaps;im2++) { double c11=nmt_k_function_eval(clf[im1+nmaps*im1],l,NULL); double c12=nmt_k_function_eval(clf[im2+nmaps*im1],l,NULL); double c21=nmt_k_function_eval(clf[im1+nmaps*im2],l,NULL); double c22=nmt_k_function_eval(clf[im2+nmaps*im2],l,NULL); double sig=sqrt((c11*c22+c12*c21)/npixls[ii]); double diff=fabs(cells[im2+nmaps*im1][ii]-c12); //Check that there are no >5-sigma fluctuations around input power spectrum ASSERT_TRUE((int)(diff<5*sig)); } } } for(ii=0;ii<nmaps;ii++) dftw_free(alms[ii]); free(alms); for(ii=0;ii<ncls;ii++) free(cells[ii]); for(ii=0;ii<ncls;ii++) nmt_k_function_free(clf[ii]); for(ii=0;ii<nmaps;ii++) nmt_k_function_free(bmf[ii]); nmt_bins_flat_free(bpw); nmt_flatsky_info_free(fsk); free(npixls); free(clf); free(clf_pass); free(bmf); free(cells); free(larr); } CTEST(nmt,fsk_cls) { int ii; int nmaps=34; int nbpw=10; nmt_flatsky_info *fsk=nmt_flatsky_info_alloc(141,311,M_PI/180,M_PI/180); double **cells=my_malloc(17*sizeof(double *)); for(ii=0;ii<17;ii++) cells[ii]=my_calloc(nbpw,sizeof(double)); //Analytic example (same as in fsk_fft) int i0_x=2,i0_y=3; double k0_x=i0_x*2*M_PI/fsk->lx; double k0_y=i0_y*2*M_PI/fsk->ly; double **maps0=test_make_map_analytic_flat(fsk,0,i0_x,i0_y); double **maps1=test_make_map_analytic_flat(fsk,1,i0_x,i0_y); double **maps2=test_make_map_analytic_flat(fsk,2,i0_x,i0_y); fcomplex **alms0=my_malloc(1*sizeof(fcomplex *)); fcomplex **alms1=my_malloc(2*sizeof(fcomplex *)); fcomplex **alms2=my_malloc(2*sizeof(fcomplex *)); alms0[0]=dftw_malloc(fsk->ny*(fsk->nx/2+1)*sizeof(fcomplex)); alms1[0]=dftw_malloc(fsk->ny*(fsk->nx/2+1)*sizeof(fcomplex)); alms1[1]=dftw_malloc(fsk->ny*(fsk->nx/2+1)*sizeof(fcomplex)); alms2[0]=dftw_malloc(fsk->ny*(fsk->nx/2+1)*sizeof(fcomplex)); alms2[1]=dftw_malloc(fsk->ny*(fsk->nx/2+1)*sizeof(fcomplex)); fs_map2alm(fsk,1,0,maps0,alms0); fs_map2alm(fsk,1,1,maps1,alms1); fs_map2alm(fsk,1,2,maps2,alms2); //Bandpowers double lmax=fmax(fsk->nx*M_PI/fsk->lx,fsk->ny*M_PI/fsk->ly); double *l0=my_malloc(nbpw*sizeof(double)); double *lf=my_malloc(nbpw*sizeof(double)); long *npixls=my_calloc(nbpw,sizeof(long)); double dkx=2*M_PI/fsk->lx,dky=2*M_PI/fsk->ly; int ibpw0=(int)(sqrt(k0_x*k0_x+k0_y*k0_y)*nbpw/lmax); for(ii=0;ii<nbpw;ii++) { l0[ii]=ii*lmax/nbpw; lf[ii]=(ii+1.)*lmax/nbpw; } nmt_binning_scheme_flat *bpw=nmt_bins_flat_create(nbpw,l0,lf); for(ii=0;ii<fsk->ny;ii++) { int jj; double ky; int ibin=0; if(2*ii<=fsk->ny) ky=ii*dky; else ky=-(fsk->ny-ii)*dky; for(jj=0;jj<fsk->nx;jj++) { double kx,kmod; if(2*jj<=fsk->nx) kx=jj*dkx; else kx=-(fsk->nx-jj)*dkx; kmod=sqrt(kx*kx+ky*ky); ibin=nmt_bins_flat_search_fast(bpw,kmod,ibin);//(int)(kmod/lmax*nbpw); if((ibin>=0) && (ibin<nbpw)) npixls[ibin]++; } } //Theory prediction <|a|^2>*(2*pi/Lx)*(2*pi/Ly) [<|a|^2>=0.5] double predict=0.5*2*M_PI*2*M_PI/(fsk->lx*fsk->ly*npixls[ibpw0]); //Compute power spectra and compare with prediction fs_alm2cl(fsk,bpw,alms0,alms0,0,0,&(cells[0]),1.,-1.,1.,-1.); fs_alm2cl(fsk,bpw,alms0,alms1,0,1,&(cells[1]),1.,-1.,1.,-1.); fs_alm2cl(fsk,bpw,alms0,alms2,0,2,&(cells[3]),1.,-1.,1.,-1.); fs_alm2cl(fsk,bpw,alms1,alms1,1,1,&(cells[5]),1.,-1.,1.,-1.); fs_alm2cl(fsk,bpw,alms1,alms2,1,2,&(cells[9]),1.,-1.,1.,-1.); fs_alm2cl(fsk,bpw,alms2,alms2,2,2,&(cells[13]),1.,-1.,1.,-1.); for(ii=0;ii<nbpw;ii++) { int jj; double pred=0; 
if(ii==ibpw0) pred=predict; ASSERT_DBL_NEAR_TOL(pred,cells[0][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[1][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[3][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[5][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[9][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[13][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[2][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[4][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[6][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[7][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[8][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[10][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[11][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[12][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[14][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[15][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[16][ii],1E-5); } fs_anafast(fsk,bpw,maps0,maps0,0,0,&(cells[0])); fs_anafast(fsk,bpw,maps0,maps1,0,1,&(cells[1])); fs_anafast(fsk,bpw,maps0,maps2,0,2,&(cells[3])); fs_anafast(fsk,bpw,maps1,maps1,1,1,&(cells[5])); fs_anafast(fsk,bpw,maps1,maps2,1,2,&(cells[9])); fs_anafast(fsk,bpw,maps2,maps2,2,2,&(cells[13])); for(ii=0;ii<nbpw;ii++) { int jj; double pred=0; if(ii==ibpw0) pred=predict; ASSERT_DBL_NEAR_TOL(pred,cells[0][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[1][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[3][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[5][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[9][ii],1E-5); ASSERT_DBL_NEAR_TOL(pred,cells[13][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[2][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[4][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[6][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[7][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[8][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[10][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[11][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[12][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[14][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[15][ii],1E-5); ASSERT_DBL_NEAR_TOL(0.,cells[16][ii],1E-5); } free(l0); free(lf); free(npixls); nmt_bins_flat_free(bpw); for(ii=0;ii<17;ii++) free(cells[ii]); free(cells); dftw_free(maps0[0]); dftw_free(maps1[0]); dftw_free(maps1[1]); dftw_free(maps2[0]); dftw_free(maps2[1]); dftw_free(alms0[0]); dftw_free(alms1[0]); dftw_free(alms1[1]); dftw_free(alms2[0]); dftw_free(alms2[1]); free(maps0); free(maps1); free(maps2); free(alms0); free(alms1); free(alms2); nmt_flatsky_info_free(fsk); } CTEST(nmt,fsk_fft) { int ii; int nmaps=34; nmt_flatsky_info *fsk=nmt_flatsky_info_alloc(141,311,M_PI/180,M_PI/180); double **maps=my_malloc(2*nmaps*sizeof(double *)); fcomplex **alms=my_malloc(2*nmaps*sizeof(fcomplex *)); for(ii=0;ii<2*nmaps;ii++) { maps[ii]=dftw_malloc(fsk->npix*sizeof(double)); alms[ii]=dftw_malloc(fsk->ny*(fsk->nx/2+1)*sizeof(fcomplex)); } //Direct FFT //Single FFT, spin-0 fs_map2alm(fsk,1,0,maps,alms); //Several FFT, spin-0 fs_map2alm(fsk,nmaps,0,maps,alms); //Single FFT, spin-2 fs_map2alm(fsk,1,2,maps,alms); //Several FFT, spin-2 fs_map2alm(fsk,nmaps,2,maps,alms); //Zero_alm and alter_alm fs_zero_alm(fsk,alms[0]); fs_zero_alm(fsk,alms[1]); nmt_k_function *b=fs_generate_beam_window(10.); fs_alter_alm(fsk,10.,alms[0],alms[1],NULL,0); fs_alter_alm(fsk,10.,alms[0],alms[1],b,0); fs_alter_alm(fsk,10.,alms[0],alms[1],b,1); nmt_k_function_free(b); //Inverse FFT //Single FFT, spin-0 fs_alm2map(fsk,1,0,maps,alms); //Several FFT, spin-0 fs_alm2map(fsk,nmaps,0,maps,alms); //Single FFT, spin-2 fs_alm2map(fsk,1,2,maps,alms); //Several FFT, spin-2 fs_alm2map(fsk,nmaps,2,maps,alms); for(ii=0;ii<2*nmaps;ii++) dftw_free(maps[ii]); free(maps); //Particular example //Spin-0. 
map = 2*pi/A * Re[exp(i*k0*x)] -> // a(k) = (delta_{k,k0}+delta_{k,-k0})/2 int i0_x=2,i0_y=3; maps=test_make_map_analytic_flat(fsk,0,i0_x,i0_y); fs_map2alm(fsk,1,0,maps,alms); for(ii=0;ii<fsk->ny;ii++) { int jj; for(jj=0;jj<=fsk->nx/2;jj++) { double re=creal(alms[0][jj+(fsk->nx/2+1)*ii]); double im=cimag(alms[0][jj+(fsk->nx/2+1)*ii]); if((jj==i0_x) && (ii==i0_y)) { ASSERT_DBL_NEAR_TOL(0.5,re,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im,1E-5); } else { ASSERT_DBL_NEAR_TOL(0.0,re,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im,1E-5); } } } dftw_free(maps[0]); free(maps); //Spin-1. map = 2*pi/A * (-cos(phi_k0),sin(phi_k0)) Im[exp(i*k0*x)] -> // a_E(k) = (delta_{k,k0}+delta_{k,-k0})/2 // a_B(k) = 0 maps=test_make_map_analytic_flat(fsk,1,i0_x,i0_y); fs_map2alm(fsk,1,1,maps,alms); for(ii=0;ii<fsk->ny;ii++) { int jj; for(jj=0;jj<=fsk->nx/2;jj++) { double re0=creal(alms[0][jj+(fsk->nx/2+1)*ii]); double im0=cimag(alms[0][jj+(fsk->nx/2+1)*ii]); double re1=creal(alms[1][jj+(fsk->nx/2+1)*ii]); double im1=cimag(alms[1][jj+(fsk->nx/2+1)*ii]); if((jj==i0_x) && (ii==i0_y)) { ASSERT_DBL_NEAR_TOL(0.5,re0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,re1,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im1,1E-5); } else { ASSERT_DBL_NEAR_TOL(0.0,re0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,re1,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im1,1E-5); } } } dftw_free(maps[0]); dftw_free(maps[1]); free(maps); //Spin-2. map = 2*pi/A * (cos(2*phi_k0),-sin(2*phi_k0)) Re[exp(i*k0*x)] -> // a_E(k) = (delta_{k,k0}+delta_{k,-k0})/2 // a_B(k) = 0 maps=test_make_map_analytic_flat(fsk,2,i0_x,i0_y); fs_map2alm(fsk,1,2,maps,alms); for(ii=0;ii<fsk->ny;ii++) { int jj; for(jj=0;jj<=fsk->nx/2;jj++) { double re0=creal(alms[0][jj+(fsk->nx/2+1)*ii]); double im0=cimag(alms[0][jj+(fsk->nx/2+1)*ii]); double re1=creal(alms[1][jj+(fsk->nx/2+1)*ii]); double im1=cimag(alms[1][jj+(fsk->nx/2+1)*ii]); if((jj==i0_x) && (ii==i0_y)) { ASSERT_DBL_NEAR_TOL(0.5,re0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,re1,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im1,1E-5); } else { ASSERT_DBL_NEAR_TOL(0.0,re0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im0,1E-5); ASSERT_DBL_NEAR_TOL(0.0,re1,1E-5); ASSERT_DBL_NEAR_TOL(0.0,im1,1E-5); } } } dftw_free(maps[0]); dftw_free(maps[1]); free(maps); for(ii=0;ii<2*nmaps;ii++) dftw_free(alms[ii]); free(alms); nmt_flatsky_info_free(fsk); } CTEST(nmt,fsk_fft_malloc) { set_error_policy(THROW_ON_ERROR); double *dum=dftw_malloc(10); free(dum); try{ dftw_malloc(-1); } catch(1) {} ASSERT_NOT_EQUAL(0,nmt_exception_status); set_error_policy(EXIT_ON_ERROR); } CTEST(nmt,fsk_info) { nmt_flatsky_info *fsk=nmt_flatsky_info_alloc(100,100,M_PI/180,M_PI/180); ASSERT_EQUAL(10000,fsk->npix); ASSERT_EQUAL(pow(M_PI/180,2)/10000,fsk->pixsize); nmt_flatsky_info_free(fsk); } CTEST(nmt,fsk_algb) { int ii; nmt_flatsky_info *fsk=nmt_flatsky_info_alloc(100,100,M_PI/180,M_PI/180); double *mp1=my_malloc(fsk->npix*sizeof(double)); double *mp2=my_malloc(fsk->npix*sizeof(double)); double *mpr=my_malloc(fsk->npix*sizeof(double)); for(ii=0;ii<fsk->npix;ii++) { mp1[ii]=2.; mp2[ii]=0.5; } double d=fs_map_dot(fsk,mp1,mp2); fs_map_product(fsk,mp1,mp2,mpr); fs_map_product(fsk,mp1,mp2,mp2); for(ii=0;ii<fsk->npix;ii++) { ASSERT_DBL_NEAR_TOL(1.,mpr[ii],1E-10); ASSERT_DBL_NEAR_TOL(1.,mp2[ii],1E-10); } ASSERT_DBL_NEAR_TOL(pow(M_PI/180,2),d,1E-5); free(mp1); free(mp2); free(mpr); nmt_flatsky_info_free(fsk); } static double fk(double k) { return 100./(k+100.); } CTEST(nmt,fsk_func) { int l; long lmax=2000; double *karr=my_malloc((lmax+1)*sizeof(double)); double 
*farr=my_malloc((lmax+1)*sizeof(double)); for(l=0;l<=lmax;l++) { karr[l]=l; farr[l]=fk(karr[l]); } nmt_k_function *kf=nmt_k_function_alloc(lmax+1,karr,farr,1.,0.,0); for(l=0;l<lmax;l++) { double k=l+0.5; double f_int=nmt_k_function_eval(kf,k,NULL); double f_exc=fk(k); ASSERT_DBL_NEAR_TOL(1.,f_int/f_exc,1E-3); } nmt_k_function_free(kf); //Beams double sigma=1.*M_PI/180; //Beam sigma in radians double fwhm_amin=sigma*180*60/M_PI*2.35482; kf=fs_generate_beam_window(fwhm_amin); for(l=0;l<100;l++) { double ll=(l+0.5)*4.8/(100.*sigma); double b=nmt_k_function_eval(kf,ll,NULL); double bt=exp(-0.5*ll*ll*sigma*sigma); ASSERT_DBL_NEAR_TOL(1.,b/bt,1E-3); } nmt_k_function_free(kf); free(karr); free(farr); }