content
stringlengths
10
4.9M
// NewWebhook creates the state needed for a webhook.
//
// key is the webhook's secret key (presumably used for request signature
// verification — confirm against Webhook's other methods), and events is the
// channel on which received webhook events are delivered to consumers.
func NewWebhook(key []byte, events chan interface{}) Webhook {
	// Positional struct literal: Webhook's first field receives the key,
	// the second the events channel.
	return Webhook{
		key,
		events,
	}
}
/*
 * Preallocate log files beyond the specified log endpoint.
 *
 * XXX this is currently extremely conservative, since it forces only one
 * future log segment to exist, and even that only if we are 75% done with
 * the current one. This is only appropriate for very low-WAL-volume systems.
 * High-volume systems will be OK once they've built up a sufficient set of
 * recycled log segments, but the startup transient is likely to include
 * a lot of segment creations by foreground processes, which is not so good.
 */
static void
PreallocXlogFiles(XLogRecPtr endptr)
{
	XLogSegNo	_logSegNo;
	int			lf;
	bool		use_existent;
	uint64		offset;

	/* Replicas do not preallocate WAL segments (PolarDB replica mode). */
	if (polar_in_replica_mode())
		return;

	/* Segment containing the byte just before endptr, and our offset in it. */
	XLByteToPrevSeg(endptr, _logSegNo, wal_segment_size);
	offset = XLogSegmentOffset(endptr - 1, wal_segment_size);

	/* Only bother creating the next segment once 75% of this one is used. */
	if (offset >= (uint32) (0.75 * wal_segment_size))
	{
		_logSegNo++;
		/*
		 * use_existent is in/out: XLogFileInit may reuse a recycled segment
		 * (leaves it true) or create a fresh one (resets it to false).
		 */
		use_existent = true;
		lf = XLogFileInit(_logSegNo, &use_existent, true);
		polar_close(lf);
		if (!use_existent)
			CheckpointStats.ckpt_segs_added++;
	}
}
/**
 * UGENE - Integrated Bioinformatics Tools.
 * Copyright (C) 2008-2012 UniPro <<EMAIL>>
 * http://ugene.unipro.ru
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include "CAP3Worker.h"
#include "TaskLocalStorage.h"
#include "CAP3Support.h"

#include <U2Lang/IntegralBusModel.h>
#include <U2Lang/WorkflowEnv.h>
#include <U2Lang/ActorPrototypeRegistry.h>
#include <U2Lang/BaseTypes.h>
#include <U2Lang/BaseSlots.h>
#include <U2Lang/BasePorts.h>
#include <U2Lang/BaseActorCategories.h>
#include <U2Designer/DelegateEditors.h>
#include <U2Lang/CoreLibConstants.h>
#include <U2Core/AppContext.h>
#include <U2Core/AppSettings.h>
#include <U2Core/Log.h>
#include <U2Core/ExternalToolRegistry.h>
#include <U2Core/UserApplicationsSettings.h>
#include <U2Core/FailTask.h>

namespace U2 {
namespace LocalWorkflow {

//////////////////////////////////////////////////////////////////////////
//CAP3WorkerFactory
//////////////////////////////////////////////////////////////////////////

// Workflow-designer identifier of the CAP3 actor.
const QString CAP3WorkerFactory::ACTOR_ID("CAP3");
// Attribute key: filesystem path of the long-read input file.
const QString INPUT_FILE_PATH("input-path");

// Registers the CAP3 actor prototype (ports, attributes, editor, prompter)
// with the workflow environment, and this factory with the local domain.
void CAP3WorkerFactory::init() {
    QList<PortDescriptor*> p;
    QList<Attribute*> a;

    // Single output port carrying the assembled multiple sequence alignment.
    Descriptor oud(BasePorts::OUT_MSA_PORT_ID(),
                   CAP3Worker::tr("CAP3 result multiple sequence alignment"),
                   CAP3Worker::tr("The result of the CAP3 contig assembly."));
    QMap<Descriptor, DataTypePtr> outM;
    outM[BaseSlots::MULTIPLE_ALIGNMENT_SLOT()] = BaseTypes::MULTIPLE_ALIGNMENT_TYPE();
    p << new PortDescriptor(oud, DataTypePtr(new MapDataType("ace.out.msa", outM)),
                            false /*input*/, true /*multi*/);

    // Required string attribute holding the input file path.
    Descriptor input(INPUT_FILE_PATH, CAP3Worker::tr("Input"),
                     CAP3Worker::tr("Path to input long DNA reads."));
    a << new Attribute(input, BaseTypes::STRING_TYPE(), true, QVariant("Default"));

    // Reuse the external tool's registered description for the actor doc.
    QString cap3Description =
        AppContext::getExternalToolRegistry()->getByName(CAP3_TOOL_NAME)->getDescription();
    Descriptor desc(ACTOR_ID, CAP3Worker::tr("Align with CAP3"), cap3Description);
    ActorPrototype* proto = new IntegralBusActorPrototype(desc, p, a);

    // URL delegate gives the attribute a file-chooser editor in the designer.
    QMap<QString, PropertyDelegate*> delegates;
    delegates[INPUT_FILE_PATH] = new URLDelegate("", "inputPath", false);
    proto->setEditor(new DelegateEditor(delegates));
    proto->setPrompter(new CAP3Prompter());
    proto->setIconPath(":external_tool_support/images/clustalx.png");
    WorkflowEnv::getProtoRegistry()->registerProto(BaseActorCategories::CATEGORY_ASSEMBLY(), proto);

    DomainFactory* localDomain = WorkflowEnv::getDomainRegistry()->getById(LocalDomainFactory::ID);
    localDomain->registerEntry(new CAP3WorkerFactory());
}

//////////////////////////////////////////////////////////////////////////
// CAP3Prompter
//////////////////////////////////////////////////////////////////////////

CAP3Prompter::CAP3Prompter(Actor* p)
    : PrompterBase<CAP3Prompter>(p) {
}

// Builds the human-readable one-line description shown in the designer,
// substituting the configured input path.
QString CAP3Prompter::composeRichDoc() {
    QString inputPath =
        target->getParameter(INPUT_FILE_PATH)->getAttributeValueWithoutScript<QString>();
    QString doc = tr("Aligns long DNA reads from <u>%1</u> with \"<u>CAP3</u>\".")
        .arg(inputPath);
    return doc;
}

//////////////////////////////////////////////////////////////////////////
// CAP3Worker
//////////////////////////////////////////////////////////////////////////

CAP3Worker::CAP3Worker(Actor* a)
    : BaseWorker(a), input(NULL), output(NULL) {
}

// Caches the input/output channel handles for use in tick().
// NOTE(review): the factory above registers only an OUT_MSA port, so
// ports.value(IN_MSA_PORT_ID()) may return NULL here — confirm against the
// prototype definition before relying on `input` being non-null.
void CAP3Worker::init() {
    input = ports.value(BasePorts::IN_MSA_PORT_ID());
    output = ports.value(BasePorts::OUT_MSA_PORT_ID());
}

// Scheduler callback: consumes one input message and launches a CAP3 task,
// or closes the output when the input channel has ended.
Task* CAP3Worker::tick() {
    if (input->hasMessage()) {
        Message inputMessage = getMessageAndSetupScriptValues(input);
        if (inputMessage.isEmpty()) {
            // Empty message is a pass-through marker; forward and do nothing.
            output->transit();
            return NULL;
        }
        // Re-read the input path each tick; note the list accumulates across
        // ticks (append, never cleared) — presumably one message per run.
        cfg.inputFiles.append(
            actor->getParameter(INPUT_FILE_PATH)->getAttributeValue<QString>(context));
        Task* t = new CAP3SupportTask(cfg);
        connect(t, SIGNAL(si_stateChanged()), SLOT(sl_taskFinished()));
        return t;
    } else if (input->isEnded()) {
        setDone();
        output->setEnded();
    }
    return NULL;
}

// Slot fired on every task state change; publishes the alignment once the
// task reaches State_Finished.
// NOTE(review): qobject_cast result is dereferenced without a null check —
// safe only if this slot is connected exclusively to CAP3SupportTask.
void CAP3Worker::sl_taskFinished() {
    CAP3SupportTask* t = qobject_cast<CAP3SupportTask*>(sender());
    if (t->getState() != Task::State_Finished)
        return;
    QVariant v = qVariantFromValue<MAlignment>(t->getResultAlignment()->getMAlignment());
    output->put(Message(BaseTypes::MULTIPLE_ALIGNMENT_TYPE(), v));
    algoLog.info(tr("Aligned %1 with CAP3")
                     .arg(t->getResultAlignment()->getMAlignment().getName()));
}

void CAP3Worker::cleanup() {
}

} //namespace LocalWorkflow
} //namespace U2
# Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Delay model visualization tool.

Dumps a graph (as an image) of each XLS op delay model in a specified
directory.

Usage:
  delay_model_visualizer --output_dir=/tmp/images \
    xls/delay_model/models/unit.textproto
"""

import os.path

from typing import Text, Optional

from absl import app
from absl import flags

from matplotlib import pyplot
from mpl_toolkits import mplot3d  # pylint: disable=unused-import
import numpy as np

from google.protobuf import text_format
from xls.delay_model import delay_model
from xls.delay_model import delay_model_pb2

flags.DEFINE_string('output_dir', None,
                    'The directory to write image files into.')
flags.mark_flag_as_required('output_dir')

FLAGS = flags.FLAGS


def maybe_plot_op_model(op_model: delay_model.OpModel,
                        specialization_kind: Optional[Text] = None):
  """Plots the given delay model and writes the figure to a file.

  Only plots one-factor (2D plot) and two-factor (3D plot) regression and
  bounding box models.

  Args:
    op_model: OpModel to plot.
    specialization_kind: Optional kind of specialization. Used in plot title
      and file name.
  """
  # Other estimator kinds (e.g. fixed/lookup) are silently skipped.
  if (not isinstance(op_model, delay_model.RegressionEstimator) and
      not isinstance(op_model, delay_model.BoundingBoxEstimator)):
    return

  def delay_f(*args):
    # Out-of-domain points raise delay_model.Error; plot those as 0 delay.
    try:
      return op_model.raw_delay(args)
    except delay_model.Error:
      return 0

  title = op_model.op
  if specialization_kind:
    title += ' ' + specialization_kind

  if len(op_model.delay_factors) == 1:
    fig, ax = pyplot.subplots()

    # Plot the real data points as circles.
    x_actual, y_actual = zip(*op_model.raw_data_points)
    ax.plot(x_actual, y_actual, 'o')

    # Plot a curve for the delay model.
    x_range = np.linspace(1, max(x_actual), num=100)
    y_est = np.vectorize(delay_f)(x_range)
    ax.plot(x_range, y_est)

    pyplot.title(title)
    ax.set_xlabel(
        delay_model.delay_factor_description(op_model.delay_factors[0]))
    ax.set_ylabel('delay (ps)')
    pyplot.ylim(bottom=0)
    pyplot.xlim(left=1)

  if len(op_model.delay_factors) == 2:
    x_actual, y_actual, z_actual = list(zip(*op_model.raw_data_points))

    fig = pyplot.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')

    # Plot the surface of the delay estimate over the sampled factor grid.
    x_range, y_range = np.meshgrid(
        np.arange(1, max(x_actual), 1), np.arange(1, max(y_actual), 1))
    z_est = np.vectorize(delay_f)(x_range, y_range)
    surf = ax.plot_surface(
        x_range,
        y_range,
        z_est,
        rstride=10,
        cstride=1,
        cmap=pyplot.get_cmap('coolwarm'),
        linewidth=0,
        antialiased=False,
        alpha=0.25)
    ax.set_zlim(min(0, min(z_actual)), max(z_actual))
    fig.colorbar(surf, shrink=0.5, aspect=10)

    # Plot the actual data points as circles with a line extending from the
    # model estimate.
    for x_i, y_i, z_i in op_model.raw_data_points:
      z_est_i = delay_f(x_i, y_i)
      ax.scatter(x_i, y_i, z_i, marker='o', c='black')
      ax.plot([x_i, x_i], [y_i, y_i], [z_est_i, z_i],
              color='black',
              marker='_')

    pyplot.title(title)
    ax.set_xlabel(
        delay_model.delay_factor_description(op_model.delay_factors[0]))
    ax.set_ylabel(
        delay_model.delay_factor_description(op_model.delay_factors[1]))
    ax.set_zlabel('delay (ps)')

  # NOTE(review): `fig` is only bound inside the one/two-factor branches; a
  # regression/bounding-box model with 3+ factors would raise NameError here —
  # confirm such models cannot reach this point.
  if specialization_kind:
    filename = '%s_%s.png' % (op_model.op, specialization_kind)
  else:
    filename = '%s.png' % op_model.op
  fig.savefig(os.path.join(FLAGS.output_dir, filename))
  pyplot.close(fig)


def main(argv):
  # Entry point: argv[1] is the textproto delay model; one plot per op (and
  # per specialization) is written into --output_dir.
  if len(argv) > 2:
    raise app.UsageError('Too many command-line arguments.')

  with open(argv[1], 'rb') as f:
    contents = f.read()
  dm = delay_model.DelayModel(
      text_format.Parse(contents, delay_model_pb2.DelayModel()))

  for op in dm.ops():
    op_model = dm.op_model(op)
    maybe_plot_op_model(op_model.estimator)
    for specialization_kind, estimator in op_model.specializations.items():
      maybe_plot_op_model(
          estimator,
          delay_model_pb2.SpecializationKind.Name(specialization_kind))


if __name__ == '__main__':
  app.run(main)
// LED toggle macros.
// LED0 is wired to PORTB bit 2, LED1 to PORTB bit 3 (active high).
#define LED1_ON SET_BIT(PORTB, 3)
#define LED1_OFF CLEAR_BIT(PORTB, 3)
#define LED0_ON SET_BIT(PORTB, 2)
#define LED0_OFF CLEAR_BIT(PORTB, 2)
// BUG FIX: the old definition expanded to two statements separated by ';',
// so `if (x) LEDBOTH_OFF;` only guarded the first CLEAR_BIT and broke with
// a dangling `else`. The do { } while (0) wrapper makes the macro behave as
// a single statement wherever it is used.
#define LEDBOTH_OFF do { CLEAR_BIT(PORTB, 2); CLEAR_BIT(PORTB, 3); } while (0)

// Set backlight brightness (0 = max, 1023 = min).
// Taken from adc_pwm_backlight.c from topic 11.
void set_backlight(int duty_cycle);

// Set up potentiometer registers etc.
void setup_pot();

// Set up timer0 with 1024 prescale.
void setup_timer0();

// Set up timer4 for use with PWM of the backlight.
// Taken from adc_pwm_backlight.c from topic 11.
void setup_pwm();

// Configure the LED pins as outputs.
void setup_led();

// Store 4 consecutive pin values in an array (debounce history).
void input_history(uint8_t mask);

// Check the debounced state of `bit` against the recorded history.
bool input_check(int bit, uint8_t mask);

// Read potentiometer 0 or 1.
int read_pot(int id);
import {Component, OnInit} from '@angular/core';
import {ActivatedRoute} from '@angular/router';
import {ShowDetailsParams} from '../../app-routing.module';
import {TvMazeService} from '../tv-maze.service';
import {ShowDetails} from '../tv.models';

@Component({
  selector: 'app-show-details',
  templateUrl: './show-details.component.html',
  styleUrls: ['./show-details.component.css']
})
export class ShowDetailsComponent implements OnInit {
  // Show loaded by the route resolver (see constructor).
  show: ShowDetails;
  // Width used when rendering episode numbers — 3 digits if any episode
  // number exceeds 99, otherwise 2. Presumably consumed by the template for
  // zero-padding; confirm in show-details.component.html.
  episodesLength = 2;

  constructor(private route: ActivatedRoute) {
    // Earlier approaches kept for reference: reading params directly and
    // fetching via TvMazeService, or subscribing to route data.
    // const {id} = this.route.snapshot.params as ShowDetailsParams;
    // this.route.params.subscribe(p => console.log(p));
    // this.tv.getShow(id).subscribe(show => this.show = show);
    // this.route.data.subscribe(({show}) => this.show = show);

    // Current approach: the route resolver has already fetched the show and
    // placed it under the `show` data key by the time this runs.
    this.show = this.route.snapshot.data.show;
    this.episodesLength =
        this.show._embedded.episodes.some(({number}) => number > 99) ? 3 : 2;
  }

  ngOnInit() {
  }
}
/*
 * VmbusChannelSetEvent - Trigger an event notification on the specified
 * channel.
 */
static void VmbusChannelSetEvent(struct vmbus_channel *Channel)
{
	struct hv_monitor_page *monitorPage;

	DPRINT_ENTER(VMBUS);

	if (Channel->OfferMsg.MonitorAllocated) {
		/*
		 * Monitored notification: mark this child relid pending in the
		 * interrupt page. Each unsigned long word holds 32 relid bits,
		 * so ">> 5" selects the word and "& 31" the bit within it.
		 */
		set_bit(Channel->OfferMsg.ChildRelId & 31,
			(unsigned long *) gVmbusConnection.SendInterruptPage +
			(Channel->OfferMsg.ChildRelId >> 5));

		/*
		 * Advance past the first monitor page to the second one and
		 * set this channel's pending bit in its trigger group.
		 * NOTE(review): assumes MonitorPages[1] is the guest-to-host
		 * trigger page — confirm against the connection setup code.
		 */
		monitorPage = gVmbusConnection.MonitorPages;
		monitorPage++;

		set_bit(Channel->MonitorBit,
			(unsigned long *)&monitorPage->TriggerGroup
					[Channel->MonitorGroup].Pending);
	} else {
		/* Unmonitored channel: signal the host directly. */
		VmbusSetEvent(Channel->OfferMsg.ChildRelId);
	}

	DPRINT_EXIT(VMBUS);
}
package br.com.sprintters.prettystyle.dao; import java.sql.ResultSet; import java.util.ArrayList; import java.sql.Connection; import java.sql.SQLException; import br.com.sprintters.prettystyle.model.Item; import br.com.sprintters.prettystyle.model.Mark; import br.com.sprintters.prettystyle.model.Product; import br.com.sprintters.prettystyle.model.ProductPhoto; import br.com.sprintters.prettystyle.model.virtual.Cart; import java.sql.PreparedStatement; public class ItemDAO { public int insert(Item to) throws Exception { int id = 0; String sqlInsert = "INSERT INTO item (quantity, id_product, id_client, created_at) values (?, ?, ?, now())"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlInsert)) { stm.setInt(1, to.getQuantity()); stm.setInt(2, to.getIdProduct()); stm.setInt(3, to.getIdClient()); stm.execute(); try (ResultSet rs = stm.executeQuery("SELECT LAST_INSERT_ID()")) { if (rs.next()) { id = rs.getInt(1); to.setId(id); } conn.close(); } catch (SQLException ex) { throw new Exception(ex.getMessage()); } } catch (SQLException e) { throw new Exception(e.getMessage()); } return id; } public void update(Item to) throws Exception { String sqlUpdate = "UPDATE item SET quantity = ?, id_product = ?, updated_at = NOW() WHERE id = ?"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlUpdate)) { stm.setInt(1, to.getQuantity()); stm.setInt(2, to.getIdProduct()); stm.setInt(3, to.getId()); stm.execute(); conn.close(); } catch (SQLException e) { throw new Exception(e.getMessage()); } } public void delete(Item to) throws Exception { String sqlDelete = "UPDATE item SET deleted_at = NOW() WHERE id = ?"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlDelete)) { stm.setInt(1, to.getId()); stm.execute(); conn.close(); } catch (SQLException e) { throw new Exception(e.getMessage()); } } public Item 
find(int id) throws Exception { Item to = new Item(); String sqlSelect = "SELECT * FROM item WHERE id = ?"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlSelect)) { stm.setInt(1, id); try (ResultSet rs = stm.executeQuery();) { if (rs.next()) { to.setId(rs.getInt("id")); to.setQuantity(rs.getInt("quantity")); to.setIdProduct(rs.getInt("id_product")); to.setCreatedAt(rs.getTimestamp("created_at")); to.setUpdatedAt(rs.getTimestamp("updated_at")); to.setDeletedAt(rs.getTimestamp("deleted_at")); } conn.close(); } catch (SQLException ex) { throw new Exception(ex.getMessage()); } } catch (SQLException e) { throw new Exception(e.getMessage()); } return to; } public ArrayList<Item> list() throws Exception { ArrayList<Item> items = new ArrayList<Item>(); String sqlSelect = "SELECT * FROM item WHERE deleted_at IS NULL"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlSelect)) { try (ResultSet rs = stm.executeQuery()) { while (rs.next()) { Item to = new Item( rs.getInt("id"), rs.getInt("quantity"), rs.getInt("id_product"), rs.getInt("id_request"), rs.getTimestamp("created_at"), rs.getTimestamp("updated_at"), rs.getTimestamp("deleted_at") ); items.add(to); } conn.close(); } catch (SQLException ex) { throw new Exception(ex.getMessage()); } } catch (SQLException e) { throw new Exception(e.getMessage()); } return items; } public Cart listItemsInCartByIdClient(int idClient) throws Exception { Cart cart = new Cart(); String sqlSelect = "SELECT p.id, p.name, p.description, p.price, p.created_at, p.id_mark, m.name as 'mark', pp.id as 'id_photo'\r\n" + ", pp.url, i.id as 'id_item', i.quantity \r\n" + "FROM product p \r\n" + "LEFT JOIN product_photo pp ON p.id = pp.id_product\r\n" + "INNER JOIN mark m ON p.id_mark = m.id \r\n" + "INNER JOIN item i ON p.id = i.id_product \r\n" + "WHERE i.id_client = ? 
and i.paid = 0"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlSelect)) { stm.setInt(1, idClient); try (ResultSet rs = stm.executeQuery()) { int prevIdProduct = 0; Product product = null; ArrayList<ProductPhoto> photos = null; while (rs.next()) { int idProduct = rs.getInt("id"); int idProductPhoto = rs.getInt("id_photo"); if (idProduct != prevIdProduct) { Item item = new Item(); item.setId(rs.getInt("id_item")); item.setQuantity(rs.getInt("quantity")); product = new Product(); product.setId(idProduct); product.setName(rs.getString("name")); product.setDescription(rs.getString("description")); product.setPrice(rs.getDouble("price")); //product.setCreatedAt(rs.getDate("created_at")); product.setIdMark(rs.getInt("id_mark")); Mark mark = new Mark(); mark.setId(rs.getInt("id_mark")); mark.setName(rs.getString("mark")); product.setMark(mark); item.setProduct(product); cart.getItems().add(item); prevIdProduct = idProduct; photos = new ArrayList<ProductPhoto>(); ProductPhoto photo = new ProductPhoto(); photo.setId(idProductPhoto); photo.setUrl(rs.getString("url")); photos.add(photo); } else { ProductPhoto photo = new ProductPhoto(); photo.setId(idProductPhoto); photo.setUrl(rs.getString("url")); photos.add(photo); } product.setPhotos(photos); } conn.close(); } catch (SQLException ex) { throw new Exception(ex.getMessage()); } } catch (SQLException e) { throw new Exception(e.getMessage()); } return cart; } public Item findByIdProductAndIdClientNotPaid(int idProduct, int idClient) throws Exception { Item to = new Item(); String sqlSelect = "SELECT * FROM item WHERE id_product = ? AND id_client = ? 
AND paid = 0"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlSelect)) { stm.setInt(1, idProduct); stm.setInt(2, idClient); try (ResultSet rs = stm.executeQuery()) { if (rs.next()) { to.setId(rs.getInt("id")); to.setQuantity(rs.getInt("quantity")); to.setIdProduct(rs.getInt("id_product")); to.setCreatedAt(rs.getTimestamp("created_at")); to.setUpdatedAt(rs.getTimestamp("updated_at")); to.setDeletedAt(rs.getTimestamp("deleted_at")); } conn.close(); } catch (SQLException ex) { throw new Exception(ex.getMessage()); } } catch (SQLException e) { throw new Exception(e.getMessage()); } return to; } public void updateQuantityById(Item to) throws Exception { String sqlUpdate = "UPDATE item SET quantity = ?, updated_at = NOW() WHERE id = ?"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlUpdate)) { stm.setInt(1, to.getQuantity()); stm.setInt(2, to.getId()); stm.execute(); conn.close(); } catch (SQLException e) { throw new Exception(e.getMessage()); } } public void setItemPaid(Item to) throws Exception { String sqlDelete = "UPDATE item SET paid = 1 WHERE id = ? and id_client = ?"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlDelete)) { stm.setInt(1, to.getId()); stm.setInt(2, to.getIdClient()); stm.execute(); conn.close(); } catch (SQLException e) { throw new Exception(e.getMessage()); } } public void updateQuantityInItemFromCart(int itemId, int productId, int newQuantity) throws Exception { String sqlDelete = "UPDATE item SET quantity = ? WHERE id = ? AND id_product = ?"; try (Connection conn = ConnectionFactory.createConnection(); PreparedStatement stm = conn.prepareStatement(sqlDelete)) { stm.setInt(1, newQuantity); stm.setInt(2, itemId); stm.setInt(3, productId); stm.execute(); conn.close(); } catch (SQLException e) { throw new Exception(e.getMessage()); } } }
<filename>runtimeconfig/unit_tests/test_client.py<gh_stars>0 # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest class TestClient(unittest.TestCase): def _getTargetClass(self): from google.cloud.runtimeconfig.client import Client return Client def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def test_config(self): PROJECT = 'PROJECT' CONFIG_NAME = 'config_name' creds = _Credentials() client_obj = self._makeOne(project=PROJECT, credentials=creds) new_config = client_obj.config(CONFIG_NAME) self.assertEqual(new_config.name, CONFIG_NAME) self.assertIs(new_config._client, client_obj) self.assertEqual(new_config.project, PROJECT) self.assertEqual(new_config.full_name, 'projects/%s/configs/%s' % (PROJECT, CONFIG_NAME)) self.assertFalse(new_config.description) class _Credentials(object): _scopes = None @staticmethod def create_scoped_required(): return True def create_scoped(self, scope): self._scopes = scope return self
/*
Multisteps: a program to get optime Runge-Kutta and multi-steps methods.

Copyright 2011-2019, <NAME>.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY <NAME> ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL <NAME> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/**
 * \file rk_4_4.c
 * \brief Source file to optimize Runge-Kutta 5 steps 4th order methods.
 * \author <NAME>.
 * \copyright Copyright 2011-2019.
 */
#define _GNU_SOURCE
#include <string.h>
#include <math.h>
#include <libxml/parser.h>
#include <glib.h>
#include <libintl.h>
#include <gsl/gsl_rng.h>
#include "config.h"
#include "utils.h"
#include "optimize.h"
#include "rk.h"
#include "rk_4_4.h"

#define DEBUG_RK_4_4 0          ///< macro to debug.

/**
 * Function to obtain the coefficients of a 4 steps 4th order Runge-Kutta
 * method.
 *
 * The t1..t4 node times are seeded from random data (t3 and t4 fixed at 1);
 * the b coefficients are then solved in back-substitution order from the
 * 4th-order conditions (the t*/b* accessors are macros over the coefficient
 * array, defined in rk.h). Returns 0 as soon as any coefficient comes out
 * NaN (division by a vanishing denominator), 1 on success.
 */
int
rk_tb_4_4 (Optimize * optimize) ///< Optimize struct.
{
  long double *tb, *r;
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_tb_4_4: start\n");
#endif
  tb = optimize->coefficient;
  r = optimize->random_data;
  t4 (tb) = 1.L;
  t1 (tb) = r[0];
  t2 (tb) = r[1];
  t3 (tb) = 1.L;
  b43 (tb) = (0.25L - 1.L / 3.L * t1 (tb)
              - (1.L / 3.L - 0.5L * t1 (tb)) * t2 (tb))
    / (t3 (tb) * (t3 (tb) - t2 (tb)) * (t3 (tb) - t1 (tb)));
  if (isnan (b43 (tb)))
    return 0;
  b42 (tb) = (1.L / 3.L - 0.5L * t1 (tb)
              - b43 (tb) * t3 (tb) * (t3 (tb) - t1 (tb)))
    / (t2 (tb) * (t2 (tb) - t1 (tb)));
  if (isnan (b42 (tb)))
    return 0;
  b41 (tb) = (0.5L - b42 (tb) * t2 (tb) - b43 (tb) * t3 (tb)) / t1 (tb);
  if (isnan (b41 (tb)))
    return 0;
  b32 (tb) = (1.L / 12.L - 1.L / 6.L * t1 (tb))
    / (b43 (tb) * t2 (tb) * (t2 (tb) - t1 (tb)));
  if (isnan (b32 (tb)))
    return 0;
  b31 (tb) = ((0.125L - 1.L / 6.L * t2 (tb))
              / (b43 (tb) * (t3 (tb) - t2 (tb)))
              - b32 (tb) * t2 (tb)) / t1 (tb);
  if (isnan (b31 (tb)))
    return 0;
  b21 (tb) = 1.L / 24.L / (t1 (tb) * b43 (tb) * b32 (tb));
  if (isnan (b21 (tb)))
    return 0;
  /* Derive the remaining b coefficients from the row-sum conditions. */
  rk_b_4 (tb);
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_tb_4_4: end\n");
#endif
  return 1;
}

/**
 * Function to obtain the coefficients of a 4 steps 4th order, 5th order in
 * equations depending only in time, Runge-Kutta method.
 *
 * Identical to rk_tb_4_4 except that t2 is no longer free: it is pinned to
 * 0.5 (t1 - 0.6) / (t1 - 0.5), the value enforcing the extra 5th-order
 * time-only condition.
 */
int
rk_tb_4_4t (Optimize * optimize)        ///< Optimize struct.
{
  long double *tb, *r;
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_tb_4_4t: start\n");
#endif
  tb = optimize->coefficient;
  r = optimize->random_data;
  t4 (tb) = 1.L;
  t1 (tb) = r[0];
  t2 (tb) = 0.5L * (t1 (tb) - 0.6L) / (t1 (tb) - 0.5L);
  t3 (tb) = 1.L;
  b43 (tb) = (0.25L - 1.L / 3.L * t1 (tb)
              - (1.L / 3.L - 0.5L * t1 (tb)) * t2 (tb))
    / (t3 (tb) * (t3 (tb) - t2 (tb)) * (t3 (tb) - t1 (tb)));
  if (isnan (b43 (tb)))
    return 0;
  b42 (tb) = (1.L / 3.L - 0.5L * t1 (tb)
              - b43 (tb) * t3 (tb) * (t3 (tb) - t1 (tb)))
    / (t2 (tb) * (t2 (tb) - t1 (tb)));
  if (isnan (b42 (tb)))
    return 0;
  b41 (tb) = (0.5L - b42 (tb) * t2 (tb) - b43 (tb) * t3 (tb)) / t1 (tb);
  if (isnan (b41 (tb)))
    return 0;
  b32 (tb) = (1.L / 12.L - 1.L / 6.L * t1 (tb))
    / (b43 (tb) * t2 (tb) * (t2 (tb) - t1 (tb)));
  if (isnan (b32 (tb)))
    return 0;
  b31 (tb) = ((0.125L - 1.L / 6.L * t2 (tb))
              / (b43 (tb) * (t3 (tb) - t2 (tb)))
              - b32 (tb) * t2 (tb)) / t1 (tb);
  if (isnan (b31 (tb)))
    return 0;
  b21 (tb) = 1.L / 24.L / (t1 (tb) * b43 (tb) * b32 (tb));
  if (isnan (b21 (tb)))
    return 0;
  rk_b_4 (tb);
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_tb_4_4t: end\n");
#endif
  return 1;
}

/**
 * Function to calculate the objective function of a 4 steps 4th order
 * Runge-Kutta method.
 *
 * Penalty scheme: any negative b coefficient is summed into o and mapped to
 * 40 - o (so infeasible sets score > 40); otherwise the score is 30 plus the
 * largest node time, optionally lowered by the strong-stability search.
 *
 * \return objective function value.
 */
long double
rk_objective_tb_4_4 (RK * rk)   ///< RK struct.
{
  long double *tb;
  long double o;
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_objective_tb_4_4: start\n");
#endif
  tb = rk->tb->coefficient;
  o = fminl (0.L, b20 (tb));
  if (b21 (tb) < 0.L)
    o += b21 (tb);
  if (b30 (tb) < 0.L)
    o += b30 (tb);
  if (b31 (tb) < 0.L)
    o += b31 (tb);
  if (b32 (tb) < 0.L)
    o += b32 (tb);
  if (b40 (tb) < 0.L)
    o += b40 (tb);
  if (b41 (tb) < 0.L)
    o += b41 (tb);
  if (b42 (tb) < 0.L)
    o += b42 (tb);
  if (b43 (tb) < 0.L)
    o += b43 (tb);
  if (o < 0.L)
    {
      /* Infeasible: penalize proportionally to the total negativity. */
      o = 40.L - o;
      goto end;
    }
  o = 30.L + fmaxl (1.L, fmaxl (t1 (tb), fmaxl (t2 (tb), t3 (tb))));
  if (rk->strong)
    {
      rk_bucle_ac (rk);
      o = fminl (o, *rk->ac0->optimal);
    }
end:
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_objective_tb_4_4: optimal=%Lg\n", o);
  fprintf (stderr, "rk_objective_tb_4_4: end\n");
#endif
  return o;
}

/**
 * Function to calculate the objective function of a 4 steps 4th order, 5th
 * order in equations depending only in time, Runge-Kutta method.
 *
 * Same penalty scheme as rk_objective_tb_4_4.
 * NOTE(review): the debug fprintf labels below still say "rk_objective_tb_4_4"
 * — likely a copy/paste leftover; confirm before relying on debug output.
 *
 * \return objective function value.
 */
long double
rk_objective_tb_4_4t (RK * rk)  ///< RK struct.
{
  long double *tb;
  long double o;
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_objective_tb_4_4: start\n");
#endif
  tb = rk->tb->coefficient;
  o = fminl (0.L, b20 (tb));
  if (b21 (tb) < 0.L)
    o += b21 (tb);
  if (b30 (tb) < 0.L)
    o += b30 (tb);
  if (b31 (tb) < 0.L)
    o += b31 (tb);
  if (b32 (tb) < 0.L)
    o += b32 (tb);
  if (b40 (tb) < 0.L)
    o += b40 (tb);
  if (b41 (tb) < 0.L)
    o += b41 (tb);
  if (b42 (tb) < 0.L)
    o += b42 (tb);
  if (b43 (tb) < 0.L)
    o += b43 (tb);
  if (o < 0.L)
    {
      o = 40.L - o;
      goto end;
    }
  o = 30.L + fmaxl (1.L, fmaxl (t1 (tb), fmaxl (t2 (tb), t3 (tb))));
  if (rk->strong)
    {
      rk_bucle_ac (rk);
      o = fminl (o, *rk->ac0->optimal);
    }
end:
#if DEBUG_RK_4_4
  fprintf (stderr, "rk_objective_tb_4_4: optimal=%Lg\n", o);
  fprintf (stderr, "rk_objective_tb_4_4: end\n");
#endif
  return o;
}
import java.util.*;

/**
 * Reads a rooted tree (node id, child count, child ids per line) from stdin
 * and prints, for every node in id order, its parent, depth, role
 * (root / internal node / leaf) and child list.
 */
public class Main {

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        final int count = in.nextInt();
        Node[] nodes = new Node[count];

        // Read each node's id and child list; parents are linked afterwards.
        for (int i = 0; i < count; i++) {
            int id = in.nextInt();
            int degree = in.nextInt();
            int[] children = new int[degree];
            for (int j = 0; j < degree; j++) {
                children[j] = in.nextInt();
            }
            Node node = new Node();
            node.element(id, -1, children, "leaf", 0);
            nodes[id] = node;
        }

        // Link every child back to its parent.
        for (Node parent : nodes) {
            for (int childId : parent.child) {
                nodes[childId].parent = parent.id;
            }
        }

        // Classify each node; the node left without a parent is the root.
        int root = 0;
        for (Node node : nodes) {
            boolean hasParent = node.parent != -1;
            boolean hasChildren = node.child.length != 0;
            if (!hasChildren && hasParent) {
                node.type = "leaf";
            } else if (hasChildren && hasParent) {
                node.type = "internal node";
            } else {
                node.type = "root";
                root = node.id;
            }
        }

        // Stamp depths starting from the root.
        int[] visited = new int[count];
        Arrays.fill(visited, 0);
        BFS(nodes, root, visited, 0);

        // Report every node in id order, children comma-separated in brackets.
        for (Node node : nodes) {
            StringBuilder line = new StringBuilder();
            line.append("node ").append(node.id)
                .append(": parent = ").append(node.parent)
                .append(", depth = ").append(node.depth)
                .append(", ").append(node.type)
                .append(", [");
            for (int i = 0; i < node.child.length; i++) {
                if (i > 0) {
                    line.append(", ");
                }
                line.append(node.child[i]);
            }
            line.append("]");
            System.out.println(line);
        }
    }

    /**
     * Depth-stamping traversal (recursive depth-first despite the name,
     * kept for compatibility): marks id visited, records its depth, then
     * recurses into unvisited children with depth + 1.
     */
    public static void BFS(Node[] nodes, int id, int[] visited, int depth) {
        nodes[id].depth = depth;
        visited[id] = 1;
        for (int next : nodes[id].child) {
            if (visited[next] == 0) {
                BFS(nodes, next, visited, depth + 1);
            }
        }
    }

    /** Tree node record: id, parent id, child ids, role label and depth. */
    public static class Node {
        int id;
        int parent;
        int[] child;
        String type;
        int depth;

        public void element(int id, int parent, int[] child, String type, int depth) {
            this.id = id;
            this.parent = parent;
            this.child = child;
            this.type = type;
            this.depth = depth;
        }
    }
}
import os
import time

from keylime import keylime_logging
from keylime.da.record import BaseRecordManagement, base_build_key_list

# setup logging
logger = keylime_logging.init_logging("durable_attestation_persistent_store")

# ######################################################
# Durable Attestation record manager with "plain file" backend
# ######################################################


class RecordManagement(BaseRecordManagement):
    """Durable Attestation record manager backed by plain files.

    Records are appended to per-agent files as lines of the form
    ``<unix-timestamp>--<serialized-record>`` separated by ``\\n----\\n``.
    """

    def __init__(self, service):
        """Initialize the store and create the backing directory.

        :param service: service selector forwarded to BaseRecordManagement
                        (which also populates self.ps_url from configuration).
        """
        BaseRecordManagement.__init__(self, service)

        self.rcd_enc = "base64"
        # Directory and file-name prefix come from the persistent-store URL,
        # e.g. path component -> directory, "prefix=..." query -> prefix.
        self.file_path = self.ps_url.path
        self.file_prefix = self.ps_url.query.replace("prefix=", "")
        self.line_sep = b"\n----\n"  # record terminator written after each entry
        self.ts_sep = b"--"  # separates the timestamp from the payload
        os.makedirs(self.file_path, exist_ok=True)

    def agent_list_retrieval(self, record_prefix="auto", service="auto"):
        """Return the UUIDs of all agents that have record files.

        :param record_prefix: file-name prefix, or "auto" to use the
                              configured one.
        :param service: service selector used to derive the record type.
        :return: list of unique agent UUID strings.
        """
        if record_prefix == "auto":
            record_prefix = self.file_prefix

        agent_list = []
        record_prefix = f"{record_prefix}_{self.get_record_type(service)}"
        logger.debug(
            "Extracting the UUIDs of all agents with entries with prefix %s from filesystem persistent store",
            record_prefix,
        )
        # os.walk yields (dirpath, dirnames, filenames); [2] is the file list
        # of self.file_path only (no recursion into subdirectories).
        for entry in next(os.walk(self.file_path), (None, None, []))[2]:
            if record_prefix in entry:
                # File names look like "<prefix>_<record-type>_<uuid>.<fmt>";
                # strip prefix and extension to recover the UUID.
                agent_uuid = entry.replace(f"{record_prefix}_", "").replace(f".{self.rcd_fmt}", "")
                if agent_uuid not in agent_list:
                    agent_list.append(agent_uuid)

        return agent_list

    def _bulk_record_retrieval(self, record_identifier, start_date=0, end_date="auto"):
        """Read, verify, and deserialize all records in one file.

        :param record_identifier: full path of the per-agent record file.
        :param start_date: inclusive lower bound (unix timestamp).
        :param end_date: inclusive upper bound, or "auto" for "end of times".
        :return: list of deserialized (and signature-checked) record objects.
        """
        logger.debug(
            "Extracting all records for record_identifier %s from filesystem persistent store", record_identifier
        )

        if f"{end_date}" == "auto":
            end_date = self.end_of_times

        record_list = []
        with open(record_identifier, "rb") as fp:
            if self.only_last_record_wanted(start_date, end_date):
                start_date = 0
                # A simple and unoptimized way to get penultimate line of the
                # file (given the last line is just a separator)
                try:
                    fp.seek(-2, os.SEEK_END)
                    while fp.read(1) != b"\n":
                        fp.seek(-2, os.SEEK_CUR)
                    fp.seek(-2, os.SEEK_CUR)
                    while fp.read(1) != b"\n":
                        fp.seek(-2, os.SEEK_CUR)
                except OSError:
                    # File shorter than the seek window: fall back to a full scan.
                    fp.seek(0)

            for _line in fp:
                # Skip the "----" separator lines; everything else is
                # "<timestamp>--<payload>".
                if b"\n" + _line != self.line_sep:
                    internal_timestamp, encoded_record_object = _line.split(self.ts_sep)
                    decoded_record_object = self.record_deserialize(encoded_record_object)
                    internal_timestamp = int(internal_timestamp)
                    if start_date <= internal_timestamp <= end_date:
                        self.record_signature_check(decoded_record_object, record_identifier)
                        record_list.append(decoded_record_object)

        return record_list

    def build_key_list(self, agent_identifier, service="auto"):
        """Rebuild the key list from an agent's registration records."""
        registration_record_identifier = (
            f"{self.file_path}/{self.file_prefix}_{self.get_record_type(service)}_{agent_identifier}.{self.rcd_fmt}"
        )
        registration_record_list = self._bulk_record_retrieval(registration_record_identifier)
        return base_build_key_list(registration_record_list)

    def record_read(self, agent_identifier, start_date, end_date, service="auto"):
        """Return an agent's attestation records within [start_date, end_date]."""
        attestation_record_identifier = (
            f"{self.file_path}/{self.file_prefix}_{self.get_record_type(service)}_{agent_identifier}.{self.rcd_fmt}"
        )
        attestation_record_list = self._bulk_record_retrieval(attestation_record_identifier, start_date, end_date)
        self.base_record_read(attestation_record_list)
        return attestation_record_list

    def record_signature_check(self, record_object, record_identifier):
        """Verify a record's signature, then its embedded timestamp."""
        contents = self.base_record_signature_check(record_object, record_identifier)
        self.base_record_timestamp_check(record_object, record_identifier, contents)

    def record_signature_create(
        self, record_object, agent_data, attestation_data, service="auto", signed_attributes="auto"
    ):
        """Sign a record in place and stamp it with a timestamp."""
        contents = self.base_record_signature_create(
            record_object, agent_data, attestation_data, service, signed_attributes
        )
        self.base_record_timestamp_create(record_object, agent_data, contents)

    def record_create(
        self, agent_data, attestation_data, runtime_policy_data=None, service="auto", signed_attributes="auto"
    ):
        """Sign and append a new record to the agent's record file.

        The entry is written as "<unix-time>--<serialized record>" followed
        by the line separator.
        """
        record_object = {}
        self.record_signature_create(record_object, agent_data, attestation_data, service, signed_attributes)
        logger.debug(
            "Recording new %s entry for agent %s on filesystem persistent store",
            self.get_record_type(service),
            agent_data["agent_id"],
        )
        with open(
            f'{self.file_path}/{self.file_prefix}_{self.get_record_type(service)}_{agent_data["agent_id"]}.{self.rcd_fmt}',
            "ab",
        ) as fp:
            ts = str(int(time.time())).encode()
            fp.write(
                ts
                + self.ts_sep
                + self.base_record_create(record_object, agent_data, attestation_data, runtime_policy_data)
            )
            fp.write(self.line_sep)
. A total of 128 adrenalectomies were made for catechol-producing tumor (n = 69), mineralocorticism (n = 27), primary and metastatic adrenal cancer (n = 20), and other tumors (n = 12). A stable hypotensive result after adrenalectomy was observed in 97.1% and 66.8% of patients with pheochromocytoma and mineralocorticism, respectively. The remaining patients improved, i.e. their malignant hypertension converted to a benign one. If cancer involves the upper segment of the kidney, the tumor often invades the adrenal gland. Therefore, a pre- and intraoperative search for an adrenal tumor must be made.
from .abi_types import * from .address_types import * from .binary_types import * from .block_types import * from .config_types import * from .external_types import * from .network_types import * from .number_types import * from .rpc_types import * from .storage_types import *
/**
 * Created by Aspsine on 2015/9/7.
 *
 * Auto-scrolling, infinitely-looping ViewPager adapter.
 *
 * Loop trick: for N real items the internal list holds N + 2 entries —
 * [last, 0 .. N-1, first]. The two sentinel pages at the ends let the user
 * swipe "past" the edges; onPageScrollStateChanged then snaps (without
 * animation) from a sentinel to the matching real page.
 */
public abstract class BaseLoopPagerAdapter extends PagerAdapter implements ViewPager.OnPageChangeListener, View.OnTouchListener, Runnable {

    private static final int DEFAULT_DELAY_MILLIS = 5000;

    private final ViewPager mViewPager;
    private final Handler mHandler;
    // Page views, parallel to mList (lazy-created in instantiateItem).
    private final List<View> mViews;
    // Data items including the two sentinel entries (see class comment).
    private final List mList;

    // Pending-change counter used by getItemPosition() to force a full
    // rebind after notifyDataSetChanged().
    private int mChildCount;
    private int mDelayMillis = DEFAULT_DELAY_MILLIS;
    private boolean mRunning;

    public BaseLoopPagerAdapter(ViewPager viewPager) {
        mHandler = new Handler(Looper.getMainLooper());
        mList = new ArrayList<>();
        mViews = new LinkedList<>();
        mViewPager = viewPager;
        // Touch listener pauses auto-scroll while the user's finger is down.
        mViewPager.setOnTouchListener(this);
    }

    /**
     * get the item count or pager count
     *
     * @return
     * @see #notifyDataSetChanged()
     */
    public abstract int getPagerCount();

    /**
     * get the item
     *
     * @param position
     * @return
     * @see #notifyDataSetChanged()
     */
    public abstract Object getItem(int position);

    /**
     * get the viewpager item view
     *
     * @param position
     * @param convertView
     * @param parent
     * @return
     * @see #instantiateItem(ViewGroup, int)
     */
    public abstract View getView(int position, View convertView, ViewGroup parent);

    /**
     * @see #onPageSelected(int)
     */
    public abstract void onPageItemSelected(int position);

    @Override
    public void notifyDataSetChanged() {
        int fixedCount = getPagerCount();
        if (fixedCount <= 0) {
            return;
        } else if (fixedCount == 1) {
            // Single page: no looping, no sentinel entries.
            if (fixedCount != mList.size()) {
                mList.clear();
                mList.add(getItem(0));
            }
            if (fixedCount != mViews.size()) {
                mViews.clear();
                mViews.add(null);
            }
        } else if (fixedCount > 1) {
            if (fixedCount + 2 != mList.size()) {
                mList.clear();
                // add last element in position 0, add all, add first element in last position
                mList.add(getItem(fixedCount - 1));
                for (int i = 0; i < fixedCount; i++) {
                    mList.add(getItem(i));
                }
                mList.add(getItem(0));
            }
            if (fixedCount + 2 != mViews.size()) {
                mViews.clear();
                for (int i = 0; i < mList.size(); i++) {
                    mViews.add(null);
                }
            }
        }
        super.notifyDataSetChanged();
        // this is very important
        // (arms getItemPosition() to return POSITION_NONE once per child,
        // forcing the pager to recreate every page)
        mChildCount = getCount();
        if (mViewPager.getCurrentItem() == 0 && mChildCount != 1) {
            // Skip the leading sentinel so the user starts on real item 0.
            mViewPager.setCurrentItem(1, false);
        }
        stop();
        start();
    }

    public void setDelayMillis(int delayMillis) {
        this.mDelayMillis = delayMillis;
        // Non-positive delays fall back to the default.
        if (delayMillis <= 0) {
            mDelayMillis = DEFAULT_DELAY_MILLIS;
        }
    }

    /**
     * start loop
     */
    public void start() {
        if (!mRunning) {
            post();
            mRunning = true;
        }
    }

    /**
     * stop loop
     */
    public void stop() {
        if (mRunning) {
            mHandler.removeCallbacks(this);
            mRunning = false;
        }
    }

    @Override
    public final boolean onTouch(View v, MotionEvent event) {
        // Pause auto-scroll while the user is touching; resume on release.
        if (event.getAction() == MotionEvent.ACTION_DOWN) {
            stop();
        } else if (event.getAction() == MotionEvent.ACTION_UP) {
            start();
        }
        return false;
    }

    @Override
    public final void run() {
        // One auto-advance tick; reschedules itself via post().
        int currentPosition = mViewPager.getCurrentItem();
        if (0 < currentPosition && currentPosition < mList.size() - 1) {
            if (currentPosition + 1 == mList.size() - 1) {
                // NOTE(review): from the last real page this animates back to
                // page 1 instead of scrolling forward through the sentinel —
                // confirm the backwards animation is intended.
                currentPosition = 1;
            } else {
                currentPosition++;
            }
            mViewPager.setCurrentItem(currentPosition, true);
            post();
        }
    }

    private void post() {
        mHandler.postDelayed(this, mDelayMillis);
    }

    @Override
    public final Object instantiateItem(ViewGroup container, int position) {
        // Map an internal (sentinel-shifted) position to a real item index.
        int fixedPosition = 0;
        if (position == 0) {
            fixedPosition = getPagerCount() - 1;
        } else if (position == mList.size() - 1) {
            fixedPosition = 0;
        } else if (0 < position && position < mList.size() - 1) {
            fixedPosition = position - 1;
        }
        if (mViews.get(position) == null) {
            mViews.set(position, getView(fixedPosition, mViews.get(position), container));
        }
        container.addView(mViews.get(position));
        return mViews.get(position);
    }

    @Override
    public final int getItemPosition(Object object) {
        // Return POSITION_NONE for each child once after a data change so
        // every page is recreated (standard PagerAdapter refresh trick).
        if (mChildCount > 0) {
            mChildCount--;
            return POSITION_NONE;
        }
        return super.getItemPosition(object);
    }

    @Override
    public final void destroyItem(ViewGroup container, int position, Object object) {
        container.removeView((View) object);
    }

    @Override
    public final int getCount() {
        return mList.size();
    }

    @Override
    public final boolean isViewFromObject(View view, Object object) {
        return view == object;
    }

    @Override
    public final void onPageScrolled(int position, float positionOffset, int positionOffsetPixels) {
    }

    @Override
    public final void onPageSelected(int position) {
        // Only report real pages (sentinels excluded), shifted back by 1.
        if (0 < position && position < mList.size() - 1) {
            onPageItemSelected(position - 1);
        }
    }

    @Override
    public final void onPageScrollStateChanged(int state) {
        if (state == ViewPager.SCROLL_STATE_IDLE) {
            // Once the pager settles on a sentinel page, snap (no animation)
            // to the equivalent real page to keep the loop seamless.
            if (mList.size() > 3) {
                if (mViewPager.getCurrentItem() == 0) {
                    mViewPager.setCurrentItem(mList.size() - 2, false);
                } else if (mViewPager.getCurrentItem() == mList.size() - 1) {
                    mViewPager.setCurrentItem(1, false);
                }
            }
        }
    }
}
import os

# Make sure Django can find the settings module before anything imports it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')

#
# NOTE: Do this here, instead of settings, because if you do it from settings
# it loads settings and causes a subtle settings import loop.
#
# NOTE(review): add_to_builtins was removed in Django 1.9; this only works on
# older Django versions — confirm the project's pinned Django release.
from django.template.base import add_to_builtins

# Add template tags to all templates for {% static %} directive.
add_to_builtins('django.contrib.staticfiles.templatetags.staticfiles')

# Get celery loaded here.
# (Imported for its side effect of configuring the Celery app; the name
# itself is re-exported for "from backend import celery_app" style use.)
from backend.celery_app import celery_app
// ConfigSaveContext saves a context to disk func (cm *ConfigManager) ConfigSaveContext(c *Context) error { configInfo, err := cm.configrw.ConfigLoad() if err != nil { return err } if len(c.AuthInfo) != 0 { if _, ok := configInfo.AuthInfos[c.AuthInfo]; !ok { return fmt.Errorf("Credentials %s do not exist", c.AuthInfo) } } else { c.AuthInfo = "default" } if _, ok := configInfo.Clusters[c.Cluster]; !ok { return fmt.Errorf("Cluster %s does not exist", c.Cluster) } return cm.configrw.ConfigSaveContext(c) }
// Matrix downsamples a matrix. func Matrix(matrix [][]float64, maxDimension int) [][]float64 { treshold := defineThreshold(maxDimension) scale := calculateScale(matrix, treshold) return downsample(matrix, scale) }
FRIDAY EVENING UPDATE: Claudette Moore was found safe. No other information was immediately released. === She had just watched her son graduate from OSU, and had holiday plans with her family. But this Christmas she's nowhere to be found. Family of 44-year-old Claudette "Cookie" Moore are desperate for answers this holiday. They say no one has seen or heard from her since she went for a walk near campus two days ago. Ask Darryce Moore to describe her mother Claudette, and tears come more easily than words. “She was everything,” she cried. "Supportive and reliable...I don't know...she's just everything." She was there when Darryce graduated from OSU in May, and when her brother, former Buckeye Tight End JT Moore graduated just Sunday. "She was happy. She was happy that my brother, I think he graduated Magna Cum Laude. She was happy about that, and we had the baby with us, and my family, all the family was there." Claudette, who goes by the nickname "Cookie", had traveled from her home in Youngstown to Columbus for JT's graduation. Around 3 Tuesday afternoon, Darryce says her mother was angry and emotional about a family conflict when she left a campus-area apartment at the corner of High Street and Norwich Avenue to go for a walk. "’I'm going for a walk to calm myself down. I'll be back at 5:30.’” Darryce remembers her saying. “She never came back." She left without a car or a change of clothes. Beverlyn Staples has known Moore all her life, and says she's never disappeared like this before. "Regardless of what's going on in her life, she always has put her kids first. She'll come down here for their college functions, Darryce's basketball games, JT’s football games. She is very family oriented, and nothing I don't think could have kept her away for Christmas." That’s what leads her friends and family to assume the worst. They say nothing else about this makes sense. "I just want to tell her that I love you and I want you to come home,” said Darryce. 
“We all want you to come home." Moore's family and friends plan to gather Saturday morning to search the area of Norwich and High for any clues as to what happened to her. Moore is 5 feet 11, 180 pounds. She was last seen wearing a black bandana with yellow spots, a black jacket, sunglasses, and a red purse. Anyone with information is asked to call Columbus Police.
// Assert functions, if indicator is false then call the apropriate print error function, then exit the program void error_assert(bool indicator, const std::string &message) { if(!indicator) { print_error(message); std::exit(1); } }
P.A.M.E.L.A set to come out on February 2017 P.A.M.E.L.A is set in a former Utopia that has collapsed due to a horrific disease NVYVE Studios announced in an update on steam that they have pushed the release of P.A.M.E.L.A from Fall 2016 back to February 2017. The change in schedule comes as the team buys itself more time to polish and perfect their open world survival horror game; adding new features and getting rid any problems that have come up. Though you may be wondering about the exact day the game will be released on in February, the developers are not able to give an exact date for the release of P.A.M.E.L.A. They have said in their recent steam update that although they are confident about a release date for February, they cannot give a precise date for the game to be released. However, we can look forward to possible confirmation of the release date shortly into 2017. So you won’t have to wait too long to know when you can get your hands on the open world survival horror game. Free Roam Survival Horror As you explore the fallen Utopia and take on various foes, you can uncover the truth about what happened to the former Utopia P.A.M.E.L.A is a first person open world survival horror game. It is set on the Utopian city of Eden which has become a desolate shadow of its former glory. Players take control of a Sleeper who has awakened from cryosleep in Eden and must make use of various hi-tech weapons and equipment along with constructible items that can be upgraded to help you fight through the former utopia. Players will ally themselves with P.A.M.E.L.A, an Artificial Intelligence who oversees Eden. The omnipresent A.I will provide help for the player during their struggles in Eden. P.A.M.E.L.A Announcement Trailer Every encounter is unique The behaviour of other characters is determined on your actions There are a wide range of features for this game. 
You can interact with different characters, including citizens just trying to survive, to security droids and robotic custodians. These different groups have their own unique behaviour and their interactions with the player will change based on the player’s actions, becoming either friends, or foes. You can deal with hostiles with a collection of powerful weapons and equipment that can be customised. You can also enhance your character’s body through the use of bio-augments to make enemy encounters easier. Developers at Work perfecting P.A.M.E.L.A Incredibly polished visuals are something to look forward to for the February Release of P.A.M.E.L.A NVYVE has been working hard on working on their first sci-fi free roam survival horror game for the past two years. To give a snapshot of the amount of work, here is a list of the current improvements made to the game according to the developer blog. Several hundred new animations have been prepared for the NPCS that players would interact with, along with the addition of two new hologram effects for the shield bracelets in the game. New level geometry has been organised and imported for the Garrison district area and a texture batch for the props of the Garrison district were complete. Certain textures and models in the game being removed after the developer’s most recent optimisation pass. Motion Capture There have also been improvements to motion captured animations in addition to the final batch which has been recorded.. More improvements to graphics also include the continuation on integrating the new super-menu for inventory, equipment and upgrades, more colour depth has been applied to grayscaled textures and work has commenced on a new model for “Afflicted” NPCs. So as you can see, the team has been hard at work on this game since they have started the project 2 years ago. 
Their work has been continuous and while the delay is a huge drawback that may disappoint a lot of people, it is better to receive a quality product and enjoy a game that the developers have spent a lot of time working on, than to play a mediocre game that was rushed by the developers. All in all, one can safely say that from this update alone, we can look forward to something extraordinary when P.A.M.E.L.A is released. A game that is worth the wait Time will tell if the game is worth the wait, but looking at the screenshots and looking at the developer updates, it is easy to see that we can expect something incredible from P.A.M.E.L.A. Also, you don’t have to just wait in anticipation for this game to release, but you can also check out their Developer Blog on steam which they constantly update to inform you about the newest update to their game. That way, you can see the game as it develops with possible new screenshots included in each update. For more horror game news If you enjoyed reading about P.A.M.E.L.A, then check out these other horror games that are worth playing Torment: Tides of Numenera release date set for 2017 Until Dawn: A Horror Game That Needs to be on PC 12 Most Gruesome Horror Games Ever Made 15 Best Horror Games Releasing in 2016 Friday The 13th Upcoming Horror Game
<reponame>allisonrandal/pcc_testing<filename>src/nci_test.c
/*
Copyright (C) 2001-2007, Parrot Foundation.
$Id$

=head1 NAME

src/nci_test.c - shared library used for testing the Native Call Interface

=head1 DESCRIPTION

From this code a shared library can be compiled and linked with a command like:

   cc -shared -fpic nci_test.c -o libnci_test.so -g

For non-Unix platforms the above command has to be modified appropriately.

The resulting shared library should be copied to a location like:

   parrot/runtime/parrot/dynext/libnci_test.so

At that location the shared library is loadable with the opcode 'loadlib'.
The functions in the library are available with the opcode 'dlfunc'.
The variables in the library are available with the opcode 'dlvar'.

=head2 Functions

The name of a test function is usually 'nci_<signature>'.
E.g. the function 'nci_ip' takes a 'pointer' and returns a 'int'.

=over 4

=cut

*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <parrot/config.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Declarations of structs */

/* Nested/Outer: used by nci_vpii to test writes through a nested pointer. */
typedef struct Nested {
    int y;
} Nested;

typedef struct Outer {
    int x;
    Nested *nested;
} Outer;

/* Rect_Like: used by nci_pip to test arrays of structs. */
typedef struct Rect_Like {
    int x, y;
    int w, h;
} Rect_Like;

/* Opaque: used by nci_vVi/nci_vp to test opaque struct handles. */
typedef struct Opaque {
    int x;
} Opaque;

/* Function declarations. *** If you add a new test function here, *** please update src/libnci_test.def and src/call_list.txt too. *** */

PARROT_EXPORT int call_back(const char *str);
PARROT_EXPORT char nci_c(void);
PARROT_EXPORT char nci_csc(short, char);
PARROT_EXPORT double nci_d(void);
PARROT_EXPORT double nci_dd(double);
PARROT_EXPORT float nci_f(void);
/* NOTE(review): several names below encode a signature that does not match
 * the parameter list (e.g. nci_fff takes two floats, nci_iiii takes three
 * ints) — these quirks are relied on by the NCI tests; confirm against
 * src/call_list.txt before "fixing" them. */
PARROT_EXPORT float nci_fff(float, float);
PARROT_EXPORT int nci_i(void);
PARROT_EXPORT int nci_ib(int *);
PARROT_EXPORT int nci_iiii(int, int, int);
PARROT_EXPORT int nci_ii3(int, int *);
PARROT_EXPORT int nci_ip(void *);
PARROT_EXPORT int nci_isc(short, char);
PARROT_EXPORT int nci_it(void *);
PARROT_EXPORT int nci_i33(int *, int *);
PARROT_EXPORT int nci_i4i(long *, int);
PARROT_EXPORT long nci_l(void);
PARROT_EXPORT int * nci_p(void);
PARROT_EXPORT void * nci_pi(int);
PARROT_EXPORT void * nci_pii(int, int);
PARROT_EXPORT void * nci_piiii(int, int, int, int);
PARROT_EXPORT void nci_pip(int, Rect_Like *);
PARROT_EXPORT void * nci_pp(void *);
PARROT_EXPORT short nci_s(void);
PARROT_EXPORT short nci_ssc(short, char);
PARROT_EXPORT char * nci_t(void);
PARROT_EXPORT char * nci_tb(void *);
PARROT_EXPORT char * nci_tB(void **);
PARROT_EXPORT char * nci_tt(void *);
PARROT_EXPORT void nci_v(void);
PARROT_EXPORT void nci_vP(void *);
PARROT_EXPORT void nci_vpii(Outer *, int, int);
PARROT_EXPORT void nci_vv(void);
PARROT_EXPORT void nci_vVi(Opaque**, int);
PARROT_EXPORT void nci_vp(Opaque*);
PARROT_EXPORT char * nci_ttt(char *, char *);
PARROT_EXPORT void nci_vfff(float, float, float);
PARROT_EXPORT void nci_vV(const char **);
PARROT_EXPORT void nci_vVVV(const char **, const char **, const char **);

/* Declarations for callback tests */

/* cb_C*: callbacks taking (value, user_data); cb_D*: (user_data, value). */
typedef void (*cb_C1_func)(const char*, void*);
PARROT_EXPORT void nci_cb_C1(cb_C1_func, void*);
typedef void (*cb_C2_func)(int, void*);
PARROT_EXPORT void nci_cb_C2(cb_C2_func, void*);
typedef void (*cb_C3_func)(void*, void*);
PARROT_EXPORT void nci_cb_C3(cb_C3_func, void*);
typedef void (*cb_D1_func)(void*, const char*);
PARROT_EXPORT void nci_cb_D1(cb_D1_func, void*);
typedef void (*cb_D2_func)(void*, int);
PARROT_EXPORT void nci_cb_D2(cb_D2_func, void*);
typedef void (*cb_D3_func)(void*, void*);
PARROT_EXPORT void nci_cb_D3(cb_D3_func, void*);
typedef void (*cb_D4_func)(void*, void*);
PARROT_EXPORT void nci_cb_D4(cb_D4_func, void*);

/* Variable definitions */

PARROT_EXPORT int int_cb_D4 = -55555;
/* NOTE(review): nci_dlvar_char and nci_dlvar_short are declared int while
 * nci_c()/nci_s() return char/short — confirm the narrowing is intentional. */
PARROT_EXPORT int nci_dlvar_char = 22;
PARROT_EXPORT int nci_dlvar_short = 333;
PARROT_EXPORT int nci_dlvar_int = -4444;
PARROT_EXPORT long nci_dlvar_long = -7777777;
PARROT_EXPORT float nci_dlvar_float = -333.0;
PARROT_EXPORT double nci_dlvar_double = -55555.55555;
PARROT_EXPORT char nci_dlvar_cstring[] = "This is a C-string.\n";

/* Function definitions */

/*

=item C<PARROT_EXPORT char nci_c(void)>

Returns the value of the variable C<nci_dlvar_char>, which is set to 22 by
default.

=cut

*/

PARROT_EXPORT char nci_c(void)
{
    return nci_dlvar_char;
}

/*

=item C<PARROT_EXPORT char nci_csc(short l1, char l2)>

Multiplies C<l1> and C<l2> together and returns the first byte of the result.

=cut

*/

PARROT_EXPORT char nci_csc(short l1, char l2)
{
    return l1 * l2;
}

/*

=item C<PARROT_EXPORT double nci_d(void)>

Multiplies the current value of C<nci_dlvar_double> by 10.0, and returns the
new value.

=cut

*/

PARROT_EXPORT double nci_d(void)
{
    nci_dlvar_double *= 10.0;
    return nci_dlvar_double;
}

/*

=item C<PARROT_EXPORT double nci_dd(double d)>

Returns the value C<d> multiplied by 2.0.

=cut

*/

PARROT_EXPORT double nci_dd(double d)
{
    return d * 2.0;
}

/*

=item C<PARROT_EXPORT float nci_f(void)>

Multiplies the value C<nci_dlvar_float> by 10.0 and returns the new value.

=cut

*/

PARROT_EXPORT float nci_f(void)
{
    nci_dlvar_float *= 10.0;
    return nci_dlvar_float;
}

/*

=item C<PARROT_EXPORT float nci_fff(float l1, float l2)>

Returns the result of C<l1> / C<l2>.

=cut

*/

PARROT_EXPORT float nci_fff(float l1, float l2)
{
    return l1 / l2;
}

/*

=item C<PARROT_EXPORT int nci_i(void)>

Returns the current value of <nci_dlvar_int>.

=cut

*/

PARROT_EXPORT int nci_i(void)
{
    return nci_dlvar_int;
}

/*

=item C<PARROT_EXPORT int nci_isc(short l1, char l2)>

Returns the int product of C<l1 * l2>.

=cut

*/

PARROT_EXPORT int nci_isc(short l1, char l2)
{
    return l1 * l2;
}

/*

=item C<PARROT_EXPORT int nci_ip(void *p)>

Performs a series of operations on values stored at pointer C<p>.
(Expects C<p> to point at a struct laid out as {double; float; int; char *};
prints the string member and returns the truncated sum of the numerics.)

=cut

*/

PARROT_EXPORT int nci_ip(void *p)
{
    typedef struct _dfi {
        double d;
        float f;
        int i;
        char *s;
    } dfi;
    dfi *sp = (dfi*) p;
    puts(sp->s);
    fflush(stdout);
    return (int) (sp->d + sp->f + sp->i);
}

/*

=item C<PARROT_EXPORT int nci_it(void *p)>

test calls this with a string
(prints the first two bytes swapped, returns 2)

=cut

*/

PARROT_EXPORT int nci_it(void *p)
{
    fprintf(stderr, "%c%c\n", ((char*) p)[1], ((char *) p)[0]);
    fflush(stderr);
    return 2;
}

/*

=item C<PARROT_EXPORT long nci_l(void)>

Returns the value of C<nci_dlvar_long>.

=cut

*/

PARROT_EXPORT long nci_l(void)
{
    return nci_dlvar_long;
}

/*

=item C<PARROT_EXPORT int * nci_p(void)>

Returns the address of C<nci_dlvar_int>.

=cut

*/

PARROT_EXPORT int * nci_p(void)
{
    return &nci_dlvar_int;
}

/*

=item C<PARROT_EXPORT char * nci_t(void)>

Returns the value of C<nci_dlvar_cstring>.

=cut

*/

PARROT_EXPORT char * nci_t(void)
{
    return nci_dlvar_cstring;
}

/*

=item C<PARROT_EXPORT char * nci_tb(void *p)>

Prints "xx worked", where "xx" is replaced with the first two character values
of C<p>, in reverse order.

=cut

*/

/* Static buffer returned by nci_tb: not reentrant, test-only. */
static char b[] = "xx worked\n";

PARROT_EXPORT char * nci_tb(void *p)
{
    b[0] = ((char*) p)[1];
    b[1] = ((char*) p)[0];
    return b;
}

/*

=item C<PARROT_EXPORT char * nci_tt(void *p)>

Prints "xx worked", where "xx" is replaced with the first two character values
of C<p>, in reverse order.

=cut

*/

/* Static buffer returned by nci_tt: not reentrant, test-only. */
static char s[] = "xx worked\n";

PARROT_EXPORT char * nci_tt(void *p)
{
    s[0] = ((char*) p)[1];
    s[1] = ((char*) p)[0];
    return s;
}

/*

=item C<PARROT_EXPORT char * nci_tB(void **p)>

Prints "xx done", where "xx" is replaced with the first two character values
of C<p>, in reverse order.

=cut

*/

/* Static buffer returned by nci_tB: not reentrant, test-only. */
static char B[] = "xx done\n";

PARROT_EXPORT char * nci_tB(void **p)
{
    B[0] = (*(char**) p)[1];
    B[1] = (*(char**) p)[0];
    return B;
}

/*

=item C<PARROT_EXPORT void * nci_pp(void *p)>

Returns the value C<p> directly.

=cut

*/

PARROT_EXPORT void * nci_pp(void *p)
{
    return p;
}

/*

=item C<PARROT_EXPORT int nci_iiii(int i1, int i2, int i3)>

Prints three integers separated by whitespace to C<stderr>.

=cut

*/

PARROT_EXPORT int nci_iiii(int i1, int i2, int i3)
{
    fprintf(stderr, "%d %d %d\n", i1, i2, i3);
    fflush(stderr);
    return 2;
}

/*

=item C<PARROT_EXPORT int nci_i4i(long * l, int i)>

Returns the product of C<*l> and C<i>, as an int.

=cut

*/

PARROT_EXPORT int nci_i4i(long * l, int i)
{
    return (int) (*l * i);
}

/*

=item C<PARROT_EXPORT int nci_ii3(int a, int *bp)>

Multiplies C<a> and C<*bp> together and returns the result. Updates C<*bp> to
the value 4711.

=cut

*/

PARROT_EXPORT int nci_ii3(int a, int *bp)
{
    int r = a * *bp;
    *bp = 4711;
    return r;
}

/*

=item C<PARROT_EXPORT int call_back(const char *str)>

writes the string C<str> to stdout and returns the value 4711.

=cut

*/

PARROT_EXPORT int call_back(const char *str)
{
    puts(str);
    fflush(stdout);
    return 4711;
}

/*

=item C<PARROT_EXPORT void * nci_pi(int test)>

Performs one from a series of tests, depending on the value given for C<test>.
(Each case returns a pointer to a different static struct layout so the NCI
struct-access tests can probe alignment and nesting.)

=cut

*/

PARROT_EXPORT void * nci_pi(int test)
{
    switch (test) {
      case 0:
        {
            static struct {
                int i[2];
                char c;
            } t = { {42, 100}, 'B' };
            return &t;
        }
      case 1:
        {
            static struct {
                float f[2];
                double d;
            } t = { {42.0, 100.0}, 47.11 };
            return &t;
        }
      case 2:
        {
            static struct {
                char c;
                int i;
            } t = { 10, 20 };
            return &t;
        }
      case 3:
        {
            static struct {
                const char *c;
                int i;
            } t = { "hello", 20 };
            return &t;
        }
      case 4:
        {
            static struct _x {
                int i;
                int j;
                double d;
            } xx = { 100, 77, 200.0 };
            static struct {
                char c;
                struct _x *x;
            } t = { 10, &xx };
            return &t;
        }
      case 5:
        {
            static struct {
                int (*f)(const char *);
            } t = { call_back };
            return &t;
        }
      case 6:
        {
            static struct xt {
                int x;
                struct yt {
                    int i;
                    int j;
                } _y;
                int z;
            } _x = { 32, { 127, 12345 }, 33 };
            return &_x;
        }
      case 7:
        {
            static struct xt {
                char x;
                struct yt {
                    char i;
                    int j;
                } _y;
                char z;
            } _x = { 32, { 127, 12345 }, 33 };
            return &_x;
        }
      case 8:
        {
            static struct _z {
                int i;
                int j;
            } zz = { 100, 77 };
            static struct xt {
                int x;
                struct yt {
                    int i;
                    int j;
                    struct _z *z;
                } _y;
            } _x = { 32, { 127, 12345, &zz }, };
            return &_x;
        }
      case 9:
        {
            static int i = 55555;
            return &i;
        }
      default:
        fprintf(stderr, "unknown test number\n");
    }
    return NULL;
}

/*

=item C<PARROT_EXPORT short nci_s(void)>

Returns the value of C<nci_dlvar_short>.

=cut

*/

PARROT_EXPORT short nci_s(void)
{
    return nci_dlvar_short;
}

/*

=item C<PARROT_EXPORT short nci_ssc(short l1, char l2)>

Returns the product of C<l1 * l2>.

=cut

*/

PARROT_EXPORT short nci_ssc(short l1, char l2)
{
    return l1 * l2;
}

/*

=item C<PARROT_EXPORT void nci_vP(void *pmc)>

Prints "ok" if C<PMC> is not null, prints "got null" otherwise.

=cut

*/

PARROT_EXPORT void nci_vP(void *pmc)
{
    if (pmc)
        puts("ok");
    else
        puts("got null");
}

/*

=back

=head2 Functions used for pdd16 tests

=over 4

=cut

*/

/*

=item C<PARROT_EXPORT void nci_cb_C1(cb_C1_func cb, void* user_data)>

Calls C<cb> function with the string "result" and the given C<user_data>. No
return value.

=cut

*/

PARROT_EXPORT void nci_cb_C1(cb_C1_func cb, void* user_data)
{
    const char *result = "succeeded";
    /* call the cb synchronously */
    (cb)(result, user_data);
    return;
}

/*

=item C<PARROT_EXPORT void nci_cb_C2(cb_C2_func cb, void* user_data)>

Calls the function C<cb> with the pointer C<user_data>. No return value.

=cut

*/

PARROT_EXPORT void nci_cb_C2(cb_C2_func cb, void* user_data)
{
    /* call the cb synchronously */
    (cb)(77, user_data);
    return;
}

/*

=item C<PARROT_EXPORT void nci_cb_C3(cb_C3_func cb, void* user_data)>

Calls function C<cb> with data C<user_data>. No return value.

=cut

*/

static int int_cb_C3 = 99;

PARROT_EXPORT void nci_cb_C3(cb_C3_func cb, void* user_data)
{
    /* call the cb synchronously */
    (cb)(&int_cb_C3, user_data);
    return;
}

/*

=item C<PARROT_EXPORT void nci_cb_D1(cb_D1_func cb, void* user_data)>

Calls function C<cb> with data C<user_data>. No return value.

=cut

*/

PARROT_EXPORT void nci_cb_D1(cb_D1_func cb, void* user_data)
{
    const char *result = "succeeded";
    /* call the cb synchronously */
    (cb)(user_data, result);
    return;
}

/*

=item C<PARROT_EXPORT void nci_cb_D2(cb_D2_func cb, void* user_data)>

Calls function C<cb> with data C<user_data>.

=cut

*/

PARROT_EXPORT void nci_cb_D2(cb_D2_func cb, void* user_data)
{
    /* call the cb synchronously */
    (cb)(user_data, 88);
    return;
}

/*

=item C<PARROT_EXPORT void nci_cb_D3(cb_D3_func cb, void* user_data)>

Calls function C<cb> with data C<user_data>.

=cut

*/

static int int_cb_D3 = 111;

PARROT_EXPORT void nci_cb_D3(cb_D3_func cb, void* user_data)
{
    /* call the cb synchronously */
    (cb)(user_data, &int_cb_D3);
    return;
}

/*

=item C<PARROT_EXPORT void nci_cb_D4(cb_D4_func times_ten, void* user_data)>

Calls function C<times_ten> with data C<user_data> 10 times in a loop.

NOTE(review): the loop below actually runs 9 times (cnt < 9), not 10 as this
description claims — confirm the intended count against t/pmc/nci.t before
changing either.

=cut

*/

PARROT_EXPORT void nci_cb_D4(cb_D4_func times_ten, void* user_data)
{
    int cnt;
    for (cnt = 0; cnt < 9; cnt++) {
        (times_ten)(user_data, &int_cb_D4);
        int_cb_D4++;
    }
    return;
}

/*

=item C<PARROT_EXPORT void nci_pip(int count, Rect_Like *rects)>

Prints a count integer and the coordinates of 4 rectangles.
(Note: always prints exactly 4 rectangles, regardless of C<count>.)

=cut

*/

PARROT_EXPORT void nci_pip(int count, Rect_Like *rects)
{
    int i;
    printf("Count: %d\n", count);
    for (i = 0; i < 4; ++i)
        printf("X: %d\nY: %d\nW: %d\nH: %d\n", rects[i].x, rects[i].y, rects[i].w, rects[i].h);
}

/*

=item C<PARROT_EXPORT int nci_i33(int *double_me, int *triple_me)>

Returns the result C<*double_me * 2 + *triple_me * 3>.
(Also updates both pointees in place.)

=cut

*/

PARROT_EXPORT int nci_i33(int *double_me, int *triple_me)
{
    *double_me *= 2;
    *triple_me *= 3;
    return (*double_me + *triple_me);
}

/*

=item C<PARROT_EXPORT void nci_vpii(Outer *my_data, int my_x, int my_y)>

Updates data in structure pointer C<my_data> with the given data C<my_x> and
C<my_y>.

=cut

*/

PARROT_EXPORT void nci_vpii(Outer *my_data, int my_x, int my_y)
{
    my_data->x = my_x;
    my_data->nested->y = my_y;
}

/*

=item C<PARROT_EXPORT void * nci_piiii(int alpha, int beta, int gamma, int
delta)>

Stores 4 integer values into an array structure, and returns the address of
that structure.

=cut

*/

/* Backing storage for nci_piiii's returned container (not reentrant). */
static int my_array[4];

PARROT_EXPORT void * nci_piiii(int alpha, int beta, int gamma, int delta)
{
    static struct array_container {
        int x;
        int *array;
    } container;
    my_array[0] = alpha;
    my_array[1] = beta;
    my_array[2] = gamma;
    my_array[3] = delta;
    container.x = 4;
    container.array = my_array;
    return &container;
}

/*

=item C<PARROT_EXPORT void * nci_pii(int fac1, int fac2)>

Returns the address of global variable C<nci_dlvar_int> whose value is set to
the product of C<fac1 * fac2>.

=cut

*/

PARROT_EXPORT void * nci_pii(int fac1, int fac2)
{
    nci_dlvar_int = fac1 * fac2;
    return &nci_dlvar_int;
}

/*

=item C<PARROT_EXPORT void nci_v(void)>

Multiplies the global variable C<nci_dlvar_int> times 10.

=cut

*/

PARROT_EXPORT void nci_v(void)
{
    nci_dlvar_int *= 10;
}

/*

=item C<PARROT_EXPORT void nci_vv(void)>

Multiplies the global variable C<nci_dlvar_int> by 3.

=cut

*/

PARROT_EXPORT void nci_vv(void)
{
    nci_dlvar_int *= 3;
}

/*

=item C<PARROT_EXPORT void nci_vVi(Opaque**, int)>

Test an NCI opaque struct out value.

=cut

*/

PARROT_EXPORT void nci_vVi(Opaque **outOpaque, int x)
{
    static Opaque opaque;
    opaque.x = x;
    *outOpaque = &opaque;
}

/*

=item C<PARROT_EXPORT void nci_vp(Opaque*)>

Test that a previously generated opaque struct gets passed back to an NCI
function correctly.
(Doc fix: this function returns void, not int as previously documented.)

=cut

*/

PARROT_EXPORT void nci_vp(Opaque *inOpaque)
{
    if (inOpaque)
        printf("got %d\n", inOpaque->x);
    else
        printf("got null");
}

/*

=item C<PARROT_EXPORT char * nci_ttt(char *s1, char *s2)>

Builds, prints and returns the string "s2, s2, s1".
(Doc fix: previous header showed the wrong signature and output. The caller
is expected to free the malloc()ed result.)

=cut

*/

PARROT_EXPORT char * nci_ttt(char *s1, char *s2)
{
    char* s = (char*) malloc(strlen(s2) + (2 * strlen(s1)) + 5);
    sprintf(s, "%s, %s, %s", s2, s2, s1);
    printf("%s\n", s);
    return s;
}

/* Prints 1 if the float is within 1% of checkval, 0 otherwise. */
static void validate_float(float f, double checkval)
{
    int valid;
    double error_ratio;
    error_ratio = (((double)f) - checkval) / checkval;
    valid = error_ratio <= 0.01 && error_ratio >= -0.01;
    printf("%i\n", valid);
}

/*

=item C<PARROT_EXPORT void nci_vfff(float l1, float l2, float l3)>

Validates that the three floats are close to the expected constants
3456.54, 10.1999 and 14245.567, printing 1 or 0 per value.
(Doc fix: previous header was copy-pasted from nci_fff.)

=cut

*/

PARROT_EXPORT void nci_vfff(float l1, float l2, float l3)
{
    validate_float(l1, 3456.54);
    validate_float(l2, 10.1999);
    validate_float(l3, 14245.567);
}

/*

=item C<PARROT_EXPORT void nci_vV(const char **ptr)>

Stores a constant greeting string into C<*ptr> (string out-parameter test).
(Doc fix: previous header was copy-pasted from nci_fff.)

=cut

*/

PARROT_EXPORT void nci_vV(const char **ptr)
{
    *ptr = "Hello bright new world\n";
}

/*

=item C<PARROT_EXPORT void nci_vVVV(const char **ptr1, const char **ptr2,
const char **ptr3)>

Stores three constant strings into the out-parameters.
(Doc fix: previous header was copy-pasted from nci_fff.)

=cut

*/

PARROT_EXPORT void nci_vVVV(const char **ptr1, const char **ptr2, const char **ptr3)
{
    *ptr1 = "Hello bright new world!\n";
    *ptr2 = "It is a beautiful day!\n";
    *ptr3 = "Go suck a lemon.\n";
}

#ifdef TEST

char l2 = 4;
float f2 = 4.0;

/*

=item C<int main(void)>

Calls test functions C<nci_ssc> and C<nci_fff> and prints their results.

=cut

*/

int main(void)
{
    short l1 = 3;
    float f, f1 = 3.0;
    int l = nci_ssc(l1, l2);
    printf("%d\n", l);
    f = nci_fff(f1, f2);
    printf("%f\n", f);
    return 0;
}

#endif

#ifdef __cplusplus
}
#endif

/*

=back

=head1 SEE ALSO:

F<docs/pdds/pdd16_native_call.pod>
F<config/gen/makefiles/root.in>
F<t/pmc/nci.t>

=cut

*/

/*
 * Local variables:
 *   c-file-style: "parrot"
 * End:
 * vim: expandtab shiftwidth=4:
 */
// MarshalJSON is used to create a JSON representation of this known fingerprint func (k KnownFingerprint) MarshalJSON() ([]byte, error) { return json.Marshal(struct { UserID string FingerprintHex string Untrusted bool }{ UserID: k.UserID, FingerprintHex: hex.EncodeToString(k.Fingerprint), Untrusted: k.Untrusted, }) }
// a helper function that can pause the VM until you press any key in the console. (nice for debugging sometimes.) public void waitOnAnyKey() { System.out.println("Press the any key..."); try { System.in.read(); } catch (IOException e) { e.printStackTrace(); } }
import { css } from '@emotion/css';

// Class name for the container element: positioned relative so absolutely
// positioned children anchor to it, and hidden by default (`display: none`)
// until it is shown elsewhere — TODO confirm which code path toggles it.
export const containerCx = css`
  position: relative;
  display: none;
`;
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2017-present Datadog, Inc.

// +build kubeapiserver

package custommetrics

import (
	"context"
	"fmt"
	"net"

	"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/apiserver"
	basecmd "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd"
	"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
	"github.com/spf13/pflag"

	"github.com/DataDog/datadog-agent/pkg/clusteragent/custommetrics"
	"github.com/DataDog/datadog-agent/pkg/clusteragent/externalmetrics"
	"github.com/DataDog/datadog-agent/pkg/config"
	as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver"
	"github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common"
	"github.com/DataDog/datadog-agent/pkg/util/log"
)

// cmd holds the adapter instance for the lifetime of the server.
var cmd *DatadogMetricsAdapter

// stopCh is closed by clearServerResources when the server shuts down.
var stopCh chan struct{}

// DatadogMetricsAdapter embeds the custom-metrics-apiserver base adapter and
// wires it to Datadog's external metrics providers.
type DatadogMetricsAdapter struct {
	basecmd.AdapterBase
}

const (
	// Config key whose map entries are forwarded to the adapter as CLI flags.
	metricsServerConf string = "external_metrics_provider.config"
)

// RunServer creates and start a k8s custom metrics API server
func RunServer(ctx context.Context) error {
	defer clearServerResources()
	cmd = &DatadogMetricsAdapter{}
	cmd.Name = "datadog-custom-metrics-adapter"
	cmd.FlagSet = pflag.NewFlagSet(cmd.Name, pflag.ExitOnError)

	// Translate every key/value of the metrics-server config map into a
	// "--key=value" flag understood by the base adapter.
	var c []string
	for k, v := range config.Datadog.GetStringMapString(metricsServerConf) {
		c = append(c, fmt.Sprintf("--%s=%s", k, v))
	}

	if err := cmd.Flags().Parse(c); err != nil {
		return err
	}

	provider, err := cmd.makeProviderOrDie(ctx)
	if err != nil {
		return err
	}

	// TODO when implementing the custom metrics provider, add cmd.WithCustomMetrics(provider) here
	cmd.WithExternalMetrics(provider)

	conf, err := cmd.Config()
	if err != nil {
		return err
	}

	server, err := conf.Complete(nil).New(cmd.Name, nil, provider)
	if err != nil {
		return err
	}
	// TODO Add extra logic to only tear down the External Metrics Server if only some components fail.
	// Blocks until ctx is cancelled.
	return server.GenericAPIServer.PrepareRun().Run(ctx.Done())
}

// makeProviderOrDie builds the external metrics provider backing the server:
// either the DatadogMetric-CRD based provider (when the corresponding config
// flag is set) or the legacy ConfigMap-store based provider.
func (a *DatadogMetricsAdapter) makeProviderOrDie(ctx context.Context) (provider.ExternalMetricsProvider, error) {
	client, err := a.DynamicClient()
	if err != nil {
		log.Infof("Unable to construct dynamic client: %v", err)
		return nil, err
	}

	apiCl, err := as.GetAPIClient()
	if err != nil {
		log.Errorf("Could not build API Client: %v", err)
		return nil, err
	}

	mapper, err := a.RESTMapper()
	if err != nil {
		log.Errorf("Unable to construct discovery REST mapper: %v", err)
		return nil, err
	}

	if config.Datadog.GetBool("external_metrics_provider.use_datadogmetric_crd") {
		return externalmetrics.NewDatadogMetricProvider(ctx, apiCl)
	}

	datadogHPAConfigMap := custommetrics.GetConfigmapName()
	store, err := custommetrics.NewConfigMapStore(apiCl.Cl, common.GetResourcesNamespace(), datadogHPAConfigMap)
	if err != nil {
		log.Errorf("Unable to create ConfigMap Store: %v", err)
		return nil, err
	}

	return custommetrics.NewDatadogProvider(ctx, client, mapper, store), nil
}

// Config creates the configuration containing the required parameters to communicate with the APIServer as an APIService
func (a *DatadogMetricsAdapter) Config() (*apiserver.Config, error) {
	if a.FlagSet.Lookup("cert-dir").Changed == false {
		// Ensure backward compatibility. Was hardcoded before.
		// Config flag is now to be added to the map `external_metrics_provider.config` as, `cert-dir`.
		a.SecureServing.ServerCert.CertDirectory = "/etc/datadog-agent/certificates"
	}
	if a.FlagSet.Lookup("secure-port").Changed == false {
		// Ensure backward compatibility. 443 by default, but will error out if incorrectly set.
		// refer to apiserver code in k8s.io/apiserver/pkg/server/option/serving.go
		a.SecureServing.BindPort = config.Datadog.GetInt("external_metrics_provider.port")
	}
	if err := a.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
		log.Errorf("Failed to create self signed AuthN/Z configuration %#v", err)
		return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
	}
	return a.CustomMetricsAdapterServerOptions.Config()
}

// clearServerResources closes the connection and the server
// stops listening to new commands.
func clearServerResources() {
	if stopCh != nil {
		close(stopCh)
	}
}
/**
 * Adds the given internship offer to the database, but only after checking
 * that the company has the right to post new offers. On success the
 * internship's company foreign key is set to {@code companyId} before the
 * insert.
 *
 * @param companyId  id of the company attempting to post.
 * @param internship internship offer to persist.
 * @return the DAO insert result when the company may post (presumably the
 *         new internship id, &gt; 0 on success — confirm against
 *         {@code internshipDAO.insert}); -1 when the company is not allowed
 *         to post. NOTE(review): the previous javadoc claimed 0 was returned
 *         when the company cannot post, but this method leaves status at -1
 *         in that case — verify the intended contract with callers.
 */
public int addInternshipIfAllowed(int companyId, Internship internship) {
    int status = -1;
    if (companyDAO.hasRightToPost(companyId)) {
        internship.setCompany_fk(companyId);
        status = internshipDAO.insert(internship);
    }
    return status;
}
Oh, my God! I really like this movie! And I never ever thought that I’m going to say this – but this year’s TIFF is definitely full of surprises! As usual, I have a feeling that I have to write something spectacular about some movie. But this time, I don’t give a shit if you’re going to pay attention to this title, or not, simply because Ole Christian Madsens‘ Superclasico deserves only one word – Super! This comedy has it all. Great cast, great dialogues. I mean – when someone says “Yes, Julio Iglesias, it’s lobe!” – then I say – yeah man, you’re funny! Really refreshing project, or as they already describe it – “quite possibly the happiest movie about divorce ever made.” So, what do you get when you mix cold Denmark and hot Argentina? Superclasico, indeed! And now, the synopsis part: “Wine store owner Christian (Anders W. Berthelsen) is on the verge of bankruptcy and he is as unsuccessful in just about every other aspect of life. His wife Anna (Paprika Steen) has left him. She now works as a successful sports agent in Buenos Aires and lives a life of luxury with star football player Juan Diaz. One day, Christian and their 16-year-old son get on a plane for Buenos Aires. Christian arrives under the pretense of wanting to sign the divorce papers with Anna, but in truth, he wants to try to win her back… Superclasico is filmed in Buenos Aires, and as I mentioned above, the movie comes from the acclaimed Danish director of Flame And Citron, Ole Christian Madsen. Movie stars great Anders W. Berthelsen, Paprika Steen, Sebastian Estevanez, Jamie Morton, Adriana Mascialino and Dafne Schilling. Let me know what you think about this! Superclasico Trailer Superclasico Clip #1 Superclasico Clip #2 Superclasico Clip #3 Superclasico Clip #4
/// Extract the function name from this section fn name(&self) -> String { self.0 .children() .nth(0) .unwrap() .text() .trim() .to_lowercase() .split_whitespace() .collect::<Vec<_>>() .join("_") }
CTV Vancouver The victim of a scam involving the payment system of the new Compass Cards is warning other commuters to be cautious. Arbab Mehrab, who sells hotdogs outside the Commercial-Broadway SkyTrain Station, purchased a Compass Card from a stranger who said he was leaving town and could no longer use it. The suspect walked Mehrab and a security guard to the Compass machine to confirm it had a balance of $120. “[It was] valid for a whole month,” Mehrab said. “I trusted [him].” The problem is that the balance quickly vanished. Because the cards are tied to a registered account, it appears the account holder quickly transferred the cash to another card. The $120 was gone. “When I found out it was a scam I got sad,” said Mehrab. TransLink spokesperson Jennifer Morland says they’re upset to hear about what happened, but cautioned people against buying from an unknown source. “Customers should buy their Compass Cards through the available channels. We have Compass vending machines at every SkyTrain station," she said. Mehrab says he learned a lesson, albeit in a hard way. He is now telling others to be careful. “This is a good lesson… all the scams going around,” he said. With a report from CTV Vancouver’s Scott Roberts
/*!
 * @file DFRobot_BMP3XX.h
 * @brief Define infrastructure of DFRobot_BMP3XX class
 * @details This is a pressure and temperature sensor that can be controlled via IIC and SPI.
 * @n BMP(390L/388) has temperature compensation, data oversampling, IIR filter, binary sampling and other functions
 * @n These functions improve the accuracy of data collected by the BMP (390L/388) sensor.
 * @n BMP (390L/388) also has a 512-byte FIFO data buffer, which greatly improves its usability
 * @n Similarly, BMP (390L/388) has an interrupt pin, which can be used in a power-saving way without using software algorithms.
 * @copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
 * @license The MIT License (MIT)
 * @author [qsjhyy](<EMAIL>)
 * @version V1.0
 * @date 2021-04-01
 * @url https://github.com/DFRobot/DFRobot_BMP3XX
 */
#ifndef __DFROBOT_BMP3XX_H__
#define __DFROBOT_BMP3XX_H__

#include <Arduino.h>
#include <Wire.h>
#include <SPI.h>

// #define ENABLE_DBG //!< Open this macro and you can see the details of the program
#ifdef ENABLE_DBG
#define DBG(...) {Serial.print("[");Serial.print(__FUNCTION__); Serial.print("(): "); Serial.print(__LINE__); Serial.print(" ] "); Serial.println(__VA_ARGS__);}
#else
#define DBG(...)
#endif

#define STANDARD_SEA_LEVEL_PRESSURE_PA 101325 ///< Standard sea level pressure, unit: Pa (used as the altitude reference)

#define DFROBOT_BMP3XX_IIC_ADDR_SDO_GND uint8_t(0x76) ///< IIC communication address when SDO is grounded
#define DFROBOT_BMP3XX_IIC_ADDR_SDO_VDD uint8_t(0x77) ///< IIC communication address when SDO is connected to power

#define DFROBOT_BMP388_ID 0x50 ///< BMP388 chip version
#define DFROBOT_BMP390L_ID 0x60 ///< BMP390L chip version

/* BMP3XX register address map */
#define BMP3XX_CHIP_ID uint8_t(0x00) ///< The "CHIP_ID" register contains the chip identification code.
#define BMP3XX_REV_ID uint8_t(0x01) ///< The "Rev_ID" register contains the mask revision of the ASIC.
#define BMP3XX_ERR_REG uint8_t(0x02) ///< Sensor error conditions are reported in the "ERR_REG" register.
#define BMP3XX_STATUS uint8_t(0x03) ///< The sensor status flags are stored in the "STATUS" register.
#define BMP3XX_P_DATA_PA uint8_t(0x04) ///< The 24-bit pressure data is split and stored in three consecutive registers.
#define BMP3XX_T_DATA_C uint8_t(0x07) ///< The 24-bit temperature data is split and stored in three consecutive registers.
#define BMP3XX_TIME uint8_t(0x0C) ///< The 24-bit sensor time is split and stored in three consecutive registers.
#define BMP3XX_EVENT uint8_t(0x10) ///< The "EVENT" register contains the sensor status flags.
#define BMP3XX_INT_STATUS uint8_t(0x11) ///< The "INT_STATUS" register shows interrupt status and is cleared after reading.
#define BMP3XX_FIFO_LENGTH uint8_t(0x12) ///< The FIFO byte counter indicates the current fill level of the FIFO buffer.
#define BMP3XX_FIFO_DATA uint8_t(0x14) ///< The "FIFO_DATA" is the data output register.
#define BMP3XX_FIFO_WTM uint8_t(0x15) ///< The FIFO watermark size is 9 bits and is therefore written to the FIFO_WTM_0 and FIFO_WTM_1 registers (low byte).
#define BMP3XX_FIFO_WTM2 uint8_t(0x16) ///< The FIFO watermark size is 9 bits and is therefore written to the FIFO_WTM_0 and FIFO_WTM_1 registers (high bit).
#define BMP3XX_FIFO_COFG_1 uint8_t(0x17) ///< The "FIFO_CONFIG_1" register contains the FIFO frame content configuration.
#define BMP3XX_FIFO_COFG_2 uint8_t(0x18) ///< The "FIFO_CONFIG_2" register extends the FIFO_CONFIG_1 register.
#define BMP3XX_INT_CTRL uint8_t(0x19) ///< Interrupt configuration can be set in the "INT_CTRL" register.
#define BMP3XX_IF_CONF uint8_t(0x1A) ///< The "IF_CONF" register controls the serial interface settings.
#define BMP3XX_PWR_CTRL uint8_t(0x1B) ///< The "PWR_CTRL" register enables or disables pressure and temperature measurement.
#define BMP3XX_OSR uint8_t(0x1C) ///< The "OSR" register controls the oversampling settings for pressure and temperature measurements.
#define BMP3XX_ODR uint8_t(0x1D) ///< The "ODR" register sets the configuration of the output data rates by means of setting the subdivision/subsampling.
#define BMP3XX_IIR_CONFIG uint8_t(0x1F) ///< The "CONFIG" register controls the IIR filter coefficients.
#define BMP3XX_CALIB_DATA uint8_t(0x31) ///< 0x31-0x45 is calibration data.
#define BMP3XX_CMD uint8_t(0x7E) ///< Command register, can soft reset and clear all FIFO data.

/* Output data rate constants (subdivision/sub-sampling mode).
 * The prescaler doubles with each step (2^n), halving the ODR. */
#define BMP3XX_ODR_200_HZ uint8_t(0x00) ///< Prescaler:1; ODR 200Hz; Sampling period:5 ms
#define BMP3XX_ODR_100_HZ uint8_t(0x01) ///< Prescaler:2; Sampling period:10 ms
#define BMP3XX_ODR_50_HZ uint8_t(0x02) ///< Prescaler:4; Sampling period:20 ms
#define BMP3XX_ODR_25_HZ uint8_t(0x03) ///< Prescaler:8; Sampling period:40 ms
#define BMP3XX_ODR_12P5_HZ uint8_t(0x04) ///< Prescaler:16; Sampling period:80 ms
#define BMP3XX_ODR_6P25_HZ uint8_t(0x05) ///< Prescaler:32; Sampling period:160 ms
#define BMP3XX_ODR_3P1_HZ uint8_t(0x06) ///< Prescaler:64; Sampling period:320 ms
#define BMP3XX_ODR_1P5_HZ uint8_t(0x07) ///< Prescaler:128; Sampling period:640 ms
#define BMP3XX_ODR_0P78_HZ uint8_t(0x08) ///< Prescaler:256; Sampling period:1.280 s
#define BMP3XX_ODR_0P39_HZ uint8_t(0x09) ///< Prescaler:512; Sampling period:2.560 s
#define BMP3XX_ODR_0P2_HZ uint8_t(0x0A) ///< Prescaler:1024; Sampling period:5.120 s
#define BMP3XX_ODR_0P1_HZ uint8_t(0x0B) ///< Prescaler:2048; Sampling period:10.24 s
#define BMP3XX_ODR_0P05_HZ uint8_t(0x0C) ///< Prescaler:4096; Sampling period:20.48 s
#define BMP3XX_ODR_0P02_HZ uint8_t(0x0D) ///< Prescaler:8192; Sampling period:40.96 s
#define BMP3XX_ODR_0P01_HZ uint8_t(0x0E) ///< Prescaler:16384; Sampling period:81.92 s
#define BMP3XX_ODR_0P006_HZ uint8_t(0x0F) ///< Prescaler:32768; Sampling period:163.84 s
#define BMP3XX_ODR_0P003_HZ uint8_t(0x10) ///< Prescaler:65536; Sampling period:327.68 s
#define BMP3XX_ODR_0P0015_HZ uint8_t(0x11) ///< Prescaler:131072; ODR 25/16384Hz; Sampling period:655.36 s

/* IIR filter coefficient setting constants (register value is the coefficient
 * selector shifted left by one bit). */
#define BMP3XX_IIR_CONFIG_COEF_0 uint8_t(0x00) ///< Filter coefficient is 0 -> bypass mode
#define BMP3XX_IIR_CONFIG_COEF_1 uint8_t(0x02) ///< Filter coefficient is 1
#define BMP3XX_IIR_CONFIG_COEF_3 uint8_t(0x04) ///< Filter coefficient is 3
#define BMP3XX_IIR_CONFIG_COEF_7 uint8_t(0x06) ///< Filter coefficient is 7
#define BMP3XX_IIR_CONFIG_COEF_15 uint8_t(0x08) ///< Filter coefficient is 15
#define BMP3XX_IIR_CONFIG_COEF_31 uint8_t(0x0A) ///< Filter coefficient is 31
#define BMP3XX_IIR_CONFIG_COEF_63 uint8_t(0x0C) ///< Filter coefficient is 63
#define BMP3XX_IIR_CONFIG_COEF_127 uint8_t(0x0E) ///< Filter coefficient is 127

/* CMD(0x7E) register commands */
#define BMP3XX_CMD_NOP 0x00 ///< Reserved. No command.
#define BMP3XX_CMD_FIFO_FLUSH 0xB0 ///< Clears all data in the FIFO, does not change FIFO_CONFIG registers.
#define BMP3XX_CMD_SOFTRESET 0xB6 ///< Triggers a reset, all user configuration settings are overwritten with a default state.
/* FIFO Header */ ///< FIFO temperature pressure header frame #define BMP3_FIFO_TEMP_PRESS_FRAME UINT8_C(0x94) ///< FIFO temperature header frame #define BMP3_FIFO_TEMP_FRAME UINT8_C(0x90) ///< FIFO pressure header frame #define BMP3_FIFO_PRESS_FRAME UINT8_C(0x84) ///< FIFO time header frame #define BMP3_FIFO_TIME_FRAME UINT8_C(0xA0) ///< FIFO configuration change header frame #define BMP3_FIFO_CONFIG_CHANGE UINT8_C(0x48) ///< FIFO error header frame #define BMP3_FIFO_ERROR_FRAME UINT8_C(0x44) #define BMP3_FIFO_HEADER_LEN UINT8_C(1) ///< The byte length of the header in a frame of FIFO data is 1 #define BMP3_FIFO_DATA_LEN UINT8_C(3) ///< The byte length of each data in a frame of FIFO data is 3 /* Convenience Macro */ #define BMP3XX_CALIB_DATA_LEN (21) ///< Number of calibration data bytes in the BMP3XX register #define BMP3XX_CONCAT_BYTES(msb, lsb) (((uint16_t)msb << 8) | (uint16_t)lsb) ///< Macro combines two 8-bit data into one 16-bit data /* Sampling period corresponding to ODR in microseconds */ static const uint32_t PROGMEM correspondingSamplingPeriod[] = { 5000, 10000, 20000, 40000, 80000, 160000, 320000, 640000, 1280000, 2560000, 5120000, 10240000, 20480000, 40960000, 81920000, 163840000, 327680000, 655360000 }; class DFRobot_BMP3XX { public: #define ERR_OK 0 // No error #define ERR_DATA_BUS (-1) // data bus error #define ERR_IC_VERSION (-2) // the chip version not match /***************** register configuration structs ******************************/ /** * @struct sFIFOMode1_t * @brief “FIFO_CONFIG_1”(0x17)register includes FIFO frame content configuration * @note register struct: * @n ---------------------------------------------------------------------------------------------------- * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ---------------------------------------------------------------------------------------------------- * @n | reserved | fifo_temp_en | fifo_press_en | fifo_time_en | fifo_stop_on_full | fifo_mode | * @n 
---------------------------------------------------------------------------------------------------- */ typedef struct { uint8_t FIFOMode: 1; /**< power up is 0, 0: disable FIFO mode, 1: enable FIFO mode */ uint8_t FIFOStopOnFull: 1; /**< power up is 1, 0: continue writing when FIFO on full, 1: stop writing when FIFO on full */ uint8_t FIFOTimeEN: 1; /**< power up is 0, 0: disable, return sensor time frame after the last valid data frame, 1: enable, return sensor time frame */ uint8_t FIFOPressEN: 1; /**< power up is 0, 0: disable pressure data buffer, 1: enable pressure data buffer */ uint8_t FIFOTempEN: 1; /**< power up is 0, 0: disable temperature data buffer, 1: enable temperature data buffer */ uint8_t reserved: 3; /**< reserved bit */ } __attribute__ ((packed)) sFIFOMode1_t; // packed struct variables(knowledge point: __attribute__ ((packed))is to avoid byte alignment and ensure packed storage), we use the variables to store register related content /** * @struct sFIFOMode2_t * @brief “FIFO_CONFIG_2”(0x18)the register expands FIFO_CONFIG_1 register * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved | data_select | fifo_subsampling | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t FIFOSubsampling: 3; /**< sampling select in pressure & temperature data FIFO, coefficient is 2^FIFOSubsampling, power up is 2 */ uint8_t dataSelect: 2; /**< select data source of pressure and temperature, power up is 0, 0: unfiltered data(compensated or uncompensated), 1: filtered data(compensated or uncompensated), 2or3: reserve, same as "unfilt" */ uint8_t reserved: 3; /**< reserved bit */ } __attribute__ ((packed)) sFIFOMode2_t; /** * @struct sIntMode_t * @brief interrupt 
configuration can be set in "INT_CTRL"(0x19) register * @details It affects INT_STATUS register and INT pin * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved | drdy_en | int_ds | ffull_en | fwtm_en | int_latch | int_level | int_od | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t INTOD: 1; /**< pin output mode, power up is 0, 0: push-pull, 1: open-drain */ uint8_t INTActiveLevel: 1; /**< pin level, power up is 1, 0: active low, 1: active high */ uint8_t INTLatch: 1; /**< enable INT pin and INT_STATUS register lock-in interrupt, power up is 0, 0: disable, 1: enable */ uint8_t INTFWTMEN: 1; /**< enable INT pin and INT_STATUS enable FIFO watermark interrupt, power up is 0, 0: disable, 1: enable */ uint8_t INTFFullEN: 1; /**< enable INT pin and INT_STATUS enable FIFO all interrupt, power up is 0, 0: disable, 1: enable */ uint8_t INTInitialLevel: 1; /**< power up is 0, 0: low, 1: high */ uint8_t INTDrdyEN: 1; /**< enable INT pin and INT_STATUS temperature/pressure data ready interrupt, power up is 0, 0: disable, 1: enable */ uint8_t reserved: 1; /**< reserved bit */ } __attribute__ ((packed)) sIntMode_t; /** * @struct sSerialMode_t * @brief “IF_CONF”(0x1A)register serial port control setting * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved | i2c_wdt_sel | i2c_wdt_en | spi3 | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t SPI3: 1; /**< power up is 0, 0: SPI four-wire 
mode, 1: SPI three-wire mode */ uint8_t I2CWDTEN: 1; /**< power up is 0, 0: disable I2C WDT, 1: enable I2C WDT */ uint8_t I2CWDTSel: 1; /**< power up is 0, 0: set I2C WDT timeout value to 1.25ms, 1: set I2C WDT timeout value to 40ms */ uint8_t reserved: 5; /**< reserved bit */ } __attribute__ ((packed)) sSerialMode_t; /** * @struct sPWRCTRL_t * @brief “PWR_CTRL”(0x1B)the register enable or disable pressure and temperature measurement * @details measurement mode can be set here: * @n Sleep mode: It will be in sleep mode by default after power-on reset. In this mode, no measurement is performed and power consumption is minimal. All registers are accessible for reading the chip ID and compensation coefficient. * @n Forced mode: In this mode, the sensor will take a single measurement according to the selected measurement and filtering options. After the measurement is completed, the sensor will return to sleep mode, and the measurement result can be obtained in the register. * @n Normal mode: Continuously loop between the measurement period and the standby period. Measurement rate is set in odr_sel register, and prescalers with different sampling frequency Fsampling=200Hz can be selected. 
* @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved2 | power_modes | reserved1 | temp_en | press_en | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t pressEN: 1; /**< power up is 0, 0: disable pressure sensing, 1: enable pressure sensing */ uint8_t tempEN: 1; /**< power up is 0, 0: disable temperature sensing, 1: enable temperature sensing */ uint8_t reserved1: 2; /**< reserved bit */ uint8_t powerMode: 2; /**< power up is 0, 0: sleep mode, 1or2: enforcing mode, 3: normal mode */ uint8_t reserved2: 2; /**< reserved bit */ } __attribute__ ((packed)) sPWRCTRL_t; /** * @struct sOverSamplingMode_t * @brief “OSR”(0x1C)oversampling setting of register control pressure and temperature measurement * @details 6 configurations of temperature and pressure oversampling mode: * @n ------------------------------------------------------------------------------------------ * @n | oversampling setting | osr_p | pressure oversampling | typical pressure resolution | recommended temperature oversampling | * @n ------------------------------------------------------------------------------------------ * @n | ultra-low power consumption | 000 | ×1 | 16 bit / 2.64 Pa | ×1 | * @n ------------------------------------------------------------------------------------------ * @n | low power consumption | 001 | ×2 | 16 bit / 2.64 Pa | ×1 | * @n ------------------------------------------------------------------------------------------ * @n | standard resolution | 010 | ×4 | 18 bit / 0.66 Pa | ×1 | * @n ------------------------------------------------------------------------------------------ * @n | high resolution | 011 | ×8 | 19 bit / 0.33 Pa | ×1 | * @n 
------------------------------------------------------------------------------------------ * @n | ultrahigh resolution | 100 | ×16 | 20 bit / 0.17 Pa | ×2 | * @n ------------------------------------------------------------------------------------------ * @n | highest resolution | 101 | ×32 | 21 bit / 0.085 Pa | ×2 | * @n ------------------------------------------------------------------------------------------ * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved | osr_t | osr_p | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t OSRPress: 3; /**< power up is 0, 6 pressure oversampling modes can be set */ uint8_t OSRTemp: 3; /**< power up is 0, temperature mode is also available in 6 settings, similar to pressure mode setting, but it is recommended to use the temperature oversampling mode recommended in the table */ uint8_t reserved: 2; /**< reserved bit */ } __attribute__ ((packed)) sOverSamplingMode_t; /***************** the struct to calibrate compensation data ******************************/ /** * @struct sCalibData_t * @brief buffer the struct of calibrating compensation data in register */ typedef struct { uint16_t parT1; uint16_t parT2; int8_t parT3; int16_t parP1; int16_t parP2; int8_t parP3; int8_t parP4; uint16_t parP5; uint16_t parP6; int8_t parP7; int8_t parP8; int16_t parP9; int8_t parP10; int8_t parP11; int64_t tempLin; } sCalibData_t; /** * @struct sQuantizedCalibData_t * @brief quantized compensation data */ typedef struct { float parT1; float parT2; float parT3; float parP1; float parP2; float parP3; float parP4; float parP5; float parP6; float parP7; float parP8; float parP9; float parP10; float parP11; float tempLin; } sQuantizedCalibData_t; 
/***************** device status information struct ******************************/ /** * @struct sSensorErrorStatus_t * @brief sensor error cases are reported in "ERR_REG" register * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved | conf_err | cmd_err | fatal_err | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t fatalError: 1; /**< fatal error, unrecoverable error */ uint8_t CMDError: 1; /**< the command fails to be executed and is cleared after being read */ uint8_t configError: 1; /**< detect the sensor configuration error (only work in normal mode), and it's cleared after being read */ uint8_t reserved: 5; /**< reserved bit */ } __attribute__ ((packed)) sSensorErrorStatus_t; /** * @struct sSensorStatus_t * @brief the sensor status is buffered in "STATUS" register * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved2 | drdy_temp | drdy_press | cmd_rdy | reserved1 | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t reserved1: 4; /**< reserved bit */ uint8_t CMDReady: 1; /**< CMD decoder status */ uint8_t pressDrdy: 1; /**< pressure data ready */ uint8_t tempDrdy: 1; /**< temperature data ready */ uint8_t reserved2: 1; /**< reserved bit */ } __attribute__ ((packed)) sSensorStatus_t; /** * @struct sSensorEvent_t * @brief "EVENT" register includes sensor status * @note register struct: * @n 
------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved | itf_act_pt | por_detected | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t porDetected: 1; /**< the device is set to "1" after power on or soft reset, and cleared after reading */ uint8_t itfActPt: 1; /**< the device is set to "1" when a serial port transaction occurs during pressure or temperature conversion, and is cleared after reading */ uint8_t reserved: 6; /**< reserved bit */ } __attribute__ ((packed)) sSensorEvent_t; /** * @struct sSensorINTStatus_t * @brief "INT_STATUS" register displays interrupt status and clear after reading * @note register struct: * @n ------------------------------------------------------------------------------------------ * @n | b7 | b6 | b5 | b4 | b3 | b2 | b1 | b0 | * @n ------------------------------------------------------------------------------------------ * @n | reserved2 | drdy | reserved1 | ffull_int | fwtm_int | * @n ------------------------------------------------------------------------------------------ */ typedef struct { uint8_t fwtmINT: 1; /**< FIFO watermark interrupt */ uint8_t ffullINT: 1; /**< FIFO full interrupt */ uint8_t reserved1: 1; /**< reserved bit */ uint8_t dataReady: 1; /**< data ready interrupt */ uint8_t reserved2: 4; /**< reserved bit */ } __attribute__ ((packed)) sSensorINTStatus_t; /** * @struct sBMP3XXDeviceInfo_t * @brief BMP3XX device information struct */ typedef struct { /* chip ID */ uint8_t chipID; /* calibration compensation coefficient of measured data */ sCalibData_t regCalibData; /**< store the calibration data read by register */ sQuantizedCalibData_t quantizedCalibData; /**< buffer the quantized calibration data */ float seaLevelPressPa; /**< sea level 
atmospheric pressure used to calculate altitude */

  /* sensor configuration data: cached copies of the register-configuration structs */
  sFIFOMode1_t FIFOMode1;                /**< FIFO configuration register one */
  sFIFOMode2_t FIFOMode2;                /**< FIFO configuration register two */
  sIntMode_t intMode;                    /**< interrupt configuration */
  sSerialMode_t serialMode;              /**< serial-interface (I2C/SPI) configuration */
  sPWRCTRL_t PWRMode;                    /**< power-control / measurement-enable configuration */
  sOverSamplingMode_t overSamplingMode;  /**< pressure/temperature oversampling configuration */

  /* sensor status data: cached copies of the read-only status registers */
  sSensorErrorStatus_t errStatus;        /**< error flags (fatal / command / configuration) */
  sSensorStatus_t sensorStatus;          /**< CMD-decoder state and data-ready flags */
  sSensorEvent_t sensorEvent;            /**< power-on-reset and interface-activity events */
  sSensorINTStatus_t INTStatus;          /**< interrupt status flags */
} sBMP3XXDeviceInfo_t;

/***************** enumerated types for every register's detailed configuration **********************
 * NOTE: each enumerator is pre-shifted to its bit position inside the target register, so
 * one option from each group can be combined into a single mode byte with bitwise OR.
 ******************************************************************************************************/

/**
 * @enum eFIFOMode_t
 * @brief whether to enable the FIFO buffer
 */
typedef enum {
  eFIFODIS = 0,   /**< disable FIFO */
  eFIFOEN,        /**< enable FIFO */
}eFIFOMode_t;

/**
 * @enum eFIFOStopOnFull_t
 * @brief whether to keep writing once the FIFO is full
 */
typedef enum {
  eFIFOStopOnFullDIS = 0<<1,   /**< disable: continue writing when full */
  eFIFOStopOnFullEN = 1<<1,    /**< enable: stop writing when full */
}eFIFOStopOnFull_t;

/**
 * @enum eFIFOTime_t
 * @brief whether to append a sensor-time frame after the last valid data frame
 */
typedef enum {
  eFIFOTimeDIS = 0<<2,   /**< disable */
  eFIFOTimeEN = 1<<2,    /**< enable */
}eFIFOTime_t;

/**
 * @enum eFIFOPress_t
 * @brief whether to buffer pressure data in the FIFO
 */
typedef enum {
  eFIFOPressDIS = 0<<3,   /**< disable pressure data buffering */
  eFIFOPressEN = 1<<3,    /**< enable pressure data buffering */
}eFIFOPress_t;

/**
 * @enum eFIFOTemp_t
 * @brief whether to buffer temperature data in the FIFO
 */
typedef enum {
  eFIFOTempDIS = 0<<4,   /**< disable temperature data buffering */
  eFIFOTempEN = 1<<4,    /**< enable temperature data buffering */
}eFIFOTemp_t;

/**
 * @enum eFIFOSubsampling_t
 * @brief FIFO downsampling selection for pressure & temperature data; the
 * effective factor is 2^FIFOSubsampling (power-on default is 2)
 */
typedef enum {
  eFIFOSubsampling0 = 0,
  eFIFOSubsampling1,
  eFIFOSubsampling2,
  eFIFOSubsampling3,
  eFIFOSubsampling4,
  eFIFOSubsampling5,
  eFIFOSubsampling6,
  eFIFOSubsampling7,
}eFIFOSubsampling_t;

/**
 * @enum eFIFODataSelect_t
 * @brief data source for pressure and temperature stored in the FIFO (power-on default 0)
 */
typedef enum {
  eFIFODataSelectDIS = 0<<3,   /**< unfiltered data (compensated or uncompensated) */
  eFIFODataSelectEN = 1<<3,    /**< filtered data (compensated or uncompensated) */
  eFIFODataSelectNO2 = 2<<3,   /**< reserved, behaves like "unfiltered" */
  eFIFODataSelectNO3 = 3<<3,   /**< reserved, behaves like "unfiltered" */
}eFIFODataSelect_t;

/**
 * @enum eINTPinMode_t
 * @brief electrical output mode of the interrupt pin
 */
typedef enum {
  eINTPinPP = 0,   /**< push-pull */
  eINTPinOD,       /**< open-drain */
}eINTPinMode_t;

/**
 * @enum eINTPinActiveLevel_t
 * @brief active signal level of the interrupt pin
 */
typedef enum {
  eINTPinActiveLevelLow = 0<<1,    /**< active low */
  eINTPinActiveLevelHigh = 1<<1,   /**< active high */
}eINTPinActiveLevel_t;

/**
 * @enum eINTLatch_t
 * @brief whether the INT pin and the INT_STATUS register latch interrupts
 */
typedef enum {
  eINTLatchDIS = 0<<2,   /**< disable */
  eINTLatchEN = 1<<2,    /**< enable */
}eINTLatch_t;

/**
 * @enum eIntFWtm_t
 * @brief whether the FIFO-watermark interrupt is reported on INT pin / INT_STATUS
 */
typedef enum {
  eIntFWtmDis = 0<<3,   /**< disable */
  eIntFWtmEn = 1<<3,    /**< enable */
}eIntFWtm_t;

/**
 * @enum eINTFFull_t
 * @brief whether the FIFO-full interrupt is reported on INT pin / INT_STATUS
 */
typedef enum {
  eINTFFullDIS = 0<<4,   /**< disable */
  eINTFFullEN = 1<<4,    /**< enable */
}eINTFFull_t;

/**
 * @enum eINTInitialLevel_t
 * @brief idle (no interrupt pending) level of the interrupt pin
 */
typedef enum {
  eINTInitialLevelLOW = 0<<5,    /**< low level */
  eINTInitialLevelHIGH = 1<<5,   /**< high level */
}eINTInitialLevel_t;

/**
 * @enum eINTDataDrdy_t
 * @brief whether the temperature/pressure data-ready interrupt is reported on INT pin / INT_STATUS
 */
typedef enum {
  eINTDataDrdyDIS = 0<<6,   /**< disable */
  eINTDataDrdyEN = 1<<6,    /**< enable */
}eINTDataDrdy_t;

/**
 * @enum eSPISerialMode_t
 * @brief SPI wiring mode selection
 */
typedef enum {
  eSerialModeSPI4 = 0,   /**< SPI four-wire mode */
  eSerialModeSPI3,       /**< SPI three-wire mode */
}eSPISerialMode_t;

/**
 * @enum eI2CWDT_t
 * @brief whether to enable the I2C watchdog timer
 */
typedef enum {
  eI2CWDTDIS = 0<<1,   /**< disable */
  eI2CWDTEN = 1<<1,    /**< enable */
}eI2CWDT_t;

/**
 * @enum eI2CWDTSel_t
 * @brief I2C watchdog timeout selection
 */
typedef enum {
  eI2CWDTSel1p25 = 0<<2,   /**< I2C WDT timeout of 1.25 ms */
  eI2CWDTSel40 = 1<<2,     /**< I2C WDT timeout of 40 ms */
}eI2CWDTSel_t;

/**
 * @enum ePressMeasure_t
 * @brief whether pressure sensing is enabled
 */
typedef enum {
  ePressDIS = 0,   /**< disable pressure sensing */
  ePressEN,        /**< enable pressure sensing */
}ePressMeasure_t;

/**
 * @enum eTempMeasure_t
 * @brief whether temperature sensing is enabled
 */
typedef enum {
  eTempDIS = 0<<1,   /**< disable temperature sensing */
  eTempEN = 1<<1,    /**< enable temperature sensing */
}eTempMeasure_t;

/**
 * @enum ePowerMode_t
 * @brief measurement (power) mode selection
 */
typedef enum {
  eSleepMode = 0<<4,    /**< sleep mode: default after power-on reset; no measurement runs,
                             power draw is minimal, all registers stay readable */
  eForcedMode = 1<<4,   /**< forced mode: a single measurement with the selected options,
                             then back to sleep; the result is read from the data registers */
  eForcedMode2 = 2<<4,  /**< forced mode (alternate encoding, same behaviour as eForcedMode) */
  eNormalMode = 3<<4,   /**< normal mode: continuous measure/standby cycling; output data
                             rate follows the ODR setting */
}ePowerMode_t;

/**
 * @enum ePressOSRMode_t
 * @brief the 6 pressure oversampling modes
 */
typedef enum {
  ePressOSRMode1 = 0,   /**< sampling x1, 16 bit / 2.64 Pa (recommended temperature oversampling x1) */
  ePressOSRMode2,       /**< sampling x2, 16 bit / 2.64 Pa (recommended temperature oversampling x1) */
  ePressOSRMode4,       /**< sampling x4, 18 bit / 0.66 Pa (recommended temperature oversampling x1) */
  ePressOSRMode8,       /**< sampling x8, 19 bit / 0.33 Pa (recommended temperature oversampling x2) */
  ePressOSRMode16,      /**< sampling x16, 20 bit / 0.17 Pa (recommended temperature oversampling x2) */
  ePressOSRMode32,      /**< sampling x32, 21 bit / 0.085 Pa (recommended temperature oversampling x2) */
}ePressOSRMode_t;

/**
 * @enum eTempOSRMode_t
 * @brief the 6 temperature oversampling modes
 */
typedef enum {
  eTempOSRMode1 = 0<<3,    /**< sampling x1, 16 bit / 0.0050 C */
  eTempOSRMode2 = 1<<3,    /**< sampling x2, 16 bit / 0.0025 C */
  eTempOSRMode4 = 2<<3,    /**< sampling x4, 18 bit / 0.0012 C */
  eTempOSRMode8 = 3<<3,    /**< sampling x8, 19 bit / 0.0006 C */
  eTempOSRMode16 = 4<<3,   /**< sampling x16, 20 bit / 0.0003 C */
  eTempOSRMode32 = 5<<3,   /**< sampling x32, 21 bit / 0.00015 C */
}eTempOSRMode_t;

/***************** convenience enumerations for users ******************************/

/**
 * @enum eSDOPinMode_t
 * @brief SDO pin wiring, which selects the I2C address
 */
typedef enum {
  eSDOGND = 0,   /**< SDO tied to GND */
  eSDOVDD,       /**< SDO tied to VDD */
}eSDOPinMode_t;

/**
 * @enum ePrecisionMode_t
 * @brief recommended preset combinations of power mode and oversampling
 */
typedef enum {
  eUltraLowPrecision = 0, /**< weather monitoring (minimum power), forced mode; IDD[uA]=4, RMS noise[cm]=55 */
  eLowPrecision,          /**< casual detection, normal mode; IDD[uA]=358, RMS noise[cm]=36 */
  eNormalPrecision1,      /**< dynamic detection on handheld devices (e.g. phones), normal mode; IDD[uA]=310, RMS noise[cm]=10 */
  eNormalPrecision2,      /**< drones, normal mode; IDD[uA]=570, RMS noise[cm]=11 */
  eHighPrecision,         /**< low-power handheld devices (e.g. phones), normal mode; IDD[uA]=145, RMS noise[cm]=11 */
  eUltraPrecision,        /**< indoor navigation, sampling period around 1000 ms, normal mode; IDD[uA]=560, RMS noise[cm]=5 */
}ePrecisionMode_t;

public:
  /**
   * @fn DFRobot_BMP3XX
   * @brief constructor
   * @param chipID chip ID of the target device
   * @return None
   */
  DFRobot_BMP3XX(uint8_t chipID);

  /**
   * @fn begin
   * @brief initialization function
   * @return int initialization status
   * @retval 0 NO_ERROR
   * @retval -1 ERR_DATA_BUS
   * @retval -2 ERR_IC_VERSION
   */
  virtual int begin(void);

  /***************** integrated configuration of each register ******************************/

  /**
   * @fn setFIFOMode1
   * @brief FIFO configuration one (FIFO1)
   * @param mode bitwise-OR of one option from each group:
   * @n eFIFODIS / eFIFOEN                      FIFO off / on
   * @n eFIFOStopOnFullDIS / eFIFOStopOnFullEN  keep writing / stop writing when full
   * @n eFIFOTimeDIS / eFIFOTimeEN              sensor-time frame after the last valid data frame
   * @n eFIFOPressDIS / eFIFOPressEN            pressure data buffering
   * @n eFIFOTempDIS / eFIFOTempEN              temperature data buffering
   * @return None
   */
  void setFIFOMode1(uint8_t mode);

  /**
   * @fn setFIFOMode2
   * @brief FIFO configuration two (FIFO2)
   * @param mode bitwise-OR of:
   * @n one of eFIFOSubsampling0..eFIFOSubsampling7 (downsampling factor 2^n, i.e. 1-128)
   * @n eFIFODataSelectDIS (unfiltered data) / eFIFODataSelectEN (filtered data);
   * @n the two reserved encodings behave like "unfiltered"
   * @return None
   */
  void setFIFOMode2(uint8_t mode);

  /**
   * @fn setINTMode
   * @brief interrupt configuration (INT)
   * @param mode bitwise-OR of one option from each group:
   * @n eINTPinPP / eINTPinOD                          pin output push-pull / open-drain
   * @n eINTPinActiveLevelLow / eINTPinActiveLevelHigh active level
   * @n eINTLatchDIS / eINTLatchEN                     interrupt latching
   * @n eIntFWtmDis / eIntFWtmEn                       FIFO watermark interrupt
   * @n eINTFFullDIS / eINTFFullEN                     FIFO full interrupt
   * @n eINTInitialLevelLOW / eINTInitialLevelHIGH     idle pin level
   * @n eINTDataDrdyDIS / eINTDataDrdyEN               data-ready interrupt
   * @return None
   */
  void setINTMode(uint8_t mode);

  /**
   * @fn setPWRMode
   * @brief measurement-enable and power mode configuration
   * @param mode bitwise-OR of:
   * @n ePressDIS / ePressEN   pressure measurement off / on
   * @n eTempDIS / eTempEN     temperature measurement off / on
   * @n plus one of eSleepMode (default after reset, no measurement), eForcedMode
   * @n (single measurement, then sleep) or eNormalMode (continuous cycling at the ODR rate)
   * @return None
   */
  void setPWRMode(uint8_t mode);

  /**
   * @fn setOSRMode
   * @brief pressure and temperature oversampling configuration (OSR: over-sampling register)
   * @param mode bitwise-OR of one ePressOSRMode* value and one eTempOSRMode* value;
   * @n see the enum definitions above for resolution and recommended pairings
   * @return None
   */
  void setOSRMode(uint8_t mode);

  /**
   * @fn setODRMode
   * @brief output data rate configuration in subdivision/sub-sampling mode (ODR: output data rates)
   * @param mode one of the BMP3XX_ODR_* constants, from BMP3XX_ODR_200_HZ down to BMP3XX_ODR_0P0015_HZ
   * @return bool configuration result
   * @retval True  configuration succeeded and was applied
   * @retval False configuration failed; the previous state is kept
   */
  bool setODRMode(uint8_t mode);

  /**
   * @fn setIIRMode
   * @brief IIR filter coefficient configuration
   * @param mode one of BMP3XX_IIR_CONFIG_COEF_0 / _1 / _3 / _7 / _15 / _31 / _63 / _127
   * @return None
   */
  void setIIRMode(uint8_t mode);

  /**
   * @fn setCommand
   * @brief issue a basic sensor command
   * @param mode one of:
   * @n BMP3XX_CMD_NOP         null command
   * @n BMP3XX_CMD_FIFO_FLUSH  clear all FIFO data without changing the FIFO configuration
   * @n BMP3XX_CMD_SOFTRESET   soft reset; all user settings are overwritten with their defaults
   * @return None
   */
  void setCommand(uint8_t mode);

  /**
   * @fn setFIFOWTM
   * @brief FIFO watermark configuration
   * @param WTMSetting watermark (0-511); the watermark interrupt triggers when
   * @n the FIFO fill level reaches it
   * @return None
   */
  void setFIFOWTM(uint16_t WTMSetting);

  /**
   * @fn setSamplingMode
   * @brief apply one of the recommended sampling presets (see ePrecisionMode_t)
   * @param mode eUltraLowPrecision .. eUltraPrecision
   * @return bool configuration result
   * @retval True  configuration succeeded and was applied
   * @retval False configuration failed; the previous state is kept
   */
  bool setSamplingMode(ePrecisionMode_t mode);

  /***************** data register acquisition and processing ******************************/

  /**
   * @fn getSamplingPeriodUS
   * @brief sampling period of the currently configured sampling mode
   * @return sampling period in microseconds
   */
  uint32_t getSamplingPeriodUS(void);

  /**
   * @fn readTempC
   * @brief temperature measurement from the data registers; operating range -40 to +85 C
   * @return temperature in degrees Celsius
   */
  float readTempC(void);

  /**
   * @fn readPressPa
   * @brief pressure measurement from the data registers; operating range 300-1250 hPa
   * @return pressure in Pa
   * @attention if a reference altitude was provided via calibratedAbsoluteDifference(),
   * @n the absolute value at the current position is computed from the calibrated
   * @n sea-level atmospheric pressure
   */
  float readPressPa(void);

  /**
   * @fn calibratedAbsoluteDifference
   * @brief use a known current altitude as a reference value to remove the absolute
   * @n offset of subsequent pressure and altitude readings
   * @param altitude current altitude in meters
   * @return bool whether the reference value was stored
   * @retval True  reference set successfully
   * @retval False reference could not be set
   */
  bool calibratedAbsoluteDifference(float altitude);

  /**
   * @fn readAltitudeM
   * @brief altitude computed from the atmospheric pressure measured by the sensor
   * @return altitude in meters
   * @attention if a reference altitude was provided via calibratedAbsoluteDifference(),
   * @n the absolute value at the current position is computed from the calibrated
   * @n sea-level atmospheric pressure
   */
  float readAltitudeM(void);

  /**
   * @fn getFIFOData
   * @brief read buffered measurements out of the FIFO
   * @param FIFOTemperatureC output: temperature in degrees Celsius
   * @param FIFOPressurePa   output: pressure in Pa
   * @return None
   */
  void getFIFOData(float &FIFOTemperatureC, float &FIFOPressurePa);

  /**
   * @fn getFIFOLength
   * @brief amount of data currently buffered in the FIFO
   * @return value in the range 0-511
   */
  uint16_t getFIFOLength(void);

protected:
  /**
   * @fn setIFCONFMode
   * @brief serial interface configuration
   * @param mode bitwise-OR of:
   * @n eSerialModeSPI4 / eSerialModeSPI3  SPI four-wire / three-wire mode
   * @n eI2CWDTDIS / eI2CWDTEN             I2C watchdog off / on
   * @n eI2CWDTSel1p25 / eI2CWDTSel40      I2C watchdog timeout 1.25 ms / 40 ms
   * @return None
   */
  void setIFCONFMode(uint8_t mode);

  /**
   * @fn getFIFOWTMValue
   * @brief read back the configured FIFO watermark
   * @return value in the range 0-511
   */
  uint16_t getFIFOWTMValue(void);

  /**
   * @fn getBMP3XXCalibData
   * @brief fetch the sCalibData_t compensation (calibration) coefficients
   * @return None
   */
  void getBMP3XXCalibData(void);

  /**
   * @fn calibTemperatureC
   * @brief compensate a raw reading with the calibration coefficients
   * @param uncompTemp raw (uncompensated) temperature sample
   * @return compensated temperature in degrees Celsius
   */
  float calibTemperatureC(uint32_t uncompTemp);

  /**
   * @fn calibPressurePa
   * @brief compensate a raw reading with the calibration coefficients
   * @param uncompPress raw (uncompensated) pressure sample
   * @return compensated pressure in Pa
   */
  float calibPressurePa(uint32_t uncompPress);

  /***************** sensor status register acquisition and processing ******************************/

  /**
   * @fn cacheErrorStatus
   * @brief retrieve the sensor error flags (fatal, command, configuration errors)
   * @note results are stored in BMP3Info.errStatus:
   * @n fatalError:  fatal, unrecoverable error
   * @n CMDError:    command execution failed; cleared after being read
   * @n configError: sensor configuration error (only reported in normal mode); cleared after being read
   * @return None
   */
  void cacheErrorStatus(void);

  /**
   * @fn cacheSensorStatus
   * @brief retrieve pressure/temperature data readiness and CMD-decoder status
   * @note results are stored in BMP3Info.sensorStatus:
   * @n CMDReady:  CMD decoder status
   * @n pressDrdy: reset once a pressure data register is read
   * @n tempDrdy:  reset once a temperature data register is read
   * @return None
   */
  void cacheSensorStatus(void);

  /**
   * @fn cacheSensorEvent
   * @brief retrieve the sensor event flags
   * @note results are stored in BMP3Info.sensorEvent:
   * @n porDetected: "1" after power-on or soft reset; cleared after being read
   * @n itfActPt:    "1" if an interface transaction happened during a
   * @n              pressure/temperature conversion; cleared after being read
   * @return None
   */
  void cacheSensorEvent(void);

  /**
   * @fn cacheINTStatus
   * @brief retrieve the interrupt status flags (FIFO watermark, FIFO full, data ready)
   * @note results are stored in BMP3Info.INTStatus:
   * @n fwtmINT:   FIFO watermark interrupt
   * @n ffullINT:  FIFO full interrupt
   * @n dataReady: data ready interrupt
   * @return None
   */
  void cacheINTStatus(void);

  /***************** register reading and writing ports
******************************/

  /**
   * @fn writeReg
   * @brief pure virtual register write; the bus-specific body is supplied by a derived class
   * @param reg  8-bit register address
   * @param pBuf buffer holding the data to write
   * @param size number of bytes to write
   * @return None
   */
  virtual void writeReg(uint8_t reg, const void* pBuf, size_t size)=0;

  /**
   * @fn readReg
   * @brief pure virtual register read; the bus-specific body is supplied by a derived class
   * @param reg  8-bit register address
   * @param pBuf buffer receiving the data read
   * @param size number of bytes to read
   * @return number of bytes read; 0 means the read failed
   */
  virtual size_t readReg(uint8_t reg, void* pBuf, size_t size)=0;

private:
  // private variables
  sBMP3XXDeviceInfo_t BMP3Info;   /**< cached device configuration and status */
};

/***************** initialization and write/read of IIC and SPI interfaces ******************************/

class DFRobot_BMP3XX_IIC:public DFRobot_BMP3XX
{
public:
  /**
   * @fn DFRobot_BMP3XX_IIC
   * @brief constructor; the I2C address follows the SDO wiring
   * @param pWire Wire object defined in Wire.h; pass &Wire to use the Arduino default bus
   * @param mode  eSDOGND: SDO tied to GND, I2C address 0x76; eSDOVDD: SDO tied to VDDIO, I2C address 0x77
   * @param chipID chip ID
   * @return None
   */
  DFRobot_BMP3XX_IIC(TwoWire *pWire, eSDOPinMode_t mode, uint8_t chipID);

  /**
   * @fn begin
   * @brief subclass initialization
   * @return int initialization status
   * @retval  0 NO_ERROR
   * @retval -1 ERR_DATA_BUS
   * @retval -2 ERR_IC_VERSION
   */
  virtual int begin(void);

protected:
  /**
   * @fn writeReg
   * @brief write register values over the I2C bus
   * @param reg  8-bit register address
   * @param pBuf buffer holding the data to write
   * @param size number of bytes to write
   * @return None
   */
  virtual void writeReg(uint8_t reg, const void* pBuf, size_t size);

  /**
   * @fn readReg
   * @brief read register values over the I2C bus
   * @param reg  8-bit register address
   * @param pBuf buffer receiving the data read
   * @param size number of bytes to read
   * @return number of bytes read; 0 means the read failed
   */
  virtual size_t readReg(uint8_t reg, void* pBuf, size_t size);

private:
  TwoWire *_pWire;       // pointer to the I2C bus implementation
  uint8_t _deviceAddr;   // I2C device address (selected by the SDO wiring)
};

class DFRobot_BMP3XX_SPI:public DFRobot_BMP3XX
{
public:
  /**
   * @fn DFRobot_BMP3XX_SPI
   * @brief constructor
   * @param pSpi  address of an SPIClass object (e.g. &SPI from SPI.h)
   * @param csPin digital pin used as SPI chip select
   * @param chipID chip ID
   * @return None
   */
  DFRobot_BMP3XX_SPI(SPIClass *pSpi, uint8_t csPin, uint8_t chipID);

  /**
   * @fn begin
   * @brief subclass initialization
   * @return int initialization status
   * @retval  0 NO_ERROR
   * @retval -1 ERR_DATA_BUS
   * @retval -2 ERR_IC_VERSION
   */
  virtual int begin(void);

protected:
  /**
   * @fn writeReg
   * @brief write register values over the SPI bus
   * @param reg  8-bit register address
   * @param pBuf buffer holding the data to write
   * @param size number of bytes to write
   * @return None
   */
  virtual void writeReg(uint8_t reg, const void* pBuf, size_t size);

  /**
   * @fn readReg
   * @brief read register values over the SPI bus
   * @param reg  8-bit register address
   * @param pBuf buffer receiving the data read
   * @param size number of bytes to read
   * @return number of bytes read; 0 means the read failed
   */
  virtual size_t readReg(uint8_t reg, void* pBuf, size_t size);

private:
  SPIClass *_pSpi;   // pointer to the SPI bus implementation
  uint8_t _csPin;    // SPI chip-select pin
};

/***************** BMP388 chip ******************************/

class DFRobot_BMP388_IIC:public DFRobot_BMP3XX_IIC
{
public:
  /**
   * @fn DFRobot_BMP388_IIC
   * @brief convenience wrapper for a BMP388 on I2C; the address follows the SDO wiring
   * @param pWire Wire object; defaults to Arduino's global Wire
   * @param mode  eSDOGND: address 0x76; eSDOVDD: address 0x77 (default)
   * @return None
   */
  DFRobot_BMP388_IIC(TwoWire *pWire=&Wire, eSDOPinMode_t mode=eSDOVDD);
};

class DFRobot_BMP388_SPI:public DFRobot_BMP3XX_SPI
{
public:
  /**
   * @fn DFRobot_BMP388_SPI
   * @brief convenience wrapper for a BMP388 on SPI
   * @param pSpi  defaults to Arduino's global SPI object
   * @param csPin digital pin used as chip select (default 3)
   * @return None
   */
  DFRobot_BMP388_SPI(SPIClass *pSpi=&SPI, uint8_t csPin=3);
};

/***************** BMP390L chip ******************************/

class DFRobot_BMP390L_IIC:public DFRobot_BMP3XX_IIC
{
public:
  /**
   * @fn DFRobot_BMP390L_IIC
   * @brief convenience wrapper for a BMP390L on I2C; the address follows the SDO wiring
   * @param pWire Wire object; defaults to Arduino's global Wire
   * @param mode  eSDOGND: address 0x76; eSDOVDD: address 0x77 (default)
   * @return None
   */
  DFRobot_BMP390L_IIC(TwoWire *pWire=&Wire, eSDOPinMode_t mode=eSDOVDD);
};

class DFRobot_BMP390L_SPI:public DFRobot_BMP3XX_SPI
{
public:
  /**
   * @fn DFRobot_BMP390L_SPI
   * @brief convenience wrapper for a BMP390L on SPI
   * @param pSpi  defaults to Arduino's global SPI object
   * @param csPin digital pin used as chip select (default 3)
   * @return None
   */
  DFRobot_BMP390L_SPI(SPIClass *pSpi=&SPI, uint8_t csPin=3);
};

#endif
def from_dictionary(cls, dictionary):
    """Build an instance of this model from a plain dictionary.

    Args:
        cls: the model class being instantiated.
        dictionary (dict): mapping of JSON keys to values, or None.

    Returns:
        An instance of ``cls`` populated from ``dictionary``, or None when
        no dictionary was supplied.
    """
    if dictionary is None:
        return None

    # Nested models are only deserialized when their key carries a truthy
    # payload; absent or empty keys map to None.
    raw_error = dictionary.get('error')
    error = None
    if raw_error:
        error = cohesity_management_sdk.models.request_error.RequestError.from_dictionary(raw_error)

    raw_disk_response = dictionary.get('virtualDiskRestoreResponse')
    virtual_disk_restore_response = None
    if raw_disk_response:
        virtual_disk_restore_response = cohesity_management_sdk.models.virtual_disk_restore_response.VirtualDiskRestoreResponse.from_dictionary(raw_disk_response)

    return cls(error,
               dictionary.get('isInstantRecoveryFinished'),
               dictionary.get('taskState'),
               virtual_disk_restore_response)
from __future__ import absolute_import import numpy as np from numpy.testing import assert_allclose from nose.tools import assert_equal, assert_true import pycircstat from pycircstat import event_series as es def test_vector_strength_spectrum(): T = 3 # 2s sampling_rate = 10000. firing_rate = 10 # 1000Hz s = T * np.random.rand(np.random.poisson(firing_rate * T)) w, vs_spec = es.vector_strength_spectrum(s, sampling_rate) F0 = [] R = [] lowcut, highcut = 500, 550 idx = (w >= lowcut) & (w <= highcut) for i in np.where(idx)[0]: f0 = w[i] p0 = 1 / f0 rho = pycircstat.resultant_vector_length((s % p0) / p0 * 2 * np.pi) F0.append(f0) R.append(rho) assert_allclose(R, vs_spec[idx]) def test_direct_vector_strength_spectrum(): T = 3 # 2s sampling_rate = 10000. firing_rate = 10 # 1000Hz s = T * np.random.rand(np.random.poisson(firing_rate * T)) w, vs_spec = es.vector_strength_spectrum(s, sampling_rate) lowcut, highcut = 500, 550 idx = (w >= lowcut) & (w <= highcut) vs_2 = es.direct_vector_strength_spectrum(s, w[idx]) assert_allclose(vs_2, vs_spec[idx]) def test_direct_vector_strength_spectrum_parallel(): T = 3 # 2s sampling_rate = 10000. firing_rate = 10 # 1000Hz s = T * np.random.rand(np.random.poisson(firing_rate * T)) w, vs_spec = es.vector_strength_spectrum(s, sampling_rate) lowcut, highcut = 1, 1400 idx = (w >= lowcut) & (w <= highcut) vs_2 = es.direct_vector_strength_spectrum(s, w[idx]) assert_allclose(vs_2, vs_spec[idx], rtol=1e-4, atol=1e-4)
/* * Allocate a new open/delegation state counter. This is needed for * pNFS for proper return on close semantics. * * Note that we only allocate it for pNFS-enabled exports, otherwise * all pointers to struct nfs4_clnt_odstate are always NULL. */ static struct nfs4_clnt_odstate * alloc_clnt_odstate(struct nfs4_client *clp) { struct nfs4_clnt_odstate *co; co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); if (co) { co->co_client = clp; refcount_set(&co->co_odcount, 1); } return co; }
#pragma once #include "il2cpp.h" Dpr_MsgWindow_MsgWindowParam_o* Dpr_Battle_View_BtlvUtility__MakeMsgWindowParam (Dpr_Message_MessageTextParseDataModel_o* pStrBuf, const MethodInfo* method_info); Dpr_Message_MessageTextParseDataModel_o* Dpr_Battle_View_BtlvUtility__BTLV_STRPARAM_to_StrBuf (Dpr_Battle_Logic_BTLV_STRPARAM_o* param, const MethodInfo* method_info); int32_t Dpr_Battle_View_BtlvUtility__GetUniqueWazaDataTurnType (int32_t wazaNo, int32_t monsNo, uint8_t formNo, int32_t turnType, const MethodInfo* method_info);
package main

import (
	"sort"
	"testing"
)

// TestBurrowsWheeler checks the inverse Burrows-Wheeler transform against a
// few known transform/plaintext pairs.
func TestBurrowsWheeler(t *testing.T) {
	cases := map[string]string{
		"oooooooo$ ffffffff ffffffffuuuuuuuuaaaaaaaallllllllbbBbbBBb": "Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo$",
		"edarddddddddddntensr$ ehhhhhhhhhhhJ aeaaaaaaaaaaalhtf thmbfe tcwohiahoJ eeec t e ": "James while John had had had had had had had had had had had a better effect on the teacher$",
		"ooooio,io$Nnssshhhjo ee o nnkkkkkkii ": "Neko no ko koneko, shishi no ko kojishi$",
	}
	for in, want := range cases {
		if got := burrowsWheeler(in); got != want {
			t.Errorf("failed: burrowsWheeler %s is %s, got %s", in, want, got)
		}
	}
}

// burrowsWheeler inverts a Burrows-Wheeler transform ending in '$'. Each input
// byte is packed together with its position into one int (byte in the high 16
// bits, index in the low 16 bits); sorting those ints yields the first column
// of the rotation matrix along with the permutation back into the last column,
// which is then walked starting from the terminator. Inputs are limited to
// 65536 bytes by the 16-bit index packing.
func burrowsWheeler(bwt string) string {
	const shift = 16
	packed := make([]int, len(bwt))
	out := make([]byte, len(bwt))

	start := 0
	for i := 0; i < len(bwt); i++ {
		packed[i] = int(bwt[i])<<shift + i
		if bwt[i] == '$' {
			start = i
		}
	}
	sort.Ints(packed)

	for i, cur := 0, start; i < len(bwt); i++ {
		out[i] = byte(packed[cur] >> shift)
		cur = packed[cur] % (1 << shift)
	}
	return string(out)
}
import sys


def solve(n, k, a, b, avenger_positions):
    """Minimum power needed to destroy the base segment [1, 2**n].

    A segment can either be burned whole (cost ``a`` if it holds no avenger,
    otherwise ``b * avengers_inside * length``) or, when longer than one cell,
    split in half and the halves destroyed independently.

    Args:
        n: the base spans positions 1 .. 2**n.
        k: declared number of avengers.
        a: cost of burning a segment that contains no avenger.
        b: per-avenger, per-length burn cost factor.
        avenger_positions: iterable of avenger positions (need not be sorted).

    Returns:
        The minimum total destruction cost as an int.
    """
    pts = sorted(avenger_positions)

    def first_at_least(lo, hi):
        # Index of the leftmost position >= lo, or -1 when no position
        # falls inside [lo, hi]. Safe on an empty position list (the
        # original indexed arr[-1] here and crashed when k == 0).
        left, right, found = 0, k - 1, -1
        while left <= right:
            mid = (left + right) // 2
            if pts[mid] >= lo:
                found, right = mid, mid - 1
            else:
                left = mid + 1
        if found == -1 or pts[found] > hi:
            return -1
        return found

    def last_at_most(lo, hi):
        # Index of the rightmost position <= hi, or -1 when no position
        # falls inside [lo, hi].
        left, right, found = 0, k - 1, -1
        while left <= right:
            mid = (left + right) // 2
            if pts[mid] <= hi:
                found, left = mid, mid + 1
            else:
                right = mid - 1
        if found == -1 or pts[found] < lo:
            return -1
        return found

    def cost(lo, hi):
        # Guard kept from the original implementation: a zero bound
        # contributes nothing.
        if lo == 0 or hi == 0:
            return 0
        first = first_at_least(lo, hi)
        if first == -1:
            return a  # no avenger inside: burning costs the flat rate
        if lo == hi:
            # Single cell: count duplicate avengers stacked on it.
            last = first
            while last + 1 < k and pts[last + 1] == pts[first]:
                last += 1
            return b * (last - first + 1)
        last = last_at_most(lo, hi)
        burn_whole = b * (last - first + 1) * (hi - lo + 1)
        mid = (lo + hi) // 2
        return min(burn_whole, cost(lo, mid) + cost(mid + 1, hi))

    return cost(1, 2 ** n)


def main():
    # Input: first line "n k a b", second line the k avenger positions.
    n, k, a, b = map(int, sys.stdin.readline().split())
    positions = list(map(int, sys.stdin.readline().split()))
    print(solve(n, k, a, b, positions))


if __name__ == '__main__':
    main()
/// Returns the number of the correct plural form /// for `n` objects, as defined by the rule contained in this resolver. pub fn resolve(&self, n: u64) -> usize { match *self { Expr(ref ast) => ast.resolve(n), Function(ref f) => f(n), } }
/*
 * Copyright 2020 the original author or authors.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * https://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.openrewrite.java.tree;

import org.junit.jupiter.api.Test;
import org.openrewrite.marker.Markers;

import java.util.List;

import static java.util.Collections.emptyList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.openrewrite.Tree.randomId;

/**
 * Unit tests for {@link Space}: whitespace/comment parsing, reference-identity
 * preservation on no-op mutations, and indent extraction.
 */
class SpaceTest {

    // withComments()/format helpers must return the SAME instance when the
    // requested change is a no-op, so the LST stays reference-shareable.
    @Test
    void formatLastSuffixWithSameSuffixDoesntChangeReference() {
        var suffix = Space.format(" ");
        var trees = List.of(new JRightPadded<>(new J.Empty(randomId(), suffix, Markers.EMPTY), suffix, Markers.EMPTY));
        assertThat(Space.formatLastSuffix(trees, suffix)).isSameAs(trees);
    }

    @Test
    void formatFirstPrefixWithSamePrefixDoesntChangeReference() {
        var prefix = Space.format(" ");
        var trees = List.of(new J.Empty(randomId(), prefix, Markers.EMPTY));
        assertThat(Space.formatFirstPrefix(trees, prefix)).isSameAs(trees);
    }

    @Test
    void spaceWithSameCommentsDoesntChangeReference() {
        var comments = List.<Comment>of(new TextComment(true, "test", "", Markers.EMPTY));
        var s = Space.build("", comments);
        assertThat(s.withComments(comments)).isSameAs(s);
    }

    // "//" inside a line comment's text must not start a new comment.
    @Test
    void singleLineComment() {
        @SuppressWarnings("TextBlockMigration")
        var cf = Space.format(" \n" +
                "// I'm a little // teapot\n" +
                "// Short and stout //\n " +
                "// Here is my handle\n ");

        assertThat(cf.getComments()).hasSize(3);

        TextComment c1 = (TextComment) cf.getComments().get(0);
        TextComment c2 = (TextComment) cf.getComments().get(1);
        TextComment c3 = (TextComment) cf.getComments().get(2);

        assertThat(c1.getText()).isEqualTo(" I'm a little // teapot");
        assertThat(c2.getText()).isEqualTo(" Short and stout //");
        assertThat(c3.getText()).isEqualTo(" Here is my handle");

        assertThat(c1.getSuffix()).isEqualTo("\n");
        assertThat(c2.getSuffix()).isEqualTo("\n ");
        assertThat(c3.getSuffix()).isEqualTo("\n ");

        assertThat(cf.getWhitespace()).isEqualTo(" \n");
    }

    // "/*" inside a block comment's text must not nest.
    @Test
    void multiLineComment() {
        @SuppressWarnings("TextBlockMigration")
        var cf = Space.format(" \n" +
                "/* /* Here is my spout */\n" +
                "/* When I get all steamed up */\n" +
                "/* /*\n" +
                "Here me shout\n" +
                "*/\n ");

        assertThat(cf.getComments()).hasSize(3);

        TextComment c1 = (TextComment) cf.getComments().get(0);
        TextComment c2 = (TextComment) cf.getComments().get(1);
        TextComment c3 = (TextComment) cf.getComments().get(2);

        assertThat(c1.getText()).isEqualTo(" /* Here is my spout ");
        assertThat(c2.getText()).isEqualTo(" When I get all steamed up ");
        assertThat(c3.getText()).isEqualTo(" /*\nHere me shout\n");

        assertThat(c1.getSuffix()).isEqualTo("\n");
        assertThat(c2.getSuffix()).isEqualTo("\n");
        assertThat(c3.getSuffix()).isEqualTo("\n ");

        assertThat(cf.getWhitespace()).isEqualTo(" \n");
    }

    @Test
    void javadocComment() {
        @SuppressWarnings("TextBlockMigration")
        var cf = Space.format(
                " \n" +
                "/**\n" +
                " * /** Tip me over and pour me out!\n" +
                " * https://somewhere/over/the/rainbow.txt\n" +
                " */\n "
        );
        assertThat(cf.getComments()).hasSize(1);
        assertThat(cf.getComments().get(0).getSuffix()).isEqualTo("\n ");
        assertThat(cf.getWhitespace()).isEqualTo(" \n");
    }

    // A "//" immediately after "/*" must stay part of the block comment body.
    @Test
    void multilineCommentWithDoubleSlashCommentOnFirstLine() {
        var cf = Space.format("""
            /*// debugging
            * bla
            */
            """
        );
        assertThat(cf.getComments()).hasSize(1);
        TextComment c1 = (TextComment) cf.getComments().get(0);
        assertThat(c1.getText()).isEqualTo("// debugging\n* bla\n");
    }

    // toString() renders spaces as ·ₙ and tabs as -ₙ for debuggability.
    @Test
    void stringify() {
        assertThat(Space.format("\n  \n\t \t").toString())
                .isEqualTo("Space(comments=<0 comments>, whitespace='\\n·₁·₂\\n-₁·₂-₃')");
    }

    @Test
    void findIndent() {
        assertThat(Space.build(" ", List.of(new TextComment(false, "hi", "\n ", Markers.EMPTY))).getIndent())
                .isEqualTo(" ");
        assertThat(Space.build(" ", emptyList()).getIndent())
                .isEqualTo(" ");
        assertThat(Space.build(" \n ", emptyList()).getIndent())
                .isEqualTo(" ");
        assertThat(Space.build(" \n \n ", emptyList()).getIndent())
                .isEqualTo(" ");
    }
}
import java.math.BigInteger;

/**
 * Performance-testing sample: computes a 16-bit "laboonification" hash of a
 * string using deliberately heavyweight BigInteger arithmetic.
 */
public class Laboon {

    /**
     * Hashes {@code l} by summing, per character {@code x},
     * x^10 + x^8 - (x^3 + x^2), then reducing the total mod 65536.
     *
     * @param l string to hash
     * @return hash value in [0, 65535]
     */
    public static int laboonify(String l) {
        BigInteger total = BigInteger.ZERO;
        for (char c : l.toCharArray()) {
            BigInteger x = BigInteger.valueOf((int) c);
            BigInteger term = x.pow(10).add(x.pow(8)).subtract(x.pow(3).add(x.pow(2)));
            total = total.add(term);
        }
        return total.mod(BigInteger.valueOf(65536)).intValue();
    }

    /**
     * Hashes the command-line arguments (joined with single spaces) and
     * prints the result in lowercase hex; exits with status 1 if no
     * arguments were supplied.
     */
    public static void main(String[] args) {
        if (args == null || args.length == 0) {
            System.err.println("Enter a string");
            System.exit(1);
        }
        String joined = String.join(" ", args);
        System.out.println(String.format("%x", laboonify(joined)));
    }
}
def start_of_game(self):
    """Reset the per-game money counters before a new game begins."""
    self.needed_money = self.money_to_be_taken = 0
package main

import (
	"fmt"

	"github.com/algorand/go-algorand-sdk/future"

	"github.com/algorand/go-algorand-sdk/client/algod"
	"github.com/algorand/go-algorand-sdk/client/kmd"
)

// Example flow: locate a kmd wallet, build a payment transaction with algod's
// suggested parameters, sign it via kmd, and broadcast it.
// NOTE(review): endpoints, tokens and the wallet password are hard-coded
// placeholders for a local sandbox — never commit real credentials.
func main() {
	const kmdAddress = "http://localhost:7833"
	const kmdToken = "206ba3f9ad1d83523fb2a303dd055cd99ce10c5be01e35ee88285fe51438f02a"
	const algodAddress = "http://localhost:8080"
	const algodToken = "<KEY>"

	// Create a kmd client
	kmdClient, err := kmd.MakeClient(kmdAddress, kmdToken)
	if err != nil {
		fmt.Printf("failed to make kmd client: %s\n", err)
		return
	}
	fmt.Println("Made a kmd client")

	// Create an algod client
	algodClient, err := algod.MakeClient(algodAddress, algodToken)
	if err != nil {
		fmt.Printf("failed to make algod client: %s\n", err)
		return
	}
	fmt.Println("Made an algod client")

	// Get the list of wallets
	listResponse, err := kmdClient.ListWallets()
	if err != nil {
		fmt.Printf("error listing wallets: %s\n", err)
		return
	}

	// Find our wallet name in the list
	// NOTE(review): if "mylinuxwallet" is absent, exampleWalletID stays ""
	// and the InitWalletHandle call below will fail at runtime.
	var exampleWalletID string
	fmt.Printf("Got %d wallet(s):\n", len(listResponse.Wallets))
	for _, wallet := range listResponse.Wallets {
		fmt.Printf("ID: %s\tName: %s\n", wallet.ID, wallet.Name)
		if wallet.Name == "mylinuxwallet" {
			fmt.Printf("found wallet '%s' with ID: %s\n", wallet.Name, wallet.ID)
			exampleWalletID = wallet.ID
			break
		}
	}

	// Get a wallet handle. The wallet handle is used for things like signing transactions
	// and creating accounts. Wallet handles do expire, but they can be renewed
	initResponse, err := kmdClient.InitWalletHandle(exampleWalletID, "<PASSWORD>")
	if err != nil {
		fmt.Printf("Error initializing wallet handle: %s\n", err)
		return
	}

	// Extract the wallet handle
	exampleWalletHandleToken := initResponse.WalletHandleToken

	fromAddr := "C2MCKQYJCU4RNWCQVWWSWEHGPAD37BEMQTIMVD6XF36AUIPKOXWIZOO7ZE"
	toAddr := "H65XWDPZDEV7MXWDOUSLEL6UL6UO6CNK2ZILCENCFBKNCD4RATZIZZSIWQ"

	// Get the suggested transaction parameters
	txParams, err := algodClient.BuildSuggestedParams()
	if err != nil {
		fmt.Printf("error getting suggested tx params: %s\n", err)
		return
	}

	// Make transaction
	tx, err := future.MakePaymentTxn(fromAddr, toAddr, 1000, nil, "", txParams)
	if err != nil {
		fmt.Printf("Error creating transaction: %s\n", err)
		return
	}

	// Sign the transaction
	signResponse, err := kmdClient.SignTransaction(exampleWalletHandleToken, "<PASSWORD>", tx)
	if err != nil {
		fmt.Printf("Failed to sign transaction with kmd: %s\n", err)
		return
	}

	fmt.Printf("kmd made signed transaction with bytes: %x\n", signResponse.SignedTransaction)

	// Broadcast the transaction to the network
	// Note that this transaction will get rejected because the accounts do not have any tokens
	sendResponse, err := algodClient.SendRawTransaction(signResponse.SignedTransaction)
	if err != nil {
		fmt.Printf("failed to send transaction: %s\n", err)
		return
	}

	fmt.Printf("Transaction ID: %s\n", sendResponse.TxID)
}
<reponame>affromero/SMILE import torch.nn as nn from misc.utils import PRINT from torch.nn import init import math def print_debug(feed, layers, file=None, append=''): if isinstance(feed, (tuple, list)): for i in feed: PRINT(file, i.size()) else: PRINT(file, feed.size()) for layer in layers: try: if isinstance(feed, (tuple, list)): feed = layer(*feed) else: feed = layer(feed) if isinstance(feed, (tuple, list)): feed_str = feed[0] else: feed_str = feed except BaseException: feed = layer(*feed) raise BaseException( "Type of layer {} not compatible with input {}.".format( layer, feed.shape)) try: _str = '{}, {}'.format(str(layer), feed_str.size()) # _str = '{}, {}'.format(str(layer).split('(')[0], feed_str.size()) # _str = '{}, {}'.format(layer.__name__, feed_str.size()) except AttributeError: _str = '{}, {}'.format(layer.__class__.__name__, feed_str.size()) PRINT(file, _str) if append: PRINT(file, append) PRINT(file, ' ') return feed # ==================================================================# # Weights Init # ==================================================================# class weights_init(object): def __init__(self, init_type='kaiming', a=0, nonlinearity='relu'): self.a = 0 self.init_type = init_type self.nonlinearity = nonlinearity def assign(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): if hasattr(m, 'weight'): if self.init_type == 'gaussian': init.normal_(m.weight.data, 0.0, 0.02) elif self.init_type == 'xavier': init.xavier_normal_(m.weight.data, gain=math.sqrt(2)) elif self.init_type == 'kaiming': init.kaiming_normal_(m.weight.data, a=self.a, mode='fan_in', nonlinearity=self.nonlinearity) elif self.init_type == 'kaiming_uniform': init.kaiming_uniform_(m.weight.data, a=self.a, mode='fan_in', nonlinearity=self.nonlinearity) elif self.init_type == 'orthogonal': init.orthogonal_(m.weight.data, gain=math.sqrt(2)) elif self.init_type == 'default': pass else: assert 0, "Unsupported initialization: {}".format( self.init_type) if hasattr(m, 'bias') 
and m.bias is not None: init.constant_(m.bias.data, 0.0)
Accelerated Elevation Change of Greenland's Jakobshavn Glacier Observed by ICESat and IceBridge The recent accelerated ice mass loss of the Greenland ice sheet and its outlet glaciers has been widely documented. The Jakobshavn isbrae/glacier is one of the fastest melting glaciers in Greenland. To determine its elevation change rate, recent observations from the laser altimetry mission Ice, Cloud, and land Elevation Satellite (ICESat) (2003-2009) and the airborne campaign IceBridge (2009-2011) were used. The results confirm previously determined elevation loss rates of several decimeters per year. In particular, the outlet glacier's surface lowers by up to several meters per year, while the high-elevation areas of the ice sheet lose only a few decimeters per year or even gain elevation in some years. The three study areas show the following ranges of elevation change rates and standard deviation between 2003 and 2011: (1) - 0.64 to -2.56 ±0.43m/yr for areas below 1500-m elevation; (2) +0.17 to -0.55 ±0.16m/yr for areas above 1500 m; and (3) -2.31 to -5.18 ±0.55 m/yr for the outlet glacier. Elevation change accelerates at -0.09 to -0.27 m/yr2 between 2004 and 2011. The uncertainty is better than 0.55 m/yr, which demonstrates that both ICESat and IceBridge observations allow for the accurate estimation of rates at the observed magnitudes. Part of the uncertainty can be attributed to the slope correction used to project two footprints at different epochs onto a common location. The slope correction was applied based on the ICESat 1-km digital elevation model and the IceBridge Airborne Topographic Mapper slope data sets. An improved understanding of annual and seasonal variability of elevation change will result in better quantification of the glacier's mass balance and its contribution to sea level change.
#include <stdint.h>
#include <murax_hex.h>

/* Write a NUL-terminated string to the UART, one byte at a time. */
void print_hex(char *str){
	while(*str){
		uart_write(UART,*(str++));
	}
}

/* Firmware entry point: configures timer A for a 1 s interrupt, enables the
 * GPIO/HEX outputs and UART RX interrupts, then spins in a loop driving a
 * counter on LED[5:0] and incrementing the HEX displays while a button
 * (GPIO_A input bit 0) is held. */
void main() {
	volatile uint32_t a = 1, b = 2, c = 3;
	uint32_t result = 0;

	interruptCtrl_init(TIMER_INTERRUPT);
	prescaler_init(TIMER_PRESCALER);
	timer_init(TIMER_A);

	TIMER_PRESCALER->LIMIT = 12000-1; //1 ms rate
	TIMER_A->LIMIT = 1000-1; //1 second rate
	TIMER_A->CLEARS_TICKS = 0x00010002;
	TIMER_INTERRUPT->PENDINGS = 0xF;
	TIMER_INTERRUPT->MASKS = 0x1;

	GPIO_A->OUTPUT_ENABLE = 0x000000FF;
	GPIO_A->OUTPUT = 0x00000000;
	GPIO_HEX->OUTPUT_ENABLE = 0x000000FF;
	GPIO_HEX->OUTPUT = 0x00000000;
	GPIO_HEX2->OUTPUT_ENABLE = 0x000000FF;
	GPIO_HEX2->OUTPUT = 0x00000000;

	UART->STATUS = 2; //Enable RX interrupts
	UART->DATA = 'C';
	//HEX->STATUS = 2; //Enable RX interrupts
	//HEX->DATA = 'B';
	print_hex("Hello !\n");

	while(1){
		result += a;
		result += b + c;
		/* Crude busy-wait delay; the empty asm keeps the loop from being
		 * optimized away. */
		for(uint32_t idx = 0;idx < 50000;idx++) asm volatile("");
		GPIO_A->OUTPUT = (GPIO_A->OUTPUT & ~0x3F) | ((GPIO_A->OUTPUT + 1) & 0x3F); //Counter on LED[5:0]
		if (GPIO_A->INPUT & 0x00000001){
			GPIO_HEX->OUTPUT = (GPIO_HEX->OUTPUT + 7) & 0xFFFFFFFF; //(GPIO_HEX->OUTPUT & ~0xFFFF) | ((GPIO_HEX->OUTPUT + 1) & 0xFFFF); //Counter on LED[7:0]
		}
		if (GPIO_A->INPUT & 0x00000001){
			GPIO_HEX2->OUTPUT = (GPIO_HEX2->OUTPUT + 1) & 0xFFFFFFFF; //(GPIO_HEX2->OUTPUT & ~0xFFFF) | ((GPIO_HEX2->OUTPUT + 1) & 0xFFFF); //Counter on LED[7:0]
		}
		//HEX->DATA = 10;//(HEX->DATA & ~0x3F) | ((HEX->DATA + 1) & 0x3F); //Counter on LED[5:0]
	}
}

/* Interrupt dispatcher: timer A toggles two LEDs once a second; UART RX
 * echoes each received byte back, bumped by one while the button is held. */
void irqCallback(){
	if(TIMER_INTERRUPT->PENDINGS & 1){ //Timer A interrupt
		GPIO_A->OUTPUT ^= 0x80; //Toogle led 7
		GPIO_HEX->OUTPUT ^= 0x40; //Toogle led 6
		TIMER_INTERRUPT->PENDINGS = 1;
	}
	while(UART->STATUS & (1 << 9)){ //UART RX interrupt
		//UART->DATA = ((UART->DATA) + (HEX->DATA)) & 0xFF; //(65) & 0xFF; //(UART->DATA) & 0xFF;
		UART->DATA = (UART->DATA + (GPIO_A->INPUT & 0x00000001) ) & 0xFF; //(UART->DATA) & 0xFF;
		//UART->DATA = (UART->DATA) & 0xFF;
	}
	//while(HEX->STATUS & (1 << 9)){ //HEX RX interrupt
	//	HEX->DATA = (HEX->DATA) & 0xFF;
	//}
}
"""
PKCS#11 Tests

The following environment variables will influence the behaviour of test cases:
 - PKCS11_MODULE, mandatory, points to the library/DLL to use for testing
 - PKCS11_TOKEN_LABEL, mandatory, contains the token label
 - PKCS11_TOKEN_PIN, optional (default is None), contains the PIN/passphrase of the token
 - PKCS11_TOKEN_SO_PIN, optional (default is same as PKCS11_TOKEN_PIN), security officer PIN
 - OPENSSL_PATH, optional, path to openssl executable (i.e. the folder that contains it)
"""

import os
import shutil
import unittest
from functools import wraps
from warnings import warn

import pkcs11

# Fail fast at import time if the mandatory configuration is missing.
try:
    LIB = os.environ['PKCS11_MODULE']
except KeyError:
    raise RuntimeError("Must define `PKCS11_MODULE' to run tests.")

try:
    TOKEN = os.environ['PKCS11_TOKEN_LABEL']
except KeyError:
    raise RuntimeError("Must define `PKCS11_TOKEN_LABEL' to run tests.")

TOKEN_PIN = os.environ.get('PKCS11_TOKEN_PIN')  # Can be None
if TOKEN_PIN is None:
    warn("`PKCS11_TOKEN_PIN' env variable is unset.")

TOKEN_SO_PIN = os.environ.get('PKCS11_TOKEN_SO_PIN')
if TOKEN_SO_PIN is None:
    TOKEN_SO_PIN = TOKEN_PIN
    warn("`PKCS11_TOKEN_SO_PIN' env variable is unset. Using value from `PKCS11_TOKEN_PIN'")

OPENSSL = shutil.which('openssl', path=os.environ.get('OPENSSL_PATH'))
if OPENSSL is None:
    warn("Path to OpenSSL not found. Please adjust `PATH' or define `OPENSSL_PATH'")


class TestCase(unittest.TestCase):
    """Base test case, optionally creates a token and a session."""

    with_token = True
    """Creates a token for this test case."""
    with_session = True
    """Creates a session for this test case."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.lib = lib = pkcs11.lib(LIB)

        if cls.with_token or cls.with_session:
            cls.token = lib.get_token(token_label=TOKEN)

    def setUp(self):
        super().setUp()

        if self.with_session:
            self.session = self.token.open(user_pin=TOKEN_PIN)

    def tearDown(self):
        if self.with_session:
            self.session.close()

        super().tearDown()


def requires(*mechanisms):
    """
    Decorates a function or class as requiring mechanisms, else they are
    skipped.
    """
    def check_requirements(self):
        """Determine what, if any, required mechanisms are unavailable."""
        unavailable = set(mechanisms) - self.token.slot.get_mechanisms()

        if unavailable:
            raise unittest.SkipTest("Requires %s" % ', '.join(map(str, unavailable)))

    def inner(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            check_requirements(self)
            return func(self, *args, **kwargs)

        return wrapper

    return inner


def xfail(condition):
    """Mark a test that's expected to fail for a given condition."""
    def inner(func):
        if condition:
            return unittest.expectedFailure(func)
        else:
            return func

    return inner


class Is:
    """
    Test what device we're using.
    """
    # trick: str.endswith() can accept tuples,
    # see https://stackoverflow.com/questions/18351951/check-if-string-ends-with-one-of-the-strings-from-a-list
    softhsm2 = LIB.lower().endswith(('libsofthsm2.so', 'libsofthsm2.dylib', 'softhsm2.dll', 'softhsm2-x64.dll'))
    nfast = LIB.lower().endswith(('libcknfast.so', 'cknfast.dll'))
    opencryptoki = LIB.endswith('libopencryptoki.so')
    travis = os.environ.get('TRAVIS') == 'true'


class Avail:
    """
    Test if a resource is available
    """
    # openssl is searched across the exec path. Optionally, OPENSSL_PATH env variable can be defined
    # in case there is no direct path to it (i.e. PATH does not point to it)
    openssl = OPENSSL is not None


class Only:
    """
    Limit tests to given conditions
    """
    softhsm2 = unittest.skipUnless(Is.softhsm2, "SoftHSMv2 only")
    openssl = unittest.skipUnless(Avail.openssl, "openssl not found in the path")


class Not:
    """
    Ignore tests for given devices
    """
    softhsm2 = unittest.skipIf(Is.softhsm2, "Not supported by SoftHSMv2")
    nfast = unittest.skipIf(Is.nfast, "Not supported by nFast")
    opencryptoki = unittest.skipIf(Is.opencryptoki, "Not supported by OpenCryptoki")


class FIXME:
    """
    Tests are broken on this platform.
    """
    softhsm2 = xfail(Is.softhsm2)
    nfast = xfail(Is.nfast)
    opencryptoki = xfail(Is.opencryptoki)
    travis = xfail(Is.travis)
/**
 * Test model for Hessian2 serialization/deserialization of {@code short}
 * values (and related small numeric types) stored inside maps.
 *
 * @author jason.shang
 */
public class Hessian2StringShortType implements Serializable {

    // String -> Short entries exercised by the serializer round-trip.
    Map<String, Short> stringShortMap;
    // String -> Byte entries exercised by the serializer round-trip.
    Map<String, Byte> stringByteMap;
    // String -> complex-object entries; PersonType is presumably a sibling
    // test model — confirm in the surrounding test package.
    Map<String, PersonType> stringPersonTypeMap;

    // No-arg constructor, kept explicit for deserialization frameworks.
    public Hessian2StringShortType(){

    }
}
#include <iostream>
#include <algorithm>
#include <cstdio>
#include <cstring>

// Interval DP ("painting" / Zuma-style): cost[l][r] is the minimum number of
// operations needed to produce the value sequence values[l..r], where one
// operation covers a contiguous interval and equal endpoints let an interval
// be produced together with its interior.
//
// BUG FIX: the original declared `int F[N]` / `int dp[N][N]` with N = 1000
// but indexed 1..n inclusive, overflowing both arrays when n == 1000.
// Arrays are now sized kMaxN + 1.
namespace {
const int kMaxN = 1000;
int values[kMaxN + 1];
int cost[kMaxN + 1][kMaxN + 1];
}

int main() {
    int n;
    std::cin >> n;
    for (int i = 1; i <= n; ++i) {
        std::cin >> values[i];
    }

    // 0x7f7f7f7f acts as "infinity"; every dp cell is overwritten before it
    // is read on the right-hand side (smaller lengths are filled first), so
    // the sentinel never participates in an addition (no overflow risk).
    std::memset(cost, 0x7f, sizeof(cost));
    for (int i = 1; i <= n; ++i) {
        cost[i][i] = 1;  // a single cell always costs one operation
    }
    for (int i = 1; i < n; ++i) {
        cost[i][i + 1] = (values[i] == values[i + 1]) ? 1 : 2;
    }

    for (int len = 3; len <= n; ++len) {
        for (int l = 1; l + len - 1 <= n; ++l) {
            const int r = l + len - 1;
            // Matching endpoints can be handled together with the interior.
            if (values[l] == values[r]) {
                cost[l][r] = cost[l + 1][r - 1];
            }
            // Otherwise (and additionally), try every split point.
            for (int mid = l; mid < r; ++mid) {
                cost[l][r] = std::min(cost[l][r], cost[l][mid] + cost[mid + 1][r]);
            }
        }
    }

    std::cout << cost[1][n];
    return 0;
}
import { UnumDto } from '../types';
/**
 * Response body shape returned by the SaaS SMS endpoint.
 * `success` is true when the message was accepted for delivery.
 */
export interface SmsResponseBody {
    success: boolean;
}
/**
 * Handler to send a SMS using UnumID's SaaS.
 * Designed to be used with a deeplink which creates a templated message.
 * @param authorization - bearer token authorizing the SaaS call
 * @param to - destination phone number (format presumably dictated by the SaaS — confirm)
 * @param deeplink - deeplink embedded into the templated SMS body
 * @returns the SaaS auth-token wrapper; the body payload is undefined
 */
export declare const sendSms: (authorization: string, to: string, deeplink: string) => Promise<UnumDto<undefined>>;
//# sourceMappingURL=sendSms.d.ts.map
/**
 * Registers a persistent single bean by id.
 * The bean's fields are captured with their initial values as the
 * "initial state" snapshot for later persistence.
 *
 * @param pBeanId   the id of the single bean
 * @param pBeanType the bean type of the persistent single bean
 */
void registerSingleBeanType(String pBeanId, Class<? extends IBean> pBeanType)
{
  final Map<IField<?>, Object> initialContent = new HashMap<>();
  for (IField<?> beanField : BeanReflector.reflectBeanFields(pBeanType))
    initialContent.put(beanField, beanField.getInitialValue());
  singleBeanInitialStates.put(pBeanId, new PersistentBeanData(-1, initialContent));
}
export * from './hybrid';
# 1 "lz4hc_cs_adapter.h" # 1 "<command-line>" # 1 "lz4hc_cs_adapter.h" // externaly defined: // - GEN_SAFE: generate safe code // - GEN_X64: generate 64-bit version # 59 "lz4hc_cs_adapter.h" // LZ4HC private const int MAXD = 1 << MAXD_LOG; private const int MAXD_MASK = MAXD - 1; private const int HASHHC_LOG = MAXD_LOG - 1; private const int HASHHC_TABLESIZE = 1 << HASHHC_LOG; private const int HASHHC_MASK = HASHHC_TABLESIZE - 1; private const int MAX_NB_ATTEMPTS = 256; private const int OPTIMAL_ML = (ML_MASK - 1) + MINMATCH; // end of LZ4HC # 116 "lz4hc_cs_adapter.h" // #define COPY4(x,s,d) { byte[] xxx; xxx[d] = xxx[s]; xxx[d + 1] = xxx[s + 1]; xxx[d + 2] = xxx[s + 2]; xxx[d + 3] = xxx[s + 3]; } // #define COPY8(x,s,d) { byte[] xxx; xxx[d] = xxx[s]; xxx[d + 1] = xxx[s + 1]; xxx[d + 2] = xxx[s + 2]; xxx[d + 3] = xxx[s + 3]; xxx[d + 4] = xxx[s + 4]; xxx[d + 5] = xxx[s + 5]; xxx[d + 6] = xxx[s + 6]; xxx[d + 7] = xxx[s + 7]; } # 210 "lz4hc_cs_adapter.h" private class LZ4HC_Data_Structure { public byte* src_base; public int hashTable[HASHHC_TABLESIZE]; public ushort chainTable[MAXD]; public byte* nextToUpdate; }; // GOGOGO # 1 "..\\..\\..\\original\\lz4hc.c" 1 /* LZ4 HC - High Compression Mode of LZ4 Copyright (C) 2011-2013, <NAME>. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html - LZ4 source repository : http://code.google.com/p/lz4/ */ # 330 "..\\..\\..\\original\\lz4hc.c" inline static int LZ4HC_Init (LZ4HC_Data_Structure* hc4, const byte* src_base) { BlockFill((void*)hc4->hashTable, sizeof(hc4->hashTable), 0); BlockFill(hc4->chainTable, sizeof(hc4->chainTable), 0xFF); hc4->nextToUpdate = src_base + 1; hc4->src_base = src_base; return 1; } inline static void* LZ4HC_Create (const byte* src_base) { void* hc4 = (new byte[sizeof(LZ4HC_Data_Structure)]); LZ4HC_Init ((LZ4HC_Data_Structure*)hc4, src_base); return hc4; } inline static int LZ4HC_Free (void** LZ4HC_Data) { /* gc */(*LZ4HC_Data); *LZ4HC_Data = NULL; return (1); } // Update chains up to ip (excluded) forceinline static void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const byte* src_p) { ushort* chainTable = hc4->chainTable; int* hashTable = hc4->hashTable; int src_base = src_0; while(hc4->nextToUpdate < src_p) { const byte* p = hc4->nextToUpdate; int delta = (p) - (hashTable[(((Peek4(_, p)) * 2654435761u) >> HASHHC_ADJUST)] + src_base); if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; chainTable[((int)p) & MAXD_MASK] = (ushort)delta; 
hashTable[(((Peek4(_, p)) * 2654435761u) >> HASHHC_ADJUST)] = (int)((p) - src_base); hc4->nextToUpdate++; } } forceinline static int LZ4HC_CommonLength (const byte* p1, const byte* p2, const byte* const src_LASTLITERALS) { const byte* p1t = p1; while (p1t<src_LASTLITERALS-(STEPSIZE_64-1)) { ulong diff = Peek8(_, p2) ^ Peek8(_, p1t); if (!diff) { p1t+=STEPSIZE_64; p2+=STEPSIZE_64; continue; } p1t += debruijn64[((ulong)((diff) & -(diff)) * 0x0218A392CDABBD3FL) >> 58]; return (p1t - p1); } if (1) if ((p1t<(src_LASTLITERALS-3)) && (Peek4(_, p2) == Peek4(_, p1t))) { p1t+=4; p2+=4; } if ((p1t<(src_LASTLITERALS-1)) && (Peek2(_, p2) == Peek2(_, p1t))) { p1t+=2; p2+=2; } if ((p1t<src_LASTLITERALS) && (*p2 == *p1t)) p1t++; return (p1t - p1); } forceinline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const byte* src_p, const byte* const src_LASTLITERALS, const byte** matchpos) { ushort* const chainTable = hc4->chainTable; int* const hashTable = hc4->hashTable; const byte* xxx_ref; int src_base = src_0; int nbAttempts=MAX_NB_ATTEMPTS; int repl=0, ml=0; ushort delta; // HC4 match finder LZ4HC_Insert(hc4, src_p); xxx_ref = (hashTable[(((Peek4(_, src_p)) * 2654435761u) >> HASHHC_ADJUST)] + src_base); // Detect repetitive sequences of length <= 4 if (xxx_ref >= src_p-4) // potential repetition { if (Peek4(_, xxx_ref) == Peek4(_, src_p)) // confirmed { delta = (ushort)(src_p-xxx_ref); repl = ml = LZ4HC_CommonLength(src_p+MINMATCH, xxx_ref+MINMATCH, src_LASTLITERALS) + MINMATCH; *matchpos = xxx_ref; } xxx_ref = ((xxx_ref) - (int)chainTable[((int)xxx_ref) & MAXD_MASK]); } while ((xxx_ref >= src_p-MAX_DISTANCE) && (nbAttempts)) { nbAttempts--; if (*(xxx_ref+ml) == *(src_p+ml)) if (Peek4(_, xxx_ref) == Peek4(_, src_p)) { int mlt = LZ4HC_CommonLength(src_p+MINMATCH, xxx_ref+MINMATCH, src_LASTLITERALS) + MINMATCH; if (mlt > ml) { ml = mlt; *matchpos = xxx_ref; } } xxx_ref = ((xxx_ref) - (int)chainTable[((int)xxx_ref) & MAXD_MASK]); } // Complete table if (repl) { 
const byte* ptr = src_p; const byte* end; end = src_p + repl - (MINMATCH-1); while(ptr < end-delta) { chainTable[((int)ptr) & MAXD_MASK] = delta; // Pre-Load ptr++; } do { chainTable[((int)ptr) & MAXD_MASK] = delta; hashTable[(((Peek4(_, ptr)) * 2654435761u) >> HASHHC_ADJUST)] = (int)((ptr) - src_base); // Head of chain ptr++; } while(ptr < end); hc4->nextToUpdate = end; } return (int)ml; } forceinline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const byte* src_p, const byte* startLimit, const byte* src_LASTLITERALS, int longest, const byte** matchpos, const byte** startpos) { ushort* const chainTable = hc4->chainTable; int* const hashTable = hc4->hashTable; int src_base = src_0; const byte* xxx_ref; int nbAttempts = MAX_NB_ATTEMPTS; int delta = (int)(src_p-startLimit); // First Match LZ4HC_Insert(hc4, src_p); xxx_ref = (hashTable[(((Peek4(_, src_p)) * 2654435761u) >> HASHHC_ADJUST)] + src_base); while ((xxx_ref >= src_p-MAX_DISTANCE) && (nbAttempts)) { nbAttempts--; if (*(startLimit + longest) == *(xxx_ref - delta + longest)) if (Peek4(_, xxx_ref) == Peek4(_, src_p)) { const byte* reft = xxx_ref+MINMATCH; const byte* ipt = src_p+MINMATCH; const byte* startt = src_p; while (ipt<src_LASTLITERALS-(STEPSIZE_64-1)) { ulong diff = Peek8(_, reft) ^ Peek8(_, ipt); if (!diff) { ipt+=STEPSIZE_64; reft+=STEPSIZE_64; continue; } ipt += debruijn64[((ulong)((diff) & -(diff)) * 0x0218A392CDABBD3FL) >> 58]; goto _endCount; } if (1) if ((ipt<(src_LASTLITERALS-3)) && (Peek4(_, reft) == Peek4(_, ipt))) { ipt+=4; reft+=4; } if ((ipt<(src_LASTLITERALS-1)) && (Peek2(_, reft) == Peek2(_, ipt))) { ipt+=2; reft+=2; } if ((ipt<src_LASTLITERALS) && (*reft == *ipt)) ipt++; _endCount: reft = xxx_ref; while ((startt>startLimit) && (reft > hc4->src_base) && (startt[-1] == reft[-1])) {startt--; reft--;} if ((ipt-startt) > longest) { longest = (int)(ipt-startt); *matchpos = reft; *startpos = startt; } } xxx_ref = ((xxx_ref) - (int)chainTable[((int)xxx_ref) & MAXD_MASK]); } 
return longest; } forceinline static int LZ4_encodeSequence(const byte** src_p, byte** dst_p, const byte** src_anchor, int matchLength, const byte* xxx_ref, byte* dst_end) { int length, len; byte* xxx_token; // Encode Literal length length = (int)(*src_p - *src_anchor); xxx_token = (*dst_p)++; if ((*dst_p + length + (2 + 1 + LASTLITERALS) + (length>>8)) > dst_end) return 1; // Check output limit if (length>=(int)RUN_MASK) { *xxx_token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*dst_p)++ = 255; *(*dst_p)++ = (byte)len; } else *xxx_token = (byte)(length<<ML_BITS); // Copy Literals if (length > 0) /*?*/{ _i = *dst_p + length; *src_anchor += WildCopy(_, *src_anchor, _, *dst_p, _i); *dst_p = _i; }; // Encode Offset { Poke2(_, *dst_p, (ushort)(*src_p-xxx_ref)); *dst_p += 2; }; // Encode MatchLength len = (int)(matchLength-MINMATCH); if (*dst_p + (1 + LASTLITERALS) + (length>>8) > dst_end) return 1; // Check output limit if (len>=(int)ML_MASK) { *xxx_token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *(*dst_p)++ = 255; *(*dst_p)++ = 255; } if (len > 254) { len-=255; *(*dst_p)++ = 255; } *(*dst_p)++ = (byte)len; } else *xxx_token += (byte)len; // Prepare next loop *src_p += matchLength; *src_anchor = *src_p; return 0; } //**************************** // Compression CODE //**************************** int LZ4_compressHCCtx(LZ4HC_Data_Structure* ctx, const byte* src, byte* dst, int src_len, int dst_maxlen) { const byte* src_p = (const byte*) src; const byte* src_anchor = src_p; const byte* const src_end = src_p + src_len; const byte* const src_mflimit = src_end - MFLIMIT; const byte* const src_LASTLITERALS = (src_end - LASTLITERALS); byte* dst_p = (byte*) dst; byte* const dst_end = dst_p + dst_maxlen; int ml, ml2, ml3, ml0; const byte* xxx_ref=NULL; const byte* start2=NULL; const byte* ref2=NULL; const byte* start3=NULL; const byte* ref3=NULL; const byte* start0; const byte* ref0; src_p++; // Main Loop while (src_p < src_mflimit) { ml 
= LZ4HC_InsertAndFindBestMatch (ctx, src_p, src_LASTLITERALS, (&xxx_ref)); if (!ml) { src_p++; continue; } // saved, in case we would skip too much start0 = src_p; ref0 = xxx_ref; ml0 = ml; _Search2: if (src_p+ml < src_mflimit) ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, src_p + ml - 2, src_p + 1, src_LASTLITERALS, ml, &ref2, &start2); else ml2=ml; if (ml2 == ml) // No better match { if (LZ4_encodeSequence(&src_p, &dst_p, &src_anchor, ml, xxx_ref, dst_end)) return 0; continue; } if (start0 < src_p) { if (start2 < src_p + ml0) // empirical { src_p = start0; xxx_ref = ref0; ml = ml0; } } // Here, start0==ip if ((start2 - src_p) < 3) // First Match too small : removed { ml = ml2; src_p = start2; xxx_ref =ref2; goto _Search2; } _Search3: // Currently we have : // ml2 > ml1, and // ip1+3 <= ip2 (usually < ip1+ml1) if ((start2 - src_p) < OPTIMAL_ML) { int correction; int new_ml = ml; if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; if (src_p+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - src_p) + ml2 - MINMATCH; correction = new_ml - (int)(start2 - src_p); if (correction > 0) { start2 += correction; ref2 += correction; ml2 -= correction; } } // Now, we have start2 = ip+new_ml, with new_ml=min(ml, OPTIMAL_ML=18) if (start2 + ml2 < src_mflimit) ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, src_LASTLITERALS, ml2, &ref3, &start3); else ml3=ml2; if (ml3 == ml2) // No better match : 2 sequences to encode { // ip & ref are known; Now for ml if (start2 < src_p+ml) ml = (int)(start2 - src_p); // Now, encode 2 sequences if (LZ4_encodeSequence(&src_p, &dst_p, &src_anchor, ml, xxx_ref, dst_end)) return 0; src_p = start2; if (LZ4_encodeSequence(&src_p, &dst_p, &src_anchor, ml2, ref2, dst_end)) return 0; continue; } if (start3 < src_p+ml+3) // Not enough space for match 2 : remove it { if (start3 >= (src_p+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 { if (start2 < src_p+ml) { int correction = (int)(src_p+ml - start2); start2 += 
correction; ref2 += correction; ml2 -= correction; if (ml2 < MINMATCH) { start2 = start3; ref2 = ref3; ml2 = ml3; } } if (LZ4_encodeSequence(&src_p, &dst_p, &src_anchor, ml, xxx_ref, dst_end)) return 0; src_p = start3; xxx_ref = ref3; ml = ml3; start0 = start2; ref0 = ref2; ml0 = ml2; goto _Search2; } start2 = start3; ref2 = ref3; ml2 = ml3; goto _Search3; } // OK, now we have 3 ascending matches; let's write at least the first one // ip & ref are known; Now for ml if (start2 < src_p+ml) { if ((start2 - src_p) < (int)ML_MASK) { int correction; if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; if (src_p + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - src_p) + ml2 - MINMATCH; correction = ml - (int)(start2 - src_p); if (correction > 0) { start2 += correction; ref2 += correction; ml2 -= correction; } } else { ml = (int)(start2 - src_p); } } if (LZ4_encodeSequence(&src_p, &dst_p, &src_anchor, ml, xxx_ref, dst_end)) return 0; src_p = start2; xxx_ref = ref2; ml = ml2; start2 = start3; ref2 = ref3; ml2 = ml3; goto _Search3; } // Encode Last Literals { int lastRun = (int)(src_end - src_anchor); if (((byte*)dst_p - dst) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (uint)dst_maxlen) return 0; // Check output limit if (lastRun>=(int)RUN_MASK) { *dst_p++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *dst_p++ = 255; *dst_p++ = (byte) lastRun; } else *dst_p++ = (byte)(lastRun<<ML_BITS); BlockCopy(_, src_anchor, _, dst_p, src_end - src_anchor); dst_p += src_end-src_anchor; } // End return (int) (((byte*)dst_p)-dst); } int LZ4_compressHC_limitedOutput(const byte* src, byte* dst, int src_len, int dst_maxlen) { void* ctx = LZ4HC_Create((const byte*)src); int result = LZ4_compressHCCtx((LZ4HC_Data_Structure*)ctx, src, dst, src_len, dst_maxlen); LZ4HC_Free (&ctx); return result; } int LZ4_compressHC(const byte* src, byte* dst, int src_len) { return LZ4_compressHC_limitedOutput(src, dst, src_len, LZ4_compressBound(src_len)+1); } # 221 "lz4hc_cs_adapter.h" 2
//Mob for the Thinblade of Vines -- Yves
#include <std.h>
inherit WEAPONLESS;

// Tiny forest-sprite mob. A weak melee fighter (3d3 slashing, torso only)
// whose real defenses are very high magic resistance and the
// "knock unconscious" property; worth almost no experience.
void create(){
    ::create();
    set_name("sprite");
    setenv("MIN", "$N flies in.");
    setenv("MOUT", "$N flies off to the $D.");
    set_id(({ "sprite", "fairy", "fey" }));
    set_short("%^BOLD%^%^GREEN%^Sprite%^RESET%^");
    // Long description is resolved lazily through the functional below,
    // so long_desc() builds the colored string on demand.
    set_long((:TO,"long_desc":));
    set_hd(35,5);
    set_max_hp(25);
    set_hp(25);
    set_gender("female");
    set_race("sprite");
    set_body_type("human");
    set_overall_ac(0);
    set_size(1);
    // Deliberately worth almost nothing when killed.
    set_exp(1);
    set_property("magic",1);
    set_mob_magic_resistance("very high");
    set_attacks_num(1);
    set_damage(3,3);
    set_attack_limbs(({"torso"}));
    set_base_damage_type("slashing");
    set_property("knock unconscious",1);
}

// Colored long description shown when the sprite is examined.
// Fix: "irridescent" -> "iridescent" in the player-facing text.
string long_desc(){
    return "%^BOLD%^%^GREEN%^This sprite is a tiny winged fey creature dressed in greens and "
    "%^RESET%^%^ORANGE%^browns%^BOLD%^%^GREEN%^ to help her blend in with her forest home. Her small "
    "iridescent wings %^WHITE%^sparkle%^GREEN%^ and let off pixie dust as she "
    "flutters and moves about.%^RESET%^";
}
<reponame>hendrysadrak/Bracket-Pair-Colorizer-2<filename>src/deferred.ts<gh_stars>1-10 export const getDeferred = () => { let resolve: () => void; let reject: () => void; const promise = new Promise((_resolve, _reject) => { resolve = _resolve as () => void; reject = _reject; }); return { // @ts-ignore resolve, // @ts-ignore reject, promise, }; };
/*!
 * Copyright (c) 2014 by Contributors
 * \file reduceto1d.h
 * \brief support for sum_rows and sumall_except_dim
 * \author Tianqi Chen
 */
#ifndef MSHADOW_EXTENSION_REDUCETO1D_H_
#define MSHADOW_EXTENSION_REDUCETO1D_H_
#include "../extension.h"
namespace mshadow {
namespace expr {
/*!
 * \brief reduction to 1 dimension tensor
 * input: Tensor<Device,k>: ishape
 * output: Tensor<Device,1> shape[0] = ishape[dimkeep];
 *
 * \tparam SrcExp type of expression to be reduced
 * \tparam DType the data type of the scalar
 * \tparam Reducer which reducer to use
 * \tparam m_dimkeep which dimension to be kept, encoded with dimsrc - dimkeep
 */
template<typename SrcExp, typename DType, typename Reducer, int m_dimkeep>
struct ReduceTo1DExp:
      public Exp<ReduceTo1DExp<SrcExp, DType, Reducer, m_dimkeep>,
                 DType, type::kComplex> {
  /*! \brief source operand */
  const SrcExp &src_;
  /*! \brief scaling factor applied to the reduced result */
  DType scale_;
  /*! \brief construct a reduce-to-1D expression from source and scale */
  ReduceTo1DExp(const SrcExp& src, DType scale)
      : src_(src), scale_(scale) {}
};
/*!
 * \brief a sum over all dimensions, except dimkeep
 * \param exp input expression
 * \return an expression with type Tensor<Device,1>
 * \tparam dimkeep the dimension that will be kept
 * \tparam SrcExp expression
 * \tparam etype type of expression
 */
template<int dimkeep, typename SrcExp, typename DType, int etype>
inline ReduceTo1DExp<SrcExp, DType, red::sum, ExpInfo<SrcExp>::kDim - dimkeep>
sumall_except_dim(const Exp<SrcExp, DType, etype> &exp) {
  // m_dimkeep is encoded as (kDim - dimkeep) so the evaluation engines
  // below can select the lowest-dimension case (m_dimkeep == 1) at
  // compile time via partial specialization.
  return ReduceTo1DExp<SrcExp, DType, red::sum,
                       ExpInfo<SrcExp>::kDim - dimkeep>(exp.self(), DType(1));
}
/*!
 * \brief reduce over all dimensions, except dimkeep, using a custom Reducer
 * \param exp input expression
 * \return an expression with type Tensor<Device,1>
 * \tparam dimkeep the dimension that will be kept
 * \tparam Reducer the reduction operator to apply (e.g. red::sum)
 * \tparam SrcExp expression
 * \tparam etype type of expression
 */
template<int dimkeep, typename Reducer, typename SrcExp,
         typename DType, int etype>
inline ReduceTo1DExp<SrcExp, DType, Reducer, ExpInfo<SrcExp>::kDim - dimkeep>
reduce_except_dim(const Exp<SrcExp, DType, etype> &exp) {
  return ReduceTo1DExp<SrcExp, DType, Reducer,
                       ExpInfo<SrcExp>::kDim - dimkeep>(exp.self(), DType(1));
}
/*!
 * \brief an expression that sums over the rows of a matrix
 * \param exp input expression that must be a matrix Tensor<?, 2>
 * \return an expression with type Tensor<Device, 1>
 * \tparam SrcExp expression
 * \tparam etype type of expression
 */
template<typename SrcExp, typename DType, int etype>
inline ReduceTo1DExp<SrcExp, DType, red::sum, 1>
sum_rows(const Exp<SrcExp, DType, etype> &exp) {
  // sum_rows is only defined for 2-D expressions; reject others at
  // compile time.
  TypeCheckPass<ExpInfo<SrcExp>::kDim ==2>
      ::Error_Expression_Does_Not_Meet_Dimension_Req();
  return sumall_except_dim<1>(exp);
}
/*!
 * \brief evaluation engine for ReduceTo1DExp when the kept dimension is
 *  NOT the lowest one (m_dimkeep != 1)
 */
template<typename SV, typename Device, typename DType,
         typename SrcExp, typename Reducer, int m_dimkeep>
struct ExpComplexEngine<SV,
                        Tensor<Device, 1, DType>,
                        ReduceTo1DExp<SrcExp, DType, Reducer, m_dimkeep>,
                        DType> {
  /*! \brief actual dimension index to keep, decoded from m_dimkeep */
  static const int dimkeep = ExpInfo<SrcExp>::kDim - m_dimkeep;
  inline static void Eval(Tensor<Device, 1, DType> *dst,
                          const ReduceTo1DExp<SrcExp, DType,
                                              Reducer, m_dimkeep> &exp) {
    // the lowest-dimension case is handled by the specialization below
    TypeCheckPass<m_dimkeep != 1>
        ::Error_Expression_Does_Not_Meet_Dimension_Req();
    MapReduceKeepHighDim<SV, Reducer, dimkeep>(dst, exp.src_, exp.scale_);
  }
};
/*!
 * \brief evaluation engine for ReduceTo1DExp specialized for keeping the
 *  lowest dimension (m_dimkeep == 1)
 */
template<typename SV, typename Device, typename DType,
         typename SrcExp, typename Reducer>
struct ExpComplexEngine<SV,
                        Tensor<Device, 1, DType>,
                        ReduceTo1DExp<SrcExp, DType, Reducer, 1>,
                        DType> {
  inline static void Eval(Tensor<Device, 1, DType> *dst,
                          const ReduceTo1DExp<SrcExp, DType, Reducer, 1> &exp) {
    MapReduceKeepLowest<SV, Reducer>(dst, exp.src_, exp.scale_);
  }
};
}  // namespace expr
}  // namespace mshadow
#endif  // MSHADOW_EXTENSION_REDUCETO1D_H_
def inprocess_order_list():
    """Return template context for orders at the 'in process' status.

    Looks up the status code from ``ORDER_STATUS`` and fetches the
    matching orders, plus the multihost flag for the template.
    """
    # ORDER_STATUS[2][0] is presumably the code for the "in process"
    # status -- TODO confirm against the ORDER_STATUS definition.
    status_code = unicode(ORDER_STATUS[2][0])
    return {
        'orders': orders_at_status(status_code),
        'multihost': is_multihost_enabled(),
    }
import pytest

from .test_helpers import CASENAMES, RESULTS, get_stopwords

# Languages whose stopword lists are checked against area titles.
LANGS = ["english"]


@pytest.mark.parametrize("testcase", CASENAMES)
def test_empty_area_titles(testcase):
    """No row in a result may carry an empty area title."""
    result = RESULTS[testcase]
    empty_titles = result.area.map(lambda title: len(title) == 0)
    assert empty_titles.sum() == 0


@pytest.mark.parametrize("testcase", CASENAMES)
@pytest.mark.parametrize("lang", LANGS)
def test_stopwords_not_start_end_keywords_areatitles(testcase, lang):
    """Keywords inside area titles must not begin or end with a stopword."""
    result = RESULTS[testcase]
    stopwords = get_stopwords(lang)
    for title in result.area.unique():
        # Area titles are comma-separated lists of keywords.
        for keyword in title.split(", "):
            words = keyword.split(" ")
            assert words[0] not in stopwords
            assert words[-1] not in stopwords
It's so easy to miss the little things in life. Those things that brighten up our day but are taken for granted and never really noticed outside of our subconscious. Like Sunny days or calls from a loved one or the fact that your parents will finally be dead one day. RPGs, much like life, are full of these little extra touches that enrich any experience, yet they're truly taken for granted and never given the appreciation they deserve. Even worse, a great many RPGs shun them altogether, despite how easy they are to include and how big a difference they can make, despite their subtlety. Obviously, we cannot let that stand. Good roleplaying games have been throwing in quiet-yet-wonderful touches without applause for far too long, and it's about time we took notice of the little things. Read on as we isolate and appreciate the little things that every RPG should have. Enemies that telegraph when their HP is low: It's amazing that more RPGs don't do this. In nearly every roleplaying game, your own characters will start to wilt and pant when their HP reaches critical levels. The enemies get to know who is weakest and sometimes will capitalize on that, but why aren't players awarded the same advantage? In the RPGs that do include enemy HP telegraphing, there can be few things more satisfying than finally seeing a hard-fought boss begin to sag and indicate that he's about to give up the ghost. That's when you know to go on an all-out attack, showing no mercy and finishing the job. Seeing an enemy droop lets you know to save your precious magical spells for a more healthy foe. MP doesn't grow on trees, and it's annoying to have to waste a powerful attack on something that could be killed with a single sword strike, or use a potion just moments before the fight ends. HP telegraphing also makes for a terrific crescendo to a battle, that final push for victory when you see the enemy falter. 
Much more exciting and conclusive than an enemy simply disappearing after giving no indication that the fight's nearly over. This is definitely something more RPGs should include. The fact that a significant number of them don't is borderline criminal. Equipped weapons and armor visible on characters: Again, this is something I would expect from far more RPGs. It can be somewhat common to see equipped weapons visible on a character, but sometimes RPGs don't even bother to do that, and it makes me sick. What I really want, however, are more games that render everything you put on your characters' bodies. I want to see that Mythril Helmet, and if my character has put some glasses on, the game should show me what they look like. Lost Odyssey met us halfway, by allowing accessories to appear. That meant that if Kaim was wearing glasses or earrings, they would appear. Final Fantasy: Crystal Chronicles will kit our characters in full armor that reflects what they're wearing. It's also very common for Western RPGs like Elder Scrolls or Fable to render full armor sets, but JRPGs simply don't do them. I think they should. It's silly to put a full suit of armor on a character and then see him fight in only a pair of ludicrously oversized pants. Bestiaries: RPGs are full of varied and strange enemies, usually provided without any kind of explanation simply because the game needed combat and the developers didn't feel like contextualizing it. Random battles against bizarre monsters are a staple of the RPG genre, but it's rare that you'll ever find anything out about the creatures you face, outside of how many times you have to hit it before it dies. Games that provide a bestiary with interesting information on the monsters encountered have an extra flavor to them, and give these random creatures a sense of belonging in the world. More importantly, I simply find that bestiaries are great fun to read.
Flipping through the summaries of various creatures in Shadow Hearts or Castlevania is something I find enjoyable, and it's a shame that so many games neglect to include listings and biographies of the various opponents defeated throughout the course of an adventure. Why is a Hedgehog Pie called a Hedgehog Pie? Why is this giant worm boss hiding in the sand and why do I have to fight it without any explanation? I want to know these things, and if the game won't tell me while I'm fighting them, at least include a paragraph of explanatory text in a menu somewhere so that I can find out afterward. Besides which, it's fun to read up on an interesting game world and soak in the flair. Useful merchant screens: How many times have you been playing an RPG and started doing equipment shopping, only to find out that the store's menu system sucks? You're not given a clear comparison between your equipped items and the items you want to buy, so you're not sure which items are better. Even more annoying, you're not given the option to equip the new items after purchase, meaning you have to exit the store, open the main menu, navigate to the equipment screen and then equip from there. While somewhat uncommon these days, there are still games that release with awkward, hard-to-read merchant screens that don't allow for automatic equipping, and such things have to stop. A clear, clean, easily decipherable merchant screen that will let you change equipment while shopping makes each trip to the store much more pleasant and enjoyable, and ensures that customers will come back for more. That bit when the party disbands in a town: After battling through a dungeon, defeating a boss and traversing the world map, you come across a brand new city, the biggest city of the game so far. Excitement! When you enter the city, you trigger the obligatory scene where the characters react to the new sights and sounds.
The party decides to disband and explore the town on their own, leaving you to your own devices. Time to look around. As you walk about town, you'll notice members of your party simply going off and doing their own thing. One character may be getting into an argument with a storekeeper. Another may have found some kids and decided to join in their game. You get to see these characters you've fought with in a more peaceful and natural environment, and you get to see what they do in their down time and, by extension, learn far more about them as rounded and three-dimensional characters. Getting to know and care about characters should be an important part of any RPG, and there's no better way to foster investment into a character than to see them simply being themselves. These moments where you find your allies scattered about a city doing their own thing are a terrific opportunity for the player to get to know who they're questing with. RPGs should always try and include these moments. "Story so far" summaries: An RPG is a long and winding road. Lots of things happen, and sometimes it can be such a lengthy affair that a considerable break from playing is needed. When a player returns from that break, he doesn't want to be stuck trying to remember what happened when he left off, or having no idea where next to go in a world's vast map. A smart RPG will make sure to keep a log of your adventure, so that you can jump right back into the action after a brief refresher course. It can be in the form of a journal or diary written by the player character, it can be a Metal Gear Solid-esque description of the story so far, or it can simply be a clearly labeled objective message telling you exactly what needs to be done next. In a game with a sprawling world and a huge story, it's important to make sure a player doesn't get lost. HP & MP restoration with level increases: It doesn't happen in every RPG, but when it does happen, it's always nice.
Notable games would be Pokémon, or any number of Western RPGs such as Sacred 2 or Oblivion. It's incredibly useful to have a level increase restore one's health, especially when it comes to saving precious potions or curative spells. There's nothing quite like being near death, yet incredibly close to leveling up, and fighting on just long enough to get that last batch of XP and get a free restoration. These are the kind of things that add very little to the game on the surface, yet bring just an extra dash of brilliance to keep things interesting. Who needs those expensive Inns when you can work for your full health bar? Leveling up is satisfying in its own right, but throwing HP and MP at the player as well just makes it feel like so much more of a treat. Status spells that actually WORK: This is the biggie. Everybody knows about status spells: Poison, Sleep, Frog, etcetera. The kind of spells that don't deal direct damage to an enemy, but have all sorts of unique, interesting and potentially powerful effects. In theory, these are the kind of spells a brilliant tactician or long-haul player should be permanently stocking in their deck of cards. In practice, however, these spells are nearly always worthless. What happens in reality is that every boss in the game will be immune to the effects of status spells. They can't be poisoned, put to sleep, confused or otherwise manipulated. What's worse, a fair few regular enemies will also shirk off these spells as if they're nothing. Naturally, however, if an enemy possesses the exact same spell, it'll work on YOU every bloody time. It's not fair, and it's not right. Of course, many games would be too easy if these spells worked all the time, but there has to be a compromise. Maybe a Sleep spell will work on a boss, but only for a turn or two. Maybe the Poison damage isn't very much against strong enemies. Either way, it's upsetting to have such great and disruptive magic while lacking the ability to use it effectively. 
Games like Pokémon actually use status attacks quite expertly, with huge rewards in store for those willing to use them, and tactical RPGs tend to allow for status ailments in a much more lenient fashion. However, many other RPGs over the years have crammed their games with useless spells that could have been incredibly brilliant. Of course, when you actually manage to put an enemy to Sleep, you are one happy gamer. And that's what these little treats should be about. Making gamers happy. So take heed to this list, RPG developers of the world. We demand the little things, and we demand them now! You are logged out. Login | Sign up
<reponame>lambdaxymox/fuchsia<filename>src/connectivity/network/dns/src/main.rs // Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{Context as _, Error}, async_trait::async_trait, dns::{ async_resolver::{Resolver, Spawner}, config::{ServerList, UpdateServersResult}, }, fidl_fuchsia_net::{self as fnet, NameLookupRequest, NameLookupRequestStream}, fidl_fuchsia_net_ext as net_ext, fidl_fuchsia_net_name::{LookupAdminRequest, LookupAdminRequestStream}, fuchsia_async as fasync, fuchsia_component::server::{ServiceFs, ServiceFsDir}, fuchsia_inspect, fuchsia_zircon as zx, futures::{ channel::mpsc, lock::Mutex, FutureExt as _, SinkExt as _, StreamExt as _, TryStreamExt as _, }, log::{debug, error, info, warn}, net_declare::fidl_ip_v6, net_types::ip::IpAddress, parking_lot::RwLock, std::collections::VecDeque, std::convert::TryFrom, std::net::IpAddr, std::rc::Rc, std::sync::Arc, trust_dns_proto::rr::{domain::IntoName, TryParseIp}, trust_dns_resolver::{ config::{ LookupIpStrategy, NameServerConfig, NameServerConfigGroup, Protocol, ResolverConfig, ResolverOpts, }, error::{ResolveError, ResolveErrorKind}, lookup, lookup_ip, }, }; struct SharedResolver<T>(RwLock<Rc<T>>); impl<T> SharedResolver<T> { fn new(resolver: T) -> Self { SharedResolver(RwLock::new(Rc::new(resolver))) } fn read(&self) -> Rc<T> { self.0.read().clone() } fn write(&self, other: Rc<T>) { *self.0.write() = other; } } const STAT_WINDOW_DURATION: zx::Duration = zx::Duration::from_seconds(60); const STAT_WINDOW_COUNT: usize = 30; /// Stats about queries during the last `STAT_WINDOW_COUNT` windows of /// `STAT_WINDOW_DURATION` time. /// /// For example, if `STAT_WINDOW_DURATION` == 1 minute, and /// `STAT_WINDOW_COUNT` == 30, `past_queries` contains information about, at /// most, 30 one-minute windows of completed queries. 
/// /// NB: there is no guarantee that these windows are directly consecutive; only /// that each window begins at least `STAT_WINDOW_DURATION` after the previous /// window's start time. struct QueryStats { inner: Mutex<VecDeque<QueryWindow>>, } impl QueryStats { fn new() -> Self { Self { inner: Mutex::new(VecDeque::new()) } } async fn finish_query(&self, start_time: fasync::Time, error: Option<&ResolveErrorKind>) { let now = fasync::Time::now(); let past_queries = &mut *self.inner.lock().await; let current_window = if let Some(window) = past_queries.back_mut() { if now - window.start >= STAT_WINDOW_DURATION { past_queries.push_back(QueryWindow::new(now)); if past_queries.len() > STAT_WINDOW_COUNT { // Remove the oldest window of query stats. let _: QueryWindow = past_queries .pop_front() .expect("there should be at least one element in `past_queries`"); } // This is safe because we've just pushed an element to `past_queries`. past_queries.back_mut().unwrap() } else { window } } else { past_queries.push_back(QueryWindow::new(now)); // This is safe because we've just pushed an element to `past_queries`. past_queries.back_mut().unwrap() }; let elapsed_time = now - start_time; if let Some(e) = error { current_window.fail(elapsed_time, e) } else { current_window.succeed(elapsed_time) } } } /// Stats about queries that failed due to an internal trust-dns error. /// These counters map to variants of /// [`trust_dns_resolver::error::ResolveErrorKind`]. 
#[derive(Default, Debug, PartialEq)] struct FailureStats { message: u64, no_records_found: u64, io: u64, proto: u64, timeout: u64, } impl FailureStats { fn increment(&mut self, kind: &ResolveErrorKind) { let FailureStats { message, no_records_found, io, proto, timeout } = self; match kind { ResolveErrorKind::Message(error) => { let _: &str = error; *message += 1 } ResolveErrorKind::Msg(error) => { let _: &String = error; *message += 1 } ResolveErrorKind::NoRecordsFound { query: _, valid_until: _ } => *no_records_found += 1, ResolveErrorKind::Io(error) => { let _: &std::io::Error = error; *io += 1 } ResolveErrorKind::Proto(error) => { let _: &trust_dns_proto::error::ProtoError = error; *proto += 1 } ResolveErrorKind::Timeout => *timeout += 1, } } } struct QueryWindow { start: fasync::Time, success_count: u64, failure_count: u64, success_elapsed_time: zx::Duration, failure_elapsed_time: zx::Duration, failure_stats: FailureStats, } impl QueryWindow { fn new(start: fasync::Time) -> Self { Self { start, success_count: 0, failure_count: 0, success_elapsed_time: zx::Duration::from_nanos(0), failure_elapsed_time: zx::Duration::from_nanos(0), failure_stats: FailureStats::default(), } } fn succeed(&mut self, elapsed_time: zx::Duration) { let QueryWindow { success_count, success_elapsed_time, start: _, failure_count: _, failure_elapsed_time: _, failure_stats: _, } = self; *success_count += 1; *success_elapsed_time += elapsed_time; } fn fail(&mut self, elapsed_time: zx::Duration, error: &ResolveErrorKind) { let QueryWindow { failure_count, failure_elapsed_time, failure_stats, start: _, success_count: _, success_elapsed_time: _, } = self; *failure_count += 1; *failure_elapsed_time += elapsed_time; failure_stats.increment(error) } } async fn update_resolver<T: ResolverLookup>(resolver: &SharedResolver<T>, servers: ServerList) { let mut resolver_opts = ResolverOpts::default(); resolver_opts.ip_strategy = LookupIpStrategy::Ipv4AndIpv6; // We're going to add each server twice, once 
with protocol UDP and // then with protocol TCP. let mut name_servers = NameServerConfigGroup::with_capacity(servers.len() * 2); name_servers.extend(servers.into_iter().flat_map(|server| { let net_ext::SocketAddress(socket_addr) = server.into(); // Every server config gets UDP and TCP versions with // preference for UDP. std::iter::once(NameServerConfig { socket_addr, protocol: Protocol::Udp, tls_dns_name: None, }) .chain(std::iter::once(NameServerConfig { socket_addr, protocol: Protocol::Tcp, tls_dns_name: None, })) })); let new_resolver = T::new(ResolverConfig::from_parts(None, Vec::new(), name_servers), resolver_opts).await; let () = resolver.write(Rc::new(new_resolver)); } enum IncomingRequest { // NameLookup service. NameLookup(NameLookupRequestStream), // LookupAdmin Service. LookupAdmin(LookupAdminRequestStream), } #[async_trait] trait ResolverLookup { async fn new(config: ResolverConfig, options: ResolverOpts) -> Self; async fn lookup_ip<N: IntoName + TryParseIp + Send>( &self, host: N, ) -> Result<lookup_ip::LookupIp, ResolveError>; async fn ipv4_lookup<N: IntoName + Send>( &self, host: N, ) -> Result<lookup::Ipv4Lookup, ResolveError>; async fn ipv6_lookup<N: IntoName + Send>( &self, host: N, ) -> Result<lookup::Ipv6Lookup, ResolveError>; async fn reverse_lookup(&self, addr: IpAddr) -> Result<lookup::ReverseLookup, ResolveError>; } #[async_trait] impl ResolverLookup for Resolver { async fn new(config: ResolverConfig, options: ResolverOpts) -> Self { Resolver::new(config, options, Spawner).await.expect("failed to create resolver") } async fn lookup_ip<N: IntoName + TryParseIp + Send>( &self, host: N, ) -> Result<lookup_ip::LookupIp, ResolveError> { self.lookup_ip(host).await } async fn ipv4_lookup<N: IntoName + Send>( &self, host: N, ) -> Result<lookup::Ipv4Lookup, ResolveError> { self.ipv4_lookup(host).await } async fn ipv6_lookup<N: IntoName + Send>( &self, host: N, ) -> Result<lookup::Ipv6Lookup, ResolveError> { self.ipv6_lookup(host).await } async fn 
reverse_lookup(&self, addr: IpAddr) -> Result<lookup::ReverseLookup, ResolveError> { self.reverse_lookup(addr).await } } /// Helper function to handle a [`ResolverError`] and convert it into a /// [`fnet::LookupError`]. /// /// `source` is used for debugging information. fn handle_err(source: &'static str, err: ResolveError) -> fnet::LookupError { use trust_dns_proto::error::ProtoErrorKind; let (lookup_err, ioerr) = match err.kind() { // The following mapping is based on the analysis of `ResolveError` enumerations. // For cases that are not obvious such as `ResolveErrorKind::Msg` and // `ResolveErrorKind::Message`, I (chunyingw) did code searches to have more insights. // `ResolveErrorKind::Msg`: An error with arbitrary message, it could be ex. "lock was // poisoned, this is non-recoverable" and ""DNS Error". // `ResolveErrorKind::Message`: An error with arbitrary message, it is mostly returned when // there is no name in the input vector to look up with "can not lookup for no names". // This is a best-effort mapping. 
ResolveErrorKind::NoRecordsFound { query: _, valid_until: _ } => { (fnet::LookupError::NotFound, None) } ResolveErrorKind::Proto(err) => match err.kind() { ProtoErrorKind::DomainNameTooLong(_) | ProtoErrorKind::EdnsNameNotRoot(_) => { (fnet::LookupError::InvalidArgs, None) } ProtoErrorKind::Canceled(_) | ProtoErrorKind::Timeout => { (fnet::LookupError::Transient, None) } ProtoErrorKind::Io(inner) => (fnet::LookupError::Transient, Some(inner)), ProtoErrorKind::CharacterDataTooLong { max: _, len: _ } | ProtoErrorKind::LabelOverlapsWithOther { label: _, other: _ } | ProtoErrorKind::DnsKeyProtocolNot3(_) | ProtoErrorKind::IncorrectRDataLengthRead { read: _, len: _ } | ProtoErrorKind::LabelBytesTooLong(_) | ProtoErrorKind::PointerNotPriorToLabel { idx: _, ptr: _ } | ProtoErrorKind::MaxBufferSizeExceeded(_) | ProtoErrorKind::Message(_) | ProtoErrorKind::Msg(_) | ProtoErrorKind::NoError | ProtoErrorKind::NotAllRecordsWritten { count: _ } | ProtoErrorKind::RrsigsNotPresent { name: _, record_type: _ } | ProtoErrorKind::UnknownAlgorithmTypeValue(_) | ProtoErrorKind::UnknownDnsClassStr(_) | ProtoErrorKind::UnknownDnsClassValue(_) | ProtoErrorKind::UnknownRecordTypeStr(_) | ProtoErrorKind::UnknownRecordTypeValue(_) | ProtoErrorKind::UnrecognizedLabelCode(_) | ProtoErrorKind::UnrecognizedNsec3Flags(_) | ProtoErrorKind::Poisoned | ProtoErrorKind::Ring(_) | ProtoErrorKind::SSL(_) | ProtoErrorKind::Timer | ProtoErrorKind::UrlParsing(_) | ProtoErrorKind::Utf8(_) => (fnet::LookupError::InternalError, None), }, ResolveErrorKind::Io(inner) => (fnet::LookupError::Transient, Some(inner)), ResolveErrorKind::Timeout => (fnet::LookupError::Transient, None), ResolveErrorKind::Msg(_) | ResolveErrorKind::Message(_) => { (fnet::LookupError::InternalError, None) } }; if let Some(ioerr) = ioerr { match ioerr.raw_os_error() { Some(libc::EHOSTUNREACH) => debug!("{} error: {}; (IO error {:?})", source, err, ioerr), // TODO(fxbug.dev/55621): We should log at WARN below, but trust-dns is // erasing 
raw_os_error for us. Logging to debug for now to reduce // log spam. _ => debug!("{} error: {}; (IO error {:?})", source, err, ioerr), } } else { warn!("{} error: {}", source, err) } lookup_err } struct LookupMode { ipv4_lookup: bool, ipv6_lookup: bool, } async fn lookup_ip_inner<T: ResolverLookup>( caller: &'static str, resolver: &SharedResolver<T>, stats: Arc<QueryStats>, hostname: String, LookupMode { ipv4_lookup, ipv6_lookup }: LookupMode, ) -> Result<Vec<fnet::IpAddress>, fnet::LookupError> { let start_time = fasync::Time::now(); let resolver = resolver.read(); let result: Result<Vec<_>, _> = match (ipv4_lookup, ipv6_lookup) { (true, false) => resolver.ipv4_lookup(hostname).await.map(|addrs| { addrs.into_iter().map(|addr| net_ext::IpAddress(IpAddr::V4(addr)).into()).collect() }), (false, true) => resolver.ipv6_lookup(hostname).await.map(|addrs| { addrs.into_iter().map(|addr| net_ext::IpAddress(IpAddr::V6(addr)).into()).collect() }), (true, true) => resolver .lookup_ip(hostname) .await .map(|addrs| addrs.into_iter().map(|addr| net_ext::IpAddress(addr).into()).collect()), (false, false) => { return Err(fnet::LookupError::InvalidArgs); } }; let () = stats.finish_query(start_time, result.as_ref().err().map(|e| e.kind())).await; result.map_err(|e| handle_err(caller, e)).and_then(|addrs| { if addrs.is_empty() { Err(fnet::LookupError::NotFound) } else { Ok(addrs) } }) } async fn handle_lookup_ip<T: ResolverLookup>( resolver: &SharedResolver<T>, stats: Arc<QueryStats>, hostname: String, options: fnet::LookupIpOptions, ) -> Result<fnet::IpAddressInfo, fnet::LookupError> { let mode = LookupMode { ipv4_lookup: options.contains(fnet::LookupIpOptions::V4Addrs), ipv6_lookup: options.contains(fnet::LookupIpOptions::V6Addrs), }; let response = lookup_ip_inner("LookupIp", resolver, stats, hostname, mode).await?; let mut result = fnet::IpAddressInfo { ipv4_addrs: vec![], ipv6_addrs: vec![], canonical_name: None }; for address in response.into_iter() { match address { 
fnet::IpAddress::Ipv4(ipv4) => { result.ipv4_addrs.push(ipv4); } fnet::IpAddress::Ipv6(ipv6) => { result.ipv6_addrs.push(ipv6); } } } Ok(result) } async fn handle_lookup_ip2<T: ResolverLookup>( resolver: &SharedResolver<T>, stats: Arc<QueryStats>, routes: &fidl_fuchsia_net_routes::StateProxy, hostname: String, options: fnet::LookupIpOptions2, ) -> Result<fnet::LookupResult, fnet::LookupError> { let fnet::LookupIpOptions2 { ipv4_lookup, ipv6_lookup, sort_addresses, .. } = options; let mode = LookupMode { ipv4_lookup: ipv4_lookup.unwrap_or(false), ipv6_lookup: ipv6_lookup.unwrap_or(false), }; let addrs = lookup_ip_inner("LookupIp2", resolver, stats, hostname, mode).await?; let addrs = if sort_addresses.unwrap_or(false) { sort_preferred_addresses(addrs, routes).await? } else { addrs }; Ok(fnet::LookupResult { addresses: Some(addrs), ..fnet::LookupResult::EMPTY }) } async fn sort_preferred_addresses( mut addrs: Vec<fnet::IpAddress>, routes: &fidl_fuchsia_net_routes::StateProxy, ) -> Result<Vec<fnet::IpAddress>, fnet::LookupError> { let mut addrs_info = futures::future::try_join_all( addrs // Drain addresses from addrs, but keep it alive so we don't need to // reallocate. .drain(..) .map(|mut addr| async move { let source_addr = match routes.resolve(&mut addr).await? { Ok(fidl_fuchsia_net_routes::Resolved::Direct( fidl_fuchsia_net_routes::Destination { source_address, .. }, )) | Ok(fidl_fuchsia_net_routes::Resolved::Gateway( fidl_fuchsia_net_routes::Destination { source_address, .. }, )) => source_address, // If resolving routes returns an error treat it as an // unreachable address. 
Err(e) => { debug!( "fuchsia.net.routes/State.resolve({}) failed {}", fidl_fuchsia_net_ext::IpAddress::from(addr), zx::Status::from_raw(e) ); None } }; Ok((addr, DasCmpInfo::from_addrs(&addr, source_addr.as_ref()))) }), ) .await .map_err(|e: fidl::Error| { warn!("fuchsia.net.routes/State.resolve FIDL error {:?}", e); fnet::LookupError::InternalError })?; let () = addrs_info.sort_by(|(_laddr, left), (_raddr, right)| left.cmp(right)); // Reinsert the addresses in order from addr_info. let () = addrs.extend(addrs_info.into_iter().map(|(addr, _)| addr)); Ok(addrs) } #[derive(Debug)] struct Policy { prefix: net_types::ip::Subnet<net_types::ip::Ipv6Addr>, precedence: usize, label: usize, } macro_rules! decl_policy { ($ip:tt/$prefix:expr => $precedence:expr, $label:expr) => { Policy { // Unsafe allows us to declare constant subnets. // We make sure no invalid subnets are created in // test_valid_policy_table. prefix: unsafe { net_types::ip::Subnet::new_unchecked( net_types::ip::Ipv6Addr::new(fidl_ip_v6!($ip).addr), $prefix, ) }, precedence: $precedence, label: $label, } }; } /// Policy table is defined in RFC 6724, section 2.1 /// /// A more human-readable version: /// /// Prefix Precedence Label /// ::1/128 50 0 /// ::/0 40 1 /// fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:0/96 35 4 /// 2002::/16 30 2 /// 2001::/32 5 5 /// fc00::/7 3 13 /// ::/96 1 3 /// fec0::/10 1 11 /// 3ffe::/16 1 12 /// /// We willingly left out ::/96, fec0::/10, 3ffe::/16 since those prefix /// assignments are deprecated. /// /// The table is sorted by prefix length so longest-prefix match can be easily /// achieved. 
const POLICY_TABLE: [Policy; 6] = [
    decl_policy!("::1"/128 => 50, 0),        // loopback
    decl_policy!("::ffff:0:0"/96 => 35, 4),  // IPv4-mapped
    decl_policy!("2001::"/32 => 5, 5),       // Teredo
    decl_policy!("2002::"/16 => 30, 2),      // 6to4
    decl_policy!("fc00::"/7 => 3, 13),       // unique-local
    decl_policy!("::"/0 => 40, 1),           // catch-all; MUST remain last
];

/// Returns the RFC 6724 policy row matching `addr`.
///
/// Relies on `POLICY_TABLE` being sorted by decreasing prefix length: the
/// first `contains` hit is the longest-prefix match. The trailing `::/0`
/// entry matches every address, so `find` can never return `None`.
fn policy_lookup(addr: &net_types::ip::Ipv6Addr) -> &'static Policy {
    POLICY_TABLE
        .iter()
        .find(|policy| policy.prefix.contains(addr))
        .expect("policy table MUST contain the all addresses subnet")
}

/// Destination Address selection information.
///
/// `DasCmpInfo` provides an implementation of a subset of Destination Address
/// Selection according to the sorting rules defined in [RFC 6724 Section 6].
///
/// TODO(fxbug.dev/65219): Implement missing rules 3, 4, and 7.
/// Rules 3, 4, and 7 are omitted for compatibility with the equivalent
/// implementation in Fuchsia's libc.
///
/// `DasCmpInfo` provides an [`std::cmp::Ord`] implementation that will return
/// preferred addresses as "lesser" values.
///
/// [RFC 6724 Section 6]: https://tools.ietf.org/html/rfc6724#section-6
#[derive(Debug)]
struct DasCmpInfo {
    /// Rule 1 input: a source address was resolved for this destination.
    usable: bool,
    /// Rule 2 input: destination and source addresses have the same scope.
    matching_scope: bool,
    /// Rule 5 input: destination and source share the same policy label.
    matching_label: bool,
    /// Rule 6 input: the destination's policy-table precedence.
    precedence: usize,
    /// Rule 8 input: the destination's address scope.
    scope: net_types::ip::Ipv6Scope,
    /// Rule 9 input: bits shared between destination and source addresses.
    common_prefix_len: u8,
}

impl DasCmpInfo {
    /// Helper function to convert a FIDL IP address into
    /// [`net_types::ip::Ipv6Addr`], using a mapped IPv4 when that's the case.
    fn convert_addr(fidl: &fnet::IpAddress) -> net_types::ip::Ipv6Addr {
        match fidl {
            fnet::IpAddress::Ipv4(fnet::Ipv4Address { addr }) => {
                // IPv4 is lifted into the `::ffff:0:0/96` mapped space so a
                // single IPv6 policy lookup covers both families.
                net_types::ip::Ipv6Addr::from(net_types::ip::Ipv4Addr::new(*addr))
            }
            fnet::IpAddress::Ipv6(fnet::Ipv6Address { addr }) => {
                net_types::ip::Ipv6Addr::new(*addr)
            }
        }
    }

    /// Builds the comparison key for destination `dst_addr` given the source
    /// address (if any) that routing selected for it.
    ///
    /// `src_addr == None` marks the destination as unusable (Rule 1) and
    /// zeroes all source-dependent inputs.
    fn from_addrs(dst_addr: &fnet::IpAddress, src_addr: Option<&fnet::IpAddress>) -> Self {
        use net_types::ScopeableAddress;
        let dst_addr = Self::convert_addr(dst_addr);
        let Policy { prefix: _, precedence, label: dst_label } = policy_lookup(&dst_addr);
        let (usable, matching_scope, matching_label, common_prefix_len) = match src_addr {
            Some(src_addr) => {
                let src_addr = Self::convert_addr(src_addr);
                let Policy { prefix: _, precedence: _, label: src_label } =
                    policy_lookup(&src_addr);
                (
                    true,
                    dst_addr.scope() == src_addr.scope(),
                    dst_label == src_label,
                    dst_addr.common_prefix_len(&src_addr),
                )
            }
            None => (false, false, false, 0),
        };
        DasCmpInfo {
            usable,
            matching_scope,
            matching_label,
            precedence: *precedence,
            scope: dst_addr.scope(),
            common_prefix_len,
        }
    }
}

impl std::cmp::Ord for DasCmpInfo {
    // TODO(fxbug.dev/65219): Implement missing rules 3, 4, and 7.
    //
    // Rules are applied in RFC order via `Ordering::then`: a later rule only
    // breaks ties left by all earlier rules. "Preferred" sorts as `Less`.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        use std::cmp::Ordering;
        // Destructure both sides so a newly-added field can't be silently
        // ignored by this comparison.
        let DasCmpInfo {
            usable: self_usable,
            matching_scope: self_matching_scope,
            matching_label: self_matching_label,
            precedence: self_precedence,
            scope: self_scope,
            common_prefix_len: self_common_prefix_len,
        } = self;
        let DasCmpInfo {
            usable: other_usable,
            matching_scope: other_matching_scope,
            matching_label: other_matching_label,
            precedence: other_precedence,
            scope: other_scope,
            common_prefix_len: other_common_prefix_len,
        } = other;
        // Orders the side whose flag is `true` first.
        fn prefer_true(left: bool, right: bool) -> Ordering {
            match (left, right) {
                (true, false) => Ordering::Less,
                (false, true) => Ordering::Greater,
                (false, false) | (true, true) => Ordering::Equal,
            }
        }
        // Rule 1: Avoid unusable destinations.
        prefer_true(*self_usable, *other_usable)
            .then(
                // Rule 2: Prefer matching scope.
                prefer_true(*self_matching_scope, *other_matching_scope),
            )
            .then(
                // Rule 5: Prefer matching label.
                prefer_true(*self_matching_label, *other_matching_label),
            )
            .then(
                // Rule 6: Prefer higher precedence.
                self_precedence.cmp(other_precedence).reverse(),
            )
            .then(
                // Rule 8: Prefer smaller scope.
                self_scope.multicast_scope_id().cmp(&other_scope.multicast_scope_id()),
            )
            .then(
                // Rule 9: Use longest matching prefix.
                self_common_prefix_len.cmp(other_common_prefix_len).reverse(),
            )
        // Rule 10: Otherwise, leave the order unchanged.
    }
}

impl std::cmp::PartialOrd for DasCmpInfo {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

// Equality is defined in terms of `cmp` so that `PartialEq` stays consistent
// with the `Ord` implementation above.
impl std::cmp::PartialEq for DasCmpInfo {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == std::cmp::Ordering::Equal
    }
}

impl std::cmp::Eq for DasCmpInfo {}

/// Resolves `addr` to a hostname via reverse DNS lookup.
///
/// Returns only the first PTR record; `NotFound` if the lookup yields none.
async fn handle_lookup_hostname<T: ResolverLookup>(
    resolver: &SharedResolver<T>,
    addr: fnet::IpAddress,
) -> Result<String, fnet::LookupError> {
    let net_ext::IpAddress(addr) = addr.into();
    let resolver = resolver.read();
    match resolver.reverse_lookup(addr).await {
        // TODO(chuningw): Revisit LookupHostname() method of namelookup.fidl.
        Ok(response) => {
            response.iter().next().ok_or(fnet::LookupError::NotFound).map(|h| h.to_string())
        }
        Err(error) => Err(handle_err("LookupHostname", error)),
    }
}

/// IP lookup variants from [`fidl_fuchsia_net::NameLookupRequest`].
enum IpLookupRequest {
    /// A `fuchsia.net.NameLookup/LookupIp` request awaiting processing.
    LookupIp {
        hostname: String,
        options: fnet::LookupIpOptions,
        responder: fnet::NameLookupLookupIpResponder,
    },
    /// A `fuchsia.net.NameLookup/LookupIp2` request awaiting processing.
    LookupIp2 {
        hostname: String,
        options: fnet::LookupIpOptions2,
        responder: fnet::NameLookupLookupIp2Responder,
    },
}

/// Serves a `fuchsia.net.NameLookup` request stream.
///
/// `LookupIp`/`LookupIp2` requests are not handled here; they are forwarded
/// (with their responders) through `sender` so that the receiving end (see
/// `create_ip_lookup_fut`) can bound how many lookups run in parallel across
/// all clients. `LookupHostname` requests are answered inline.
async fn run_name_lookup<T: ResolverLookup>(
    resolver: &SharedResolver<T>,
    stream: NameLookupRequestStream,
    sender: mpsc::Sender<IpLookupRequest>,
) -> Result<(), fidl::Error> {
    let result = stream
        .try_for_each_concurrent(None, |request| async {
            match request {
                NameLookupRequest::LookupIp { hostname, options, responder } => {
                    // The receiver lives as long as the component serves
                    // requests, so a closed channel is a bug, not a runtime
                    // condition.
                    let () = sender
                        .clone()
                        .send(IpLookupRequest::LookupIp { hostname, options, responder })
                        .await
                        .expect("receiver should not be closed");
                    Ok(())
                }
                NameLookupRequest::LookupIp2 { hostname, options, responder } => {
                    let () = sender
                        .clone()
                        .send(IpLookupRequest::LookupIp2 { hostname, options, responder })
                        .await
                        .expect("receiver should not be closed");
                    Ok(())
                }
                NameLookupRequest::LookupHostname { addr, responder } => {
                    responder.send(&mut handle_lookup_hostname(&resolver, addr).await)
                }
            }
        })
        .await;
    // Some clients will drop the channel when timing out
    // requests. Mute those errors to prevent log spamming.
    if let Err(fidl::Error::ServerResponseWrite(zx::Status::PEER_CLOSED)) = result {
        Ok(())
    } else {
        result
    }
}

/// Upper bound on concurrently-processed IP lookups; also sizes the channel
/// connecting `run_name_lookup` to `create_ip_lookup_fut`.
const MAX_PARALLEL_REQUESTS: usize = 256;

/// Returns a future that drains `recv`, performing up to
/// `MAX_PARALLEL_REQUESTS` lookups concurrently and replying on each
/// request's responder.
fn create_ip_lookup_fut<T: ResolverLookup>(
    resolver: &SharedResolver<T>,
    stats: Arc<QueryStats>,
    routes: fidl_fuchsia_net_routes::StateProxy,
    recv: mpsc::Receiver<IpLookupRequest>,
) -> impl futures::Future<Output = ()> + '_ {
    recv.for_each_concurrent(MAX_PARALLEL_REQUESTS, move |request| {
        // Clone per request so each concurrent branch owns its handles.
        let stats = stats.clone();
        let routes = routes.clone();
        async move {
            // Local wrapper so both lookup flavors can share one logging path
            // below.
            #[derive(Debug)]
            enum IpLookupResult {
                LookupIp(Result<fnet::IpAddressInfo, fnet::LookupError>),
                LookupIp2(Result<fnet::LookupResult, fnet::LookupError>),
            }
            let (lookup_result, send_result) = match request {
                IpLookupRequest::LookupIp { hostname, options, responder } => {
                    let mut lookup_result =
                        handle_lookup_ip(resolver, stats.clone(), hostname, options).await;
                    let send_result = responder.send(&mut lookup_result);
                    (IpLookupResult::LookupIp(lookup_result), send_result)
                }
                IpLookupRequest::LookupIp2 { hostname, options, responder } => {
                    let mut lookup_result =
                        handle_lookup_ip2(resolver, stats.clone(), &routes, hostname, options)
                            .await;
                    let send_result = responder.send(&mut lookup_result);
                    (IpLookupResult::LookupIp2(lookup_result), send_result)
                }
            };
            send_result.unwrap_or_else(|e| match e {
                // Some clients will drop the channel when timing out
                // requests. Mute those errors to prevent log spamming.
                fidl::Error::ServerResponseWrite(zx::Status::PEER_CLOSED) => {}
                e => warn!(
                    "failed to send IP lookup result {:?} due to FIDL error: {}",
                    lookup_result, e
                ),
            })
        }
    })
}

/// Serves a `fuchsia.net.name/LookupAdmin` request `stream`, applying valid
/// server configurations to `state` and `resolver`.
async fn run_lookup_admin<T: ResolverLookup>(
    resolver: &SharedResolver<T>,
    state: &dns::config::ServerConfigState,
    stream: LookupAdminRequestStream,
) -> Result<(), fidl::Error> {
    stream
        .try_for_each(|req| async {
            match req {
                LookupAdminRequest::SetDnsServers { servers, responder } => {
                    let mut response = match state.update_servers(servers) {
                        // Only push a new configuration into the resolver
                        // when the server list actually changed.
                        UpdateServersResult::Updated(servers) => {
                            let () = update_resolver(resolver, servers).await;
                            Ok(())
                        }
                        UpdateServersResult::NoChange => Ok(()),
                        // Invalid server lists are rejected without touching
                        // the current configuration.
                        UpdateServersResult::InvalidsServers => {
                            Err(zx::Status::INVALID_ARGS.into_raw())
                        }
                    };
                    let () = responder.send(&mut response)?;
                }
                LookupAdminRequest::GetDnsServers { responder } => {
                    // Reply with a snapshot of the currently-configured
                    // servers.
                    let () = responder.send(&mut state.servers().iter_mut())?;
                }
            }
            Ok(())
        })
        .await
}

/// Adds a [`dns::policy::ServerConfigState`] inspection child node to
/// `parent`.
fn add_config_state_inspect(
    parent: &fuchsia_inspect::Node,
    config_state: Arc<dns::config::ServerConfigState>,
) -> fuchsia_inspect::LazyNode {
    // Lazy child: the server list is materialized only when the inspect tree
    // is read, so it always reflects the state at snapshot time.
    parent.create_lazy_child("servers", move || {
        let config_state = config_state.clone();
        async move {
            let srv = fuchsia_inspect::Inspector::new();
            let server_list = config_state.servers();
            // One numbered child per server, recording its socket address.
            for (i, server) in server_list.into_iter().enumerate() {
                let child = srv.root().create_child(format!("{}", i));
                let net_ext::SocketAddress(addr) = server.into();
                let () = child.record_string("address", format!("{}", addr));
                let () = srv.root().record(child);
            }
            Ok(srv)
        }
        .boxed()
    })
}

/// Adds a [`QueryStats`] inspection child node to `parent`.
fn add_query_stats_inspect( parent: &fuchsia_inspect::Node, stats: Arc<QueryStats>, ) -> fuchsia_inspect::LazyNode { parent.create_lazy_child("query_stats", move || { let stats = stats.clone(); async move { let past_queries = &*stats.inner.lock().await; let node = fuchsia_inspect::Inspector::new(); for ( i, QueryWindow { start, success_count, failure_count, success_elapsed_time, failure_elapsed_time, failure_stats, }, ) in past_queries.iter().enumerate() { let child = node.root().create_child(format!("window {}", i + 1)); match u64::try_from(start.into_nanos()) { Ok(nanos) => { let () = child.record_uint("start_time_nanos", nanos); }, Err(e) => warn!( "error computing `start_time_nanos`: {:?}.into_nanos() from i64 -> u64 failed: {}", start, e ), } let () = child.record_uint("successful_queries", *success_count); let () = child.record_uint("failed_queries", *failure_count); let record_average = |name: &str, total: zx::Duration, count: u64| { // Don't record an average if there are no stats. 
if count == 0 { return; } match u64::try_from(total.into_micros()) { Ok(micros) => child.record_uint(name, micros / count), Err(e) => warn!( "error computing `{}`: {:?}.into_micros() from i64 -> u64 failed: {}", name, success_elapsed_time, e ), } }; let () = record_average( "average_success_duration_micros", *success_elapsed_time, *success_count, ); let () = record_average( "average_failure_duration_micros", *failure_elapsed_time, *failure_count, ); let FailureStats { message, no_records_found, io, proto, timeout } = failure_stats; let errors = child.create_child("errors"); let () = errors.record_uint("Message", *message); let () = errors.record_uint("NoRecordsFound", *no_records_found); let () = errors.record_uint("Io", *io); let () = errors.record_uint("Proto", *proto); let () = errors.record_uint("Timeout", *timeout); let () = child.record(errors); let () = node.root().record(child); } Ok(node) } .boxed() }) } #[fasync::run_singlethreaded] async fn main() -> Result<(), Error> { // NB: We manually set tags to syslog so logs from trust-dns crates also get // the same tags as opposed to only the crate path. let () = fuchsia_syslog::init_with_tags(&["dns"]).context("cannot init logger")?; info!("starting"); let mut resolver_opts = ResolverOpts::default(); // Resolver will query for A and AAAA in parallel for lookup_ip. 
resolver_opts.ip_strategy = LookupIpStrategy::Ipv4AndIpv6; let resolver = SharedResolver::new( Resolver::new(ResolverConfig::default(), resolver_opts, Spawner) .await .expect("failed to create resolver"), ); let config_state = Arc::new(dns::config::ServerConfigState::new()); let stats = Arc::new(QueryStats::new()); let mut fs = ServiceFs::new_local(); let inspector = fuchsia_inspect::component::inspector(); let _state_inspect_node = add_config_state_inspect(inspector.root(), config_state.clone()); let _query_stats_inspect_node = add_query_stats_inspect(inspector.root(), stats.clone()); let () = inspect_runtime::serve(inspector, &mut fs)?; let routes = fuchsia_component::client::connect_to_protocol::<fidl_fuchsia_net_routes::StateMarker>() .context("failed to connect to fuchsia.net.routes/State")?; let _: &mut ServiceFsDir<'_, _> = fs .dir("svc") .add_fidl_service(IncomingRequest::NameLookup) .add_fidl_service(IncomingRequest::LookupAdmin); let _: &mut ServiceFs<_> = fs.take_and_serve_directory_handle().context("failed to serve directory")?; // Create a channel with buffer size `MAX_PARALLEL_REQUESTS`, which allows // request processing to always be fully saturated. 
let (sender, recv) = mpsc::channel(MAX_PARALLEL_REQUESTS); let serve_fut = fs.for_each_concurrent(None, |incoming_service| async { match incoming_service { IncomingRequest::LookupAdmin(stream) => { run_lookup_admin(&resolver, &config_state, stream).await.unwrap_or_else(|e| { if e.is_closed() { warn!("run_lookup_admin finished with error: {}", e) } else { error!("run_lookup_admin finished with error: {}", e) } }) } IncomingRequest::NameLookup(stream) => { run_name_lookup(&resolver, stream, sender.clone()) .await .unwrap_or_else(|e| warn!("run_name_lookup finished with error: {}", e)) } } }); let ip_lookup_fut = create_ip_lookup_fut(&resolver, stats.clone(), routes, recv); let ((), ()) = futures::future::join(serve_fut, ip_lookup_fut).await; Ok(()) } #[cfg(test)] mod tests { use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, str::FromStr, sync::Arc, }; use fidl_fuchsia_net_name as fname; use dns::test_util::*; use dns::DEFAULT_PORT; use fidl_fuchsia_net_ext::IntoExt as _; use fuchsia_inspect::{assert_data_tree, testing::NonZeroUintProperty, tree_assertion}; use futures::future::TryFutureExt as _; use matches::assert_matches; use net_declare::{fidl_ip, fidl_ip_v4, fidl_ip_v6, std_ip, std_ip_v4, std_ip_v6}; use net_types::ip::Ip as _; use pin_utils::pin_mut; use trust_dns_proto::{ op::Query, rr::{Name, RData, Record}, }; use trust_dns_resolver::{ lookup::Ipv4Lookup, lookup::Ipv6Lookup, lookup::Lookup, lookup::ReverseLookup, lookup_ip::LookupIp, }; use super::*; const IPV4_LOOPBACK: fnet::Ipv4Address = fidl_ip_v4!("127.0.0.1"); const IPV6_LOOPBACK: fnet::Ipv6Address = fidl_ip_v6!("::1"); const LOCAL_HOST: &str = "localhost."; // IPv4 address returned by mock lookup. const IPV4_HOST: Ipv4Addr = std_ip_v4!("240.0.0.2"); // IPv6 address returned by mock lookup. const IPV6_HOST: Ipv6Addr = std_ip_v6!("fdf8:f53e:61e4::18"); // host which has IPv4 address only. const REMOTE_IPV4_HOST: &str = "www.foo.com"; // host which has IPv6 address only. 
const REMOTE_IPV6_HOST: &str = "www.bar.com"; // host used in reverse_lookup when multiple hostnames are returned. const REMOTE_IPV6_HOST_EXTRA: &str = "www.bar2.com"; // host which has IPv4 and IPv6 address if reset name servers. const REMOTE_IPV4_IPV6_HOST: &str = "www.foobar.com"; async fn setup_namelookup_service() -> (fnet::NameLookupProxy, impl futures::Future<Output = ()>) { let (name_lookup_proxy, stream) = fidl::endpoints::create_proxy_and_stream::<fnet::NameLookupMarker>() .expect("failed to create NamelookupProxy"); let mut resolver_opts = ResolverOpts::default(); resolver_opts.ip_strategy = LookupIpStrategy::Ipv4AndIpv6; let resolver = SharedResolver::new( Resolver::new(ResolverConfig::default(), resolver_opts, Spawner) .await .expect("failed to create resolver"), ); let stats = Arc::new(QueryStats::new()); let (routes_proxy, routes_stream) = fidl::endpoints::create_proxy_and_stream::<fidl_fuchsia_net_routes::StateMarker>() .expect("failed to create routes.StateProxy"); let routes_fut = routes_stream.try_for_each(|req| -> futures::future::Ready<Result<(), fidl::Error>> { panic!("Should not call routes/State. 
Received request {:?}", req) }); let (sender, recv) = mpsc::channel(MAX_PARALLEL_REQUESTS); (name_lookup_proxy, async move { futures::future::try_join3( run_name_lookup(&resolver, stream, sender), routes_fut, create_ip_lookup_fut(&resolver, stats.clone(), routes_proxy, recv).map(Ok), ) .map(|r| match r { Ok(((), (), ())) => (), Err(e) => panic!("namelookup service error {:?}", e), }) .await }) } async fn check_lookup_ip( proxy: &fnet::NameLookupProxy, host: &str, option: fnet::LookupIpOptions, expected: Result<fnet::IpAddressInfo, fnet::LookupError>, ) { let res = proxy.lookup_ip(host, option).await.expect("failed to lookup ip"); assert_eq!(res, expected); } async fn check_lookup_hostname( proxy: &fnet::NameLookupProxy, mut addr: fnet::IpAddress, expected: Result<String, fnet::LookupError>, ) { let res = proxy.lookup_hostname(&mut addr).await.expect("failed to lookup hostname"); assert_eq!(res, expected); } #[fasync::run_singlethreaded(test)] async fn test_lookupip_invalid_option() { let (proxy, fut) = setup_namelookup_service().await; let ((), ()) = futures::future::join(fut, async move { // IP Lookup localhost with invalid option. let res = proxy .lookup_ip(LOCAL_HOST, fnet::LookupIpOptions::CnameLookup) .await .expect("failed to LookupIp"); assert_eq!(res, Err(fnet::LookupError::InvalidArgs)); }) .await; } #[fasync::run_singlethreaded(test)] async fn test_lookupip_localhost() { let (proxy, fut) = setup_namelookup_service().await; let ((), ()) = futures::future::join(fut, async move { // IP Lookup IPv4 and IPv6 for localhost. check_lookup_ip( &proxy, LOCAL_HOST, fnet::LookupIpOptions::V4Addrs | fnet::LookupIpOptions::V6Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![IPV4_LOOPBACK], ipv6_addrs: vec![IPV6_LOOPBACK], canonical_name: None, }), ) .await; // IP Lookup IPv4 only for localhost. 
check_lookup_ip( &proxy, LOCAL_HOST, fnet::LookupIpOptions::V4Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![IPV4_LOOPBACK], ipv6_addrs: vec![], canonical_name: None, }), ) .await; // IP Lookup IPv6 only for localhost. check_lookup_ip( &proxy, LOCAL_HOST, fnet::LookupIpOptions::V6Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![], ipv6_addrs: vec![IPV6_LOOPBACK], canonical_name: None, }), ) .await; }) .await; } #[fasync::run_singlethreaded(test)] async fn test_lookuphostname_localhost() { let (proxy, fut) = setup_namelookup_service().await; let ((), ()) = futures::future::join(fut, async move { check_lookup_hostname(&proxy, IPV4_LOOPBACK.into_ext(), Ok(String::from(LOCAL_HOST))) .await }) .await; } struct MockResolver { config: ResolverConfig, } impl MockResolver { fn ip_lookup<N: IntoName + Send>(&self, host: N) -> Lookup { let rdatas = match host.into_name().unwrap().to_utf8().as_str() { REMOTE_IPV4_HOST => vec![RData::A(IPV4_HOST)], REMOTE_IPV6_HOST => vec![RData::AAAA(IPV6_HOST)], REMOTE_IPV4_IPV6_HOST => vec![RData::A(IPV4_HOST), RData::AAAA(IPV6_HOST)], _ => vec![], }; let records: Vec<Record> = rdatas .into_iter() .map(|rdata| { Record::from_rdata( Name::new(), // The following ttl value is taken arbitrarily and does not matter in the // test. 
60, rdata, ) }) .collect(); Lookup::new_with_max_ttl(Query::default(), Arc::new(records)) } } #[async_trait] impl ResolverLookup for MockResolver { async fn new(config: ResolverConfig, _options: ResolverOpts) -> Self { MockResolver { config } } async fn lookup_ip<N: IntoName + TryParseIp + Send>( &self, host: N, ) -> Result<lookup_ip::LookupIp, ResolveError> { Ok(LookupIp::from(self.ip_lookup(host))) } async fn ipv4_lookup<N: IntoName + Send>( &self, host: N, ) -> Result<lookup::Ipv4Lookup, ResolveError> { Ok(Ipv4Lookup::from(self.ip_lookup(host))) } async fn ipv6_lookup<N: IntoName + Send>( &self, host: N, ) -> Result<lookup::Ipv6Lookup, ResolveError> { Ok(Ipv6Lookup::from(self.ip_lookup(host))) } async fn reverse_lookup( &self, addr: IpAddr, ) -> Result<lookup::ReverseLookup, ResolveError> { let lookup = if addr == IPV4_HOST { Lookup::from_rdata( Query::default(), RData::PTR(Name::from_str(REMOTE_IPV4_HOST).unwrap()), ) } else if addr == IPV6_HOST { Lookup::new_with_max_ttl( Query::default(), Arc::new(vec![ Record::from_rdata( Name::new(), 60, // The value is taken arbitrarily and does not matter // in the test. RData::PTR(Name::from_str(REMOTE_IPV6_HOST).unwrap()), ), Record::from_rdata( Name::new(), 60, // The value is taken arbitrarily and does not matter // in the test. RData::PTR(Name::from_str(REMOTE_IPV6_HOST_EXTRA).unwrap()), ), ]), ) } else { Lookup::new_with_max_ttl(Query::default(), Arc::new(vec![])) }; Ok(ReverseLookup::from(lookup)) } } struct TestEnvironment { shared_resolver: SharedResolver<MockResolver>, config_state: Arc<dns::config::ServerConfigState>, stats: Arc<QueryStats>, } impl TestEnvironment { fn new() -> Self { Self { shared_resolver: SharedResolver::new(MockResolver { config: ResolverConfig::from_parts( None, vec![], // Set name_servers as empty, so it's guaranteed to be different from IPV4_NAMESERVER // and IPV6_NAMESERVER. 
NameServerConfigGroup::with_capacity(0), ), }), config_state: Arc::new(dns::config::ServerConfigState::new()), stats: Arc::new(QueryStats::new()), } } async fn run_lookup<F, Fut>(&self, f: F) where Fut: futures::Future<Output = ()>, F: FnOnce(fnet::NameLookupProxy) -> Fut, { self.run_lookup_with_routes_handler(f, |req| { panic!("Should not call routes/State. Received request {:?}", req) }) .await } async fn run_lookup_with_routes_handler<F, Fut, R>(&self, f: F, handle_routes: R) where Fut: futures::Future<Output = ()>, F: FnOnce(fnet::NameLookupProxy) -> Fut, R: Fn(fidl_fuchsia_net_routes::StateRequest), { let (name_lookup_proxy, name_lookup_stream) = fidl::endpoints::create_proxy_and_stream::<fnet::NameLookupMarker>() .expect("failed to create NameLookupProxy"); let (routes_proxy, routes_stream) = fidl::endpoints::create_proxy_and_stream::<fidl_fuchsia_net_routes::StateMarker>() .expect("failed to create routes.StateProxy"); let (sender, recv) = mpsc::channel(MAX_PARALLEL_REQUESTS); let ((), (), (), ()) = futures::future::try_join4( run_name_lookup(&self.shared_resolver, name_lookup_stream, sender), f(name_lookup_proxy).map(Ok), routes_stream.try_for_each(|req| futures::future::ok(handle_routes(req))), create_ip_lookup_fut(&self.shared_resolver, self.stats.clone(), routes_proxy, recv) .map(Ok), ) .await .expect("Error running lookup future"); } async fn run_admin<F, Fut>(&self, f: F) where Fut: futures::Future<Output = ()>, F: FnOnce(fname::LookupAdminProxy) -> Fut, { let (lookup_admin_proxy, lookup_admin_stream) = fidl::endpoints::create_proxy_and_stream::<fname::LookupAdminMarker>() .expect("failed to create AdminResolverProxy"); let ((), ()) = futures::future::try_join( run_lookup_admin(&self.shared_resolver, &self.config_state, lookup_admin_stream) .map_err(anyhow::Error::from), f(lookup_admin_proxy).map(Ok), ) .await .expect("Error running admin future"); } } #[fasync::run_singlethreaded(test)] async fn test_lookupip_remotehost_ipv4() { TestEnvironment::new() 
.run_lookup(|proxy| async move { // IP Lookup IPv4 and IPv6 for REMOTE_IPV4_HOST. check_lookup_ip( &proxy, REMOTE_IPV4_HOST, fnet::LookupIpOptions::V4Addrs | fnet::LookupIpOptions::V6Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![fnet::Ipv4Address { addr: IPV4_HOST.octets() }], ipv6_addrs: vec![], canonical_name: None, }), ) .await; // IP Lookup IPv4 for REMOTE_IPV4_HOST. check_lookup_ip( &proxy, REMOTE_IPV4_HOST, fnet::LookupIpOptions::V4Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![fnet::Ipv4Address { addr: IPV4_HOST.octets() }], ipv6_addrs: vec![], canonical_name: None, }), ) .await; // IP Lookup IPv6 for REMOTE_IPV4_HOST. check_lookup_ip( &proxy, REMOTE_IPV4_HOST, fnet::LookupIpOptions::V6Addrs, Err(fnet::LookupError::NotFound), ) .await; }) .await; } #[fasync::run_singlethreaded(test)] async fn test_lookupip_remotehost_ipv6() { TestEnvironment::new() .run_lookup(|proxy| async move { // IP Lookup IPv4 and IPv6 for REMOTE_IPV6_HOST. check_lookup_ip( &proxy, REMOTE_IPV6_HOST, fnet::LookupIpOptions::V4Addrs | fnet::LookupIpOptions::V6Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![], ipv6_addrs: vec![fnet::Ipv6Address { addr: IPV6_HOST.octets() }], canonical_name: None, }), ) .await; // IP Lookup IPv4 for REMOTE_IPV6_HOST. check_lookup_ip( &proxy, REMOTE_IPV6_HOST, fnet::LookupIpOptions::V4Addrs, Err(fnet::LookupError::NotFound), ) .await; // IP Lookup IPv6 for REMOTE_IPV4_HOST. 
check_lookup_ip( &proxy, REMOTE_IPV6_HOST, fnet::LookupIpOptions::V6Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![], ipv6_addrs: vec![fnet::Ipv6Address { addr: IPV6_HOST.octets() }], canonical_name: None, }), ) .await; }) .await; } #[fasync::run_singlethreaded(test)] async fn test_lookup_hostname() { TestEnvironment::new() .run_lookup(|proxy| async move { check_lookup_hostname( &proxy, fnet::IpAddress::Ipv4(fnet::Ipv4Address { addr: IPV4_HOST.octets() }), Ok(String::from(REMOTE_IPV4_HOST)), ) .await; }) .await; } // Multiple hostnames returned from trust-dns* APIs, and only the first one will be returned // by the FIDL. #[fasync::run_singlethreaded(test)] async fn test_lookup_hostname_multi() { TestEnvironment::new() .run_lookup(|proxy| async move { check_lookup_hostname( &proxy, fnet::IpAddress::Ipv6(fnet::Ipv6Address { addr: IPV6_HOST.octets() }), Ok(String::from(REMOTE_IPV6_HOST)), ) .await; }) .await; } #[fasync::run_singlethreaded(test)] async fn test_set_server_names() { let env = TestEnvironment::new(); let to_server_configs = |socket_addr: SocketAddr| -> [NameServerConfig; 2] { [ NameServerConfig { socket_addr, protocol: Protocol::Udp, tls_dns_name: None }, NameServerConfig { socket_addr, protocol: Protocol::Tcp, tls_dns_name: None }, ] }; // Assert that mock config has no servers originally. assert_eq!(env.shared_resolver.read().config.name_servers().to_vec(), vec![]); // Set servers. env.run_admin(|proxy| async move { let () = proxy .set_dns_servers(&mut vec![DHCP_SERVER, NDP_SERVER, DHCPV6_SERVER].iter_mut()) .await .expect("Failed to call SetDnsServers") .expect("SetDnsServers error"); }) .await; assert_eq!( env.shared_resolver.read().config.name_servers().to_vec(), vec![DHCP_SERVER, NDP_SERVER, DHCPV6_SERVER] .into_iter() .map(|s| net_ext::SocketAddress::from(s).0) .flat_map(|x| to_server_configs(x).to_vec().into_iter()) .collect::<Vec<_>>() ); // Clear servers. 
env.run_admin(|proxy| async move { let () = proxy .set_dns_servers(&mut vec![].into_iter()) .await .expect("Failed to call SetDnsServers") .expect("SetDnsServers error"); }) .await; assert_eq!(env.shared_resolver.read().config.name_servers().to_vec(), Vec::new()); } #[fasync::run_singlethreaded(test)] async fn test_set_server_names_error() { let env = TestEnvironment::new(); // Assert that mock config has no servers originally. assert_eq!(env.shared_resolver.read().config.name_servers().to_vec(), vec![]); env.run_admin(|proxy| async move { // Attempt to set bad addresses. // Multicast not allowed. let status = proxy .set_dns_servers( &mut vec![fnet::SocketAddress::Ipv4(fnet::Ipv4SocketAddress { address: fnet::Ipv4Address { addr: [224, 0, 0, 1] }, port: DEFAULT_PORT, })] .iter_mut(), ) .await .expect("Failed to call SetDnsServers") .expect_err("SetDnsServers should fail for multicast address"); assert_eq!(zx::Status::from_raw(status), zx::Status::INVALID_ARGS); // Unspecified not allowed. let status = proxy .set_dns_servers( &mut vec![fnet::SocketAddress::Ipv6(fnet::Ipv6SocketAddress { address: fnet::Ipv6Address { addr: [0; 16] }, port: DEFAULT_PORT, zone_index: 0, })] .iter_mut(), ) .await .expect("Failed to call SetDnsServers") .expect_err("SetDnsServers should fail for unspecified address"); assert_eq!(zx::Status::from_raw(status), zx::Status::INVALID_ARGS); }) .await; // Assert that config didn't change. 
assert_eq!(env.shared_resolver.read().config.name_servers().to_vec(), vec![]); } #[fasync::run_singlethreaded(test)] async fn test_get_servers() { let env = TestEnvironment::new(); env.run_admin(|proxy| async move { let expect = vec![NDP_SERVER, DHCP_SERVER, DHCPV6_SERVER, STATIC_SERVER]; let () = proxy .set_dns_servers(&mut expect.clone().iter_mut()) .await .expect("FIDL error") .expect("set_servers failed"); assert_eq!(proxy.get_dns_servers().await.expect("Failed to get DNS servers"), expect); }) .await; } #[fasync::run_singlethreaded(test)] async fn test_config_inspect() { let env = TestEnvironment::new(); let inspector = fuchsia_inspect::Inspector::new(); let _config_state_node = add_config_state_inspect(inspector.root(), env.config_state.clone()); assert_data_tree!(inspector, root:{ servers: {} }); env.run_admin(|proxy| async move { let mut servers = [NDP_SERVER, DHCP_SERVER, DHCPV6_SERVER, STATIC_SERVER]; let () = proxy .set_dns_servers(&mut servers.iter_mut()) .await .expect("FIDL error") .expect("set_servers failed"); }) .await; assert_data_tree!(inspector, root:{ servers: { "0": { address: "[2001:4860:4fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b%2]:53", }, "1": { address: "8.8.4.4:53", }, "2": { address: "[2002:4860:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b%3]:53", }, "3": { address: "8.8.8.8:53", }, } }); } #[fasync::run_singlethreaded(test)] async fn test_query_stats_updated() { let env = TestEnvironment::new(); let inspector = fuchsia_inspect::Inspector::new(); let _query_stats_inspect_node = add_query_stats_inspect(inspector.root(), env.stats.clone()); assert_data_tree!(inspector, root:{ query_stats: {} }); let () = env .run_lookup(|proxy| async move { // IP Lookup IPv4 for REMOTE_IPV4_HOST. 
check_lookup_ip( &proxy, REMOTE_IPV4_HOST, fnet::LookupIpOptions::V4Addrs, Ok(fnet::IpAddressInfo { ipv4_addrs: vec![fnet::Ipv4Address { addr: IPV4_HOST.octets() }], ipv6_addrs: vec![], canonical_name: None, }), ) .await; }) .await; let () = env .run_lookup(|proxy| async move { // IP Lookup IPv6 for REMOTE_IPV4_HOST. check_lookup_ip( &proxy, REMOTE_IPV4_HOST, fnet::LookupIpOptions::V6Addrs, Err(fnet::LookupError::NotFound), ) .await; }) .await; assert_data_tree!(inspector, root:{ query_stats: { "window 1": { start_time_nanos: NonZeroUintProperty, successful_queries: 2u64, failed_queries: 0u64, average_success_duration_micros: NonZeroUintProperty, errors: { Message: 0u64, NoRecordsFound: 0u64, Io: 0u64, Proto: 0u64, Timeout: 0u64, }, }, } }); } fn run_fake_lookup( exec: &mut fasync::TestExecutor, stats: Arc<QueryStats>, error: Option<ResolveErrorKind>, delay: zx::Duration, ) { let start_time = fasync::Time::now(); let () = exec.set_fake_time(fasync::Time::after(delay)); let update_stats = (|| async { if let Some(error) = error { let () = stats.finish_query(start_time, Some(&error)).await; } else { let () = stats.finish_query(start_time, None).await; } })(); pin_mut!(update_stats); assert!(exec.run_until_stalled(&mut update_stats).is_ready()); } #[test] fn test_query_stats_inspect_average() { let mut exec = fasync::TestExecutor::new_with_fake_time().unwrap(); const START_NANOS: i64 = 1_234_567; let () = exec.set_fake_time(fasync::Time::from_nanos(START_NANOS)); let env = TestEnvironment::new(); let inspector = fuchsia_inspect::Inspector::new(); let _query_stats_inspect_node = add_query_stats_inspect(inspector.root(), env.stats.clone()); const SUCCESSFUL_QUERY_COUNT: u64 = 10; const SUCCESSFUL_QUERY_DURATION: zx::Duration = zx::Duration::from_seconds(30); for _ in 0..SUCCESSFUL_QUERY_COUNT / 2 { let () = run_fake_lookup(&mut exec, env.stats.clone(), None, zx::Duration::from_nanos(0)); let () = run_fake_lookup(&mut exec, env.stats.clone(), None, 
SUCCESSFUL_QUERY_DURATION); let () = exec.set_fake_time(fasync::Time::after( STAT_WINDOW_DURATION - SUCCESSFUL_QUERY_DURATION, )); } let mut expected = tree_assertion!(query_stats: {}); for i in 0..SUCCESSFUL_QUERY_COUNT / 2 { let name = &format!("window {}", i + 1); let child = tree_assertion!(var name: { start_time_nanos: u64::try_from( START_NANOS + STAT_WINDOW_DURATION.into_nanos() * i64::try_from(i).unwrap() ).unwrap(), successful_queries: 2u64, failed_queries: 0u64, average_success_duration_micros: u64::try_from( SUCCESSFUL_QUERY_DURATION.into_micros() ).unwrap() / 2, errors: { Message: 0u64, NoRecordsFound: 0u64, Io: 0u64, Proto: 0u64, Timeout: 0u64, }, }); expected.add_child_assertion(child); } assert_data_tree!(inspector, root: { expected, }); } #[test] fn test_query_stats_inspect_error_counters() { let mut exec = fasync::TestExecutor::new_with_fake_time().unwrap(); const START_NANOS: i64 = 1_234_567; let () = exec.set_fake_time(fasync::Time::from_nanos(START_NANOS)); let env = TestEnvironment::new(); let inspector = fuchsia_inspect::Inspector::new(); let _query_stats_inspect_node = add_query_stats_inspect(inspector.root(), env.stats.clone()); const FAILED_QUERY_COUNT: u64 = 10; const FAILED_QUERY_DURATION: zx::Duration = zx::Duration::from_millis(500); for _ in 0..FAILED_QUERY_COUNT { let () = run_fake_lookup( &mut exec, env.stats.clone(), Some(ResolveErrorKind::Timeout), FAILED_QUERY_DURATION, ); } assert_data_tree!(inspector, root:{ query_stats: { "window 1": { start_time_nanos: u64::try_from( START_NANOS + FAILED_QUERY_DURATION.into_nanos() ).unwrap(), successful_queries: 0u64, failed_queries: FAILED_QUERY_COUNT, average_failure_duration_micros: u64::try_from( FAILED_QUERY_DURATION.into_micros() ).unwrap(), errors: { Message: 0u64, NoRecordsFound: 0u64, Io: 0u64, Proto: 0u64, Timeout: FAILED_QUERY_COUNT, }, }, } }); } #[test] fn test_query_stats_inspect_oldest_stats_erased() { let mut exec = fasync::TestExecutor::new_with_fake_time().unwrap(); const 
START_NANOS: i64 = 1_234_567; let () = exec.set_fake_time(fasync::Time::from_nanos(START_NANOS)); let env = TestEnvironment::new(); let inspector = fuchsia_inspect::Inspector::new(); let _query_stats_inspect_node = add_query_stats_inspect(inspector.root(), env.stats.clone()); const DELAY: zx::Duration = zx::Duration::from_millis(100); for _ in 0..STAT_WINDOW_COUNT { let () = run_fake_lookup( &mut exec, env.stats.clone(), Some(ResolveErrorKind::Timeout), DELAY, ); let () = exec.set_fake_time(fasync::Time::after(STAT_WINDOW_DURATION - DELAY)); } for _ in 0..STAT_WINDOW_COUNT { let () = run_fake_lookup(&mut exec, env.stats.clone(), None, DELAY); let () = exec.set_fake_time(fasync::Time::after(STAT_WINDOW_DURATION - DELAY)); } // All the failed queries should be erased from the stats as they are // now out of date. let mut expected = tree_assertion!(query_stats: {}); let start_offset = START_NANOS + DELAY.into_nanos() + STAT_WINDOW_DURATION.into_nanos() * i64::try_from(STAT_WINDOW_COUNT).unwrap(); for i in 0..STAT_WINDOW_COUNT { let name = &format!("window {}", i + 1); let child = tree_assertion!(var name: { start_time_nanos: u64::try_from( start_offset + STAT_WINDOW_DURATION.into_nanos() * i64::try_from(i).unwrap() ).unwrap(), successful_queries: 1u64, failed_queries: 0u64, average_success_duration_micros: u64::try_from(DELAY.into_micros()).unwrap(), errors: { Message: 0u64, NoRecordsFound: 0u64, Io: 0u64, Proto: 0u64, Timeout: 0u64, }, }); expected.add_child_assertion(child); } assert_data_tree!(inspector, root: { expected, }); } struct BlockingResolver {} #[async_trait] impl ResolverLookup for BlockingResolver { async fn new(_config: ResolverConfig, _options: ResolverOpts) -> Self { BlockingResolver {} } async fn lookup_ip<N: IntoName + TryParseIp + Send>( &self, _host: N, ) -> Result<lookup_ip::LookupIp, ResolveError> { futures::future::pending().await } async fn ipv4_lookup<N: IntoName + Send>( &self, _host: N, ) -> Result<lookup::Ipv4Lookup, ResolveError> { 
futures::future::pending().await } async fn ipv6_lookup<N: IntoName + Send>( &self, _host: N, ) -> Result<lookup::Ipv6Lookup, ResolveError> { futures::future::pending().await } async fn reverse_lookup( &self, _addr: IpAddr, ) -> Result<lookup::ReverseLookup, ResolveError> { panic!("BlockingResolver does not handle reverse lookup") } } #[fasync::run_singlethreaded(test)] async fn test_parallel_query_limit() { // Collect requests by setting up a FIDL proxy and stream for the // NameLookup protocol, because there isn't a good way to directly // construct fake requests to be used for testing. let requests = { let (name_lookup_proxy, name_lookup_stream) = fidl::endpoints::create_proxy_and_stream::<fnet::NameLookupMarker>() .expect("failed to create net.NameLookupProxy"); const NUM_REQUESTS: usize = MAX_PARALLEL_REQUESTS * 2 + 2; for _ in 0..NUM_REQUESTS { // Don't await on this future because we are using these // requests to collect FIDL responders in order to send test // requests later, and will not respond to these requests. let _: fidl::client::QueryResponseFut<fnet::NameLookupLookupIpResult> = name_lookup_proxy.lookup_ip( LOCAL_HOST, fnet::LookupIpOptions::V4Addrs | fnet::LookupIpOptions::V6Addrs, ); } // Terminate the stream so its items can be collected below. drop(name_lookup_proxy); let requests = name_lookup_stream .map(|request| match request.expect("channel error") { NameLookupRequest::LookupIp { hostname, options, responder } => { IpLookupRequest::LookupIp { hostname, options, responder } } req => panic!("Expected NameLookupRequest::LookupIp request, found {:?}", req), }) .collect::<Vec<_>>() .await; assert_eq!(requests.len(), NUM_REQUESTS); requests }; let (mut sender, recv) = mpsc::channel(MAX_PARALLEL_REQUESTS); // The channel's capacity is equal to buffer + num-senders. 
Thus the // channel has a capacity of `MAX_PARALLEL_REQUESTS` + 1, and the // `for_each_concurrent` future has a limit of `MAX_PARALLEL_REQUESTS`, // so the sender should be able to queue `MAX_PARALLEL_REQUESTS` * 2 + 1 // requests before `send` fails. const BEFORE_LAST_INDEX: usize = MAX_PARALLEL_REQUESTS * 2; const LAST_INDEX: usize = MAX_PARALLEL_REQUESTS * 2 + 1; let send_fut = async { for (i, req) in requests.into_iter().enumerate() { match i { BEFORE_LAST_INDEX => assert_matches!(sender.try_send(req), Ok(())), LAST_INDEX => assert_matches!(sender.try_send(req), Err(e) if e.is_full()), _ => assert_matches!(sender.send(req).await, Ok(())), } } } .fuse(); let recv_fut = { let resolver = SharedResolver::new( BlockingResolver::new(ResolverConfig::default(), ResolverOpts::default()).await, ); let stats = Arc::new(QueryStats::new()); let (routes_proxy, _routes_stream) = fidl::endpoints::create_proxy_and_stream::<fidl_fuchsia_net_routes::StateMarker>() .expect("failed to create routes.StateProxy"); async move { create_ip_lookup_fut(&resolver, stats.clone(), routes_proxy, recv).await } .fuse() }; pin_mut!(send_fut, recv_fut); futures::select! 
{ () = send_fut => {}, () = recv_fut => panic!("recv_fut should never complete"), }; } #[test] fn test_failure_stats() { use anyhow::anyhow; use trust_dns_proto::{error::ProtoError, op::Query}; let mut stats = FailureStats::default(); for (error_kind, expected) in &[ (ResolveErrorKind::Message("foo"), FailureStats { message: 1, ..Default::default() }), ( ResolveErrorKind::Msg("foo".to_string()), FailureStats { message: 2, ..Default::default() }, ), ( ResolveErrorKind::NoRecordsFound { query: Query::default(), valid_until: None }, FailureStats { message: 2, no_records_found: 1, ..Default::default() }, ), ( ResolveErrorKind::Io(std::io::Error::new( std::io::ErrorKind::NotFound, anyhow!("foo"), )), FailureStats { message: 2, no_records_found: 1, io: 1, ..Default::default() }, ), ( ResolveErrorKind::Proto(ProtoError::from("foo")), FailureStats { message: 2, no_records_found: 1, io: 1, proto: 1, ..Default::default() }, ), ( ResolveErrorKind::Timeout, FailureStats { message: 2, no_records_found: 1, io: 1, proto: 1, timeout: 1 }, ), ][..] { let () = stats.increment(error_kind); assert_eq!(&stats, expected, "invalid stats after incrementing with {:?}", error_kind); } } fn test_das_helper( l_addr: fnet::IpAddress, l_src: Option<fnet::IpAddress>, r_addr: fnet::IpAddress, r_src: Option<fnet::IpAddress>, want: std::cmp::Ordering, ) { let left = DasCmpInfo::from_addrs(&l_addr, l_src.as_ref()); let right = DasCmpInfo::from_addrs(&r_addr, r_src.as_ref()); assert_eq!( left.cmp(&right), want, "want = {:?}\n left = {:?}({:?}) DAS={:?}\n right = {:?}({:?}) DAS={:?}", want, l_addr, l_src, left, r_addr, r_src, right ); } macro_rules! 
add_das_test { ($name:ident, preferred: $pref_dst:expr => $pref_src:expr, other: $other_dst:expr => $other_src:expr) => { #[test] fn $name() { test_das_helper( $pref_dst, $pref_src, $other_dst, $other_src, std::cmp::Ordering::Less, ) } }; } add_das_test!( prefer_reachable, preferred: fidl_ip!("198.51.100.121") => Some(fidl_ip!("198.51.100.117")), other: fidl_ip!("2001:db8:1::1") => Option::<fnet::IpAddress>::None ); // These test cases are taken from RFC 6724, section 10.2. add_das_test!( prefer_matching_scope, preferred: fidl_ip!("198.51.100.121") => Some(fidl_ip!("198.51.100.117")), other: fidl_ip!("2001:db8:1::1") => Some(fidl_ip!("fefc00:e968:6179::de52:7100")) ); add_das_test!( prefer_matching_label, preferred: fidl_ip!("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b") => Some(fidl_ip!("fc00:e968:6179::de52:7100")), other: fidl_ip!("2001:db8:1::1") => Some(fidl_ip!("fc00:e968:6179::de52:7100")) ); add_das_test!( prefer_higher_precedence_1, preferred: fidl_ip!("2001:db8:1::1") => Some(fidl_ip!("2001:db8:1::2")), other: fidl_ip!("10.1.2.3") => Some(fidl_ip!("10.1.2.4")) ); add_das_test!( prefer_higher_precedence_2, preferred: fidl_ip!("2001:db8:1::1") => Some(fidl_ip!("2001:db8:1::2")), other: fidl_ip!("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b") => Some(fidl_ip!("fc00:e968:6179::de52:7100")) ); add_das_test!( prefer_smaller_scope, preferred: fidl_ip!("fe80::1") => Some(fidl_ip!("fe80::2")), other: fidl_ip!("2001:db8:1::1") => Some(fidl_ip!("2001:db8:1::2")) ); add_das_test!( prefer_longest_matching_prefix, preferred: fidl_ip!("2001:db8:1::1") => Some(fidl_ip!("2001:db8:1::2")), other: fidl_ip!("2001:db8:3ffe::1") => Some(fidl_ip!("2001:db8:3f44::2")) ); #[test] fn test_das_equals() { for (dst, src) in [ (fidl_ip!("192.168.0.1"), fidl_ip!("192.168.0.2")), (fidl_ip!("2001:db8::1"), fidl_ip!("2001:db8::2")), ] .iter() { let () = test_das_helper(*dst, None, *dst, None, std::cmp::Ordering::Equal); let () = test_das_helper(*dst, Some(*src), *dst, Some(*src), 
std::cmp::Ordering::Equal); } } #[test] fn test_valid_policy_table() { // Last element in policy table MUST be ::/0. assert_eq!( POLICY_TABLE.iter().last().expect("empty policy table").prefix, net_types::ip::Subnet::new(net_types::ip::Ipv6::UNSPECIFIED_ADDRESS, 0) .expect("invalid subnet") ); // Policy table must be sorted by prefix length. let () = POLICY_TABLE.windows(2).for_each(|w| { let Policy { prefix: cur, precedence: _, label: _ } = w[0]; let Policy { prefix: nxt, precedence: _, label: _ } = w[1]; assert!( cur.prefix() >= nxt.prefix(), "bad ordering of prefixes, {} must come after {}", cur, nxt ) }); // Assert that POLICY_TABLE declaration does not use any invalid // subnets. for policy in POLICY_TABLE.iter() { assert!(policy.prefix.prefix() <= 128, "Invalid subnet in policy {:?}", policy); } } #[fasync::run_singlethreaded(test)] async fn test_sort_preferred_addresses() { const TEST_IPS: [(fnet::IpAddress, Option<fnet::IpAddress>); 5] = [ (fidl_ip!("127.0.0.1"), Some(fidl_ip!("127.0.0.1"))), (fidl_ip!("::1"), Some(fidl_ip!("::1"))), (fidl_ip!("192.168.50.22"), None), (fidl_ip!("2001::2"), None), (fidl_ip!("2001:db8:1::1"), Some(fidl_ip!("2001:db8:1::2"))), ]; // Declared using std types so we get cleaner output when we assert // expectations. 
const SORTED: [IpAddr; 5] = [ std_ip!("::1"), std_ip!("2001:db8:1::1"), std_ip!("127.0.0.1"), std_ip!("192.168.50.22"), std_ip!("2001::2"), ]; let (routes_proxy, routes_stream) = fidl::endpoints::create_proxy_and_stream::<fidl_fuchsia_net_routes::StateMarker>() .expect("failed to create routes.StateProxy"); let routes_fut = routes_stream.map(|r| r.context("stream FIDL error")).try_for_each( |fidl_fuchsia_net_routes::StateRequest::Resolve { destination, responder }| { let mut result = TEST_IPS .iter() .enumerate() .find_map(|(i, (dst, src))| { if *dst == destination && src.is_some() { let inner = fidl_fuchsia_net_routes::Destination { address: Some(*dst), source_address: *src, ..fidl_fuchsia_net_routes::Destination::EMPTY }; // Send both Direct and Gateway resolved routes to show we // don't care about that part. if i % 2 == 0 { Some(fidl_fuchsia_net_routes::Resolved::Direct(inner)) } else { Some(fidl_fuchsia_net_routes::Resolved::Gateway(inner)) } } else { None } }) .ok_or(zx::Status::ADDRESS_UNREACHABLE.into_raw()); futures::future::ready( responder.send(&mut result).context("failed to send Resolve response"), ) }, ); let ((), ()) = futures::future::try_join(routes_fut, async move { let addrs = TEST_IPS.iter().map(|(dst, _src)| *dst).collect(); let addrs = sort_preferred_addresses(addrs, &routes_proxy) .await .expect("failed to sort addresses"); let addrs = addrs .into_iter() .map(|a| { let fidl_fuchsia_net_ext::IpAddress(a) = a.into(); a }) .collect::<Vec<_>>(); assert_eq!(&addrs[..], &SORTED[..]); Ok(()) }) .await .expect("error running futures"); } #[fasync::run_singlethreaded(test)] async fn test_lookupip2() { fn map_ip<T: Into<IpAddr>>(addr: T) -> fnet::IpAddress { fidl_fuchsia_net_ext::IpAddress(addr.into()).into() } // Routes handler will say that only IPV6_HOST is reachable. 
let routes_handler = |fidl_fuchsia_net_routes::StateRequest::Resolve { destination, responder }| { let mut response = if destination == map_ip(IPV6_HOST) { Ok(fidl_fuchsia_net_routes::Resolved::Direct( fidl_fuchsia_net_routes::Destination { address: Some(destination), source_address: Some(destination), ..fidl_fuchsia_net_routes::Destination::EMPTY }, )) } else { Err(zx::Status::ADDRESS_UNREACHABLE.into_raw()) }; let () = responder.send(&mut response).expect("failed to send Resolve FIDL response"); }; TestEnvironment::new() .run_lookup_with_routes_handler( |proxy| async move { let proxy = &proxy; let lookup_ip = |hostname, options| async move { proxy.lookup_ip2(hostname, options).await.expect("FIDL error") }; // All arguments unset. assert_eq!( lookup_ip( REMOTE_IPV4_HOST, fidl_fuchsia_net::LookupIpOptions2 { ..fidl_fuchsia_net::LookupIpOptions2::EMPTY } ) .await, Err(fidl_fuchsia_net::LookupError::InvalidArgs) ); // No IP addresses to look. assert_eq!( lookup_ip( REMOTE_IPV4_HOST, fidl_fuchsia_net::LookupIpOptions2 { ipv4_lookup: Some(false), ipv6_lookup: Some(false), ..fidl_fuchsia_net::LookupIpOptions2::EMPTY } ) .await, Err(fidl_fuchsia_net::LookupError::InvalidArgs) ); // No results for an IPv4 only host. assert_eq!( lookup_ip( REMOTE_IPV4_HOST, fidl_fuchsia_net::LookupIpOptions2 { ipv4_lookup: Some(false), ipv6_lookup: Some(true), ..fidl_fuchsia_net::LookupIpOptions2::EMPTY } ) .await, Err(fidl_fuchsia_net::LookupError::NotFound) ); // Successfully resolve IPv4. assert_eq!( lookup_ip( REMOTE_IPV4_HOST, fidl_fuchsia_net::LookupIpOptions2 { ipv4_lookup: Some(true), ipv6_lookup: Some(true), ..fidl_fuchsia_net::LookupIpOptions2::EMPTY } ) .await, Ok(fidl_fuchsia_net::LookupResult { addresses: Some(vec![map_ip(IPV4_HOST)]), ..fidl_fuchsia_net::LookupResult::EMPTY }) ); // Successfully resolve IPv4 + IPv6 (no sorting). 
assert_eq!( lookup_ip( REMOTE_IPV4_IPV6_HOST, fidl_fuchsia_net::LookupIpOptions2 { ipv4_lookup: Some(true), ipv6_lookup: Some(true), ..fidl_fuchsia_net::LookupIpOptions2::EMPTY } ) .await, Ok(fidl_fuchsia_net::LookupResult { addresses: Some(vec![map_ip(IPV4_HOST), map_ip(IPV6_HOST)]), ..fidl_fuchsia_net::LookupResult::EMPTY }) ); // Successfully resolve IPv4 + IPv6 (with sorting). assert_eq!( lookup_ip( REMOTE_IPV4_IPV6_HOST, fidl_fuchsia_net::LookupIpOptions2 { ipv4_lookup: Some(true), ipv6_lookup: Some(true), sort_addresses: Some(true), ..fidl_fuchsia_net::LookupIpOptions2::EMPTY } ) .await, Ok(fidl_fuchsia_net::LookupResult { addresses: Some(vec![map_ip(IPV6_HOST), map_ip(IPV4_HOST)]), ..fidl_fuchsia_net::LookupResult::EMPTY }) ); }, routes_handler, ) .await } }
{-# LANGUAGE NoImplicitPrelude #-} {-# LANGUAGE OverloadedStrings #-} module IntegrationTest ( withApp, integrationSpec ) where import BasicPrelude import Data.Aeson (FromJSON, parseJSON, (.:)) import qualified Data.Aeson as JSON import Network.Wai.Test (simpleBody) import Test.Hspec (Spec, SpecWith, before, describe, it) import qualified Yesod.Test as YT import TestSite (App, Route(..)) import TestTools type MyTestApp = YT.TestApp App withApp :: App -> SpecWith (YT.TestApp App) -> Spec withApp app = before $ return (app, id) authUrl :: Text authUrl = "http://localhost:3000/auth/login" data AuthUrl = AuthUrl Text deriving (Eq, Show) instance FromJSON AuthUrl where parseJSON (JSON.Object v) = AuthUrl <$> v .: "authentication_url" parseJSON _ = mempty loginUrl :: Text loginUrl = "http://localhost:3000/auth/page/hashdb/login" data LoginUrl = LoginUrl Text deriving (Eq, Show) instance FromJSON LoginUrl where parseJSON (JSON.Object v) = LoginUrl <$> v .: "loginUrl" parseJSON _ = mempty successMsg :: Text successMsg = "Login Successful" data SuccessMsg = SuccessMsg Text deriving (Eq, Show) instance FromJSON SuccessMsg where parseJSON (JSON.Object v) = SuccessMsg <$> v .: "message" parseJSON _ = mempty getBodyJSON :: FromJSON a => YT.YesodExample site (Maybe a) getBodyJSON = do resp <- YT.getResponse let body = simpleBody <$> resp result = JSON.decode =<< body return result integrationSpec :: SpecWith MyTestApp integrationSpec = do describe "The home page" $ do it "can be accessed" $ do YT.get HomeR YT.statusIs 200 describe "The protected page" $ do it "requires login" $ do needsLogin GET ("/prot" :: Text) it "looks right after login by a valid user" $ do _ <- doLogin "paul" "<PASSWORD>" YT.get ProtectedR YT.statusIs 200 YT.bodyContains "OK, you are logged in so you are allowed to see this!" 
it "can't be accessed after login then logout" $ do _ <- doLogin "paul" "<PASSWORD>" YT.get $ AuthR LogoutR -- That `get` will get the form from Yesod.Core.Handler.redirectToPost -- which will not be submitted automatically without javascript YT.bodyContains "please click on the button below to be redirected" -- so we do the redirection ourselves: YT.request $ do YT.setMethod "POST" YT.setUrl $ AuthR LogoutR -- yesod-core-1.4.19 added the CSRF token to the redirectToPost form YT.addToken YT.get HomeR YT.statusIs 200 YT.bodyContains "Your current auth ID: Nothing" YT.get ProtectedR YT.statusIs 303 describe "Login" $ do it "fails when incorrect password given" $ do loc <- doLoginPart1 "paul" "<PASSWORD>" checkFailedLogin loc it "fails when unknown user name given" $ do loc <- doLoginPart1 "xyzzy" "<PASSWORD>" checkFailedLogin loc describe "JSON Login" $ do it "JSON access to protected page gives JSON object with auth URL" $ do YT.request $ do YT.setMethod "GET" YT.setUrl ProtectedR YT.addRequestHeader ("Accept", "application/json") YT.statusIs 401 auth <- getBodyJSON YT.assertEq "Authentication URL" auth (Just $ AuthUrl authUrl) it "Custom loginHandler using submitRouteHashDB has correct URL in JSON" $ do YT.request $ do YT.setMethod "GET" YT.setUrl authUrl YT.addRequestHeader ("Accept", "application/json") YT.statusIs 200 login <- getBodyJSON YT.assertEq "Login URL" login (Just $ LoginUrl loginUrl) -- This example needs yesod-test >= 1.5.0.1, since older ones use wrong -- content type for JSON (https://github.com/yesodweb/yesod/issues/1063). 
it "Sending JSON username and password produces JSON success message" $ do -- This first request is only to get the CSRF token cookie, used below YT.request $ do YT.setMethod "GET" YT.setUrl authUrl YT.addRequestHeader ("Accept", "application/json") YT.request $ do YT.setMethod "POST" YT.setUrl loginUrl YT.addRequestHeader ("Accept", "application/json") YT.addRequestHeader ("Content-Type", "application/json; charset=utf-8") YT.setRequestBody "{\"username\":\"paul\",\"password\":\"<PASSWORD>\"}" -- CSRF token is being checked, since yesod-core >= 1.4.14 is forced YT.addTokenFromCookie YT.statusIs 200 msg <- getBodyJSON YT.assertEq "Login success" msg (Just $ SuccessMsg successMsg)
// UnstableAttr implements fs.InodeOperations.UnstableAttr. func (i *inodeOperations) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) { i.mu.Lock() defer i.mu.Unlock() return i.uattr, nil }
/** \brief Constructor for ATCAIface objects.
 *
 * Allocates and initializes an interface object from the given logical
 * configuration. On any failure (NULL config, allocation failure, or
 * atinit() error) no memory is leaked and NULL is returned.
 *
 * \param[in] cfg  points to the logical configuration for the interface
 * \return new ATCAIface on success, NULL on failure
 */
ATCAIface newATCAIface(ATCAIfaceCfg *cfg)
{
	ATCAIface caiface;

	/* Guard against a NULL configuration before dereferencing it below. */
	if (cfg == NULL) {
		return NULL;
	}

	caiface = (ATCAIface)malloc(sizeof(struct atca_iface));
	/* malloc() may return NULL; the original dereferenced it unchecked,
	 * which would crash on allocation failure. */
	if (caiface == NULL) {
		return NULL;
	}

	caiface->mType = cfg->iface_type;
	caiface->mIfaceCFG = cfg;

	if (atinit(caiface) != ATCA_SUCCESS) {
		/* Initialization failed: release the half-built object. */
		free(caiface);
		caiface = NULL;
	}

	return caiface;
}
/* # Copyright (c) 2012, The Met Office, UK # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of copyright holder nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # */ /* wgdos_pack.c * * Description: * Pack data to a WGDOS packed field * Revision 2.0 2012/08/28 12:19:51 hadmf * SRCE checkin. Type: Undef * Reason: First cut into libmo_unpack * * Revision 2.1 2012/09/17 hadmf * SRCE checkin. Type: Undef * Reason: Reordering code, fixing bugs and testing compatability with genuine data and adding comments * * Revision 2.2 2014/11/27 hshep * SRCE checkin. 
Type: Update * Reason: Bug fix in the initial bitshifting used to pack a row of data * * Revision 2.2.1 2014/12/08 hshep * SRCE checkin. Type: Update * Reason: Add return code 31, and bug fix for integer overflow when calculating * spread * Information: */ /* Standard header files used */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <limits.h> /* Package header files used */ #include "wgdosstuff.h" #include "logerrors.h" static char message[MAX_MESSAGE_SIZE]; /* End of header */ /* Count up the zeros in the row, no zeros returned means no zero mapping */ int count_zeros(int ncols, float* row_data, function* parent) { int zeros_count=0; int i; float min=row_data[0]; float max=row_data[0]; function subroutine; set_function_name(__func__, &subroutine, parent); /* Find out if we need to use zero bitmaps */ for (i=0;i<ncols;i++) { if(row_data[i]==0) { zeros_count++; } else { if (min>row_data[i]) min=row_data[i]; if (max<row_data[i]) max=row_data[i]; } } /* Just making up an algorithm about whether to store data with zeros mapped out. Since I don't know the accuracy, take it as 50-50 chance if the range is sqrt(2) bigger including zeros than excluding them and this is good enough */ if (zeros_count>0 && min>0.0) { if ((max-min)>(max/sqrt(2))) zeros_count=0; /* turn off zero packing */ } return zeros_count; } /* Fill the bitmap denoting when the given bitmap_value appears along with an easier to parse integer array replication if the value "true" is true, then bitmaps are true bit maps (1 = True). 
If "true" is false, then the bitmap is inverted */ int fill_bitmap(int ncols, float* row_data, float bitmap_value, int true, unsigned char* bitmap, int* array, function* parent) { int i,j; int count=0; unsigned char byte; function subroutine; set_function_name(__func__, &subroutine, parent); for (i=0;i<ncols;i+=8) { byte=0; for (j=0;j<8;j++) { if(i+j==ncols) { byte=byte<<(8-j); break; /* Stop counting at the end of the row */ } byte=byte<<1; if(row_data[i+j]==bitmap_value) { byte+=1; count++; array[i+j]=1; } else { array[i+j]=0; } } if (true==1) { bitmap[i/8]=byte; } else { /* Invert the logic if true==false */ bitmap[i/8]=~byte; } } return count; } /* Pack a 2-D field of floating point numbers stored linearly, storing the data as a bytestream (MSB first). If packing fails, return a nonzero code */ int wgdos_pack( int ncols, /* Number of columns in each row */ int nrows, /* Number of rows in field */ float* unpacked_data, /* Data to pack */ float mdi, /* Missing data indicator value */ int bpacc, /* WGDOS packing accuracy */ unsigned char* packed_data, /* Packed data */ int* packed_length, /* Packed data length */ function* parent) { float accuracy; /* Absolute accuracy to which data held */ float minval, maxval; unsigned char* mdi_bitmap; /* Missing data bitmap for current row as bitstream */ int* mdi_array; /* Integer array representation of mdi_bitmap 1=TRUE*/ unsigned char* zero_bitmap; /* Zeros bitmap for current row as bitstream*/ int* zero_array; /* Integer array representation of zero_bitmap 1=TRUE*/ int row; /* Number of rows transmitted so far */ int mdis_count; /* Number of missing data elements */ int zeros_count; /* Number of bitmapped zeros */ int ndata; /* Number of non-bitmapped data items */ unsigned int spread; /* Spread of values in each row */ float f_spread; /* A floating point value of spread */ float epsilon_spread; /* Accuracy for floating point comparison of spread */ int bpp; /* Number of bits required to store the packed values */ int 
bitmap_size; /* wgdos field/row constituents*/ wgdos_field_header* wgdos_field_header_pointer; /* Where the field header will be filled in */ float* row_data; /* Spare location to write the row as its being constructed */ int wgdos_row_header[2]; /* Spare location to write the row header as its being constructed */ int wgdos_field_header[3]; /* Spare location to write the field header as its being constructed */ unsigned char* packed_row; /* Pointer to the packed row being calculated */ int offset; /* Number of bytes into output field to write the next set of bytes */ unsigned int digits; /* Integer equivalent to the row data after compression */ int size_of_packed_field; int size_of_packed_row; int first_value; int i,j; int log_message = (get_verbosity()>=VERBOSITY_MESSAGE); /* Used to reduce the number of sprintf calls for loggin */ function subroutine; set_function_name(__func__, &subroutine, parent); if (ncols <= 1) { MO_syslog(VERBOSITY_ERROR, "Not a two-dimensional field. Cannot pack.", &subroutine); return 1; } epsilon_spread = 0.00001; wgdos_field_header_pointer=(void*)packed_data; bitmap_size=(ncols+7)/8; size_of_packed_field=0; accuracy=powf(2.0, (float)bpacc); /* Reserve work areas */ row_data = (float *) malloc(sizeof(float) * ncols); zero_bitmap = (unsigned char*) malloc(bitmap_size); mdi_bitmap = (unsigned char*) malloc(bitmap_size); packed_row = (unsigned char*) malloc(sizeof(int) * ncols); mdi_array = (int*) malloc(sizeof(int) * ncols); zero_array = (int*) malloc(sizeof(int) * ncols); /* The offset (size of field so far) already skips the field header */ offset=sizeof(wgdos_field_header); /* For each row */ for (row=0;row<nrows;row++) { /* Clean up counts, bitmaps etc */ zeros_count=0; mdis_count=0; size_of_packed_row=0; memset(mdi_bitmap, 0, bitmap_size); memset(zero_bitmap, 0, bitmap_size); memset(packed_row, 0, ncols * sizeof(int)); snprintf(message, MAX_MESSAGE_SIZE, "Row %d size of packed field at start %d", row, size_of_packed_field); 
MO_syslog(VERBOSITY_MESSAGE, message, &subroutine); first_value=1; /* create the zeros bitmap */ zeros_count=count_zeros(ncols, &unpacked_data[row*ncols], &subroutine); if (zeros_count>0) { zeros_count=fill_bitmap(ncols, &unpacked_data[ncols*row], 0.0, 0, zero_bitmap, zero_array, &subroutine); } /* create the MDI bitmap */ mdis_count=fill_bitmap(ncols, &unpacked_data[ncols*row], mdi, 1, mdi_bitmap, mdi_array, &subroutine); /* collect the remaining data values*/ ndata=0; for (i=0;i<ncols;i++) { if (mdi==unpacked_data[i+ncols*row]) { /* Skip mdis, we're going to bitmap them out */ } else if (zeros_count && (0.0==unpacked_data[i+ncols*row])) { /* Skip zeros if we're going to bitmap them out */ } else { row_data[ndata]=unpacked_data[i+ncols*row]; if (first_value) { minval=row_data[ndata]; maxval=row_data[ndata]; first_value=0; } if (row_data[ndata]<minval) minval=row_data[ndata]; if (row_data[ndata]>maxval) maxval=row_data[ndata]; ndata++; } } if ((mdis_count+zeros_count)==ncols) minval=maxval; /* Calculate the number of bits required to contain the interval at the required accuracy */ spread=(maxval-minval)/accuracy; spread+=((maxval-minval)>=accuracy); /* It is possible to get an error where the value of spread becomes far too large for the capacity of an unsigned integer. Therefore we check that spread is smaller than the max value for an unsigned integer ie. 
4294967295 */ f_spread = (maxval-minval)/accuracy; f_spread += (float) ((maxval-minval)>=accuracy); if (f_spread <= (float)UINT_MAX-epsilon_spread) { for (bpp=0;spread;spread=spread>>1,bpp++) {/*nothing*/} } else { //override to failure bpp = 32; } if (bpp>31) { snprintf(message, MAX_MESSAGE_SIZE, "Data spread over the row (%f - %f)too large to manage at this accuracy (%f)", minval, maxval, accuracy); MO_syslog(VERBOSITY_ERROR, message, &subroutine); return INVALID_PACKING_ACCURACY; } snprintf(message, MAX_MESSAGE_SIZE, "scale %d min %f max %f accuracy %f bpacc %d ndata %d bpp %d", spread, minval, maxval, accuracy, bpacc,ndata, bpp); MO_syslog(VERBOSITY_MESSAGE, message, &subroutine); /* Pack the data as a bitstream of integers as per WGDOS packing scheme */ message[0]=0; for (i=0,ndata=0;i<ncols;i++) { if(mdi_array[i]) { if (log_message) { snprintf(message+strlen(message), MAX_MESSAGE_SIZE-strlen(message), "%012g / %-12s ", mdi, "MDI"); } } else if (zeros_count && zero_array[i]){ if (log_message) { snprintf(message+strlen(message), MAX_MESSAGE_SIZE-strlen(message), "%012g / %-12s ", 0.0, "Zero"); } } else { /* Calculate the packed integer equivalent of the row data*/ digits=(row_data[ndata]-minval)/accuracy; if (log_message) { snprintf(message+strlen(message), MAX_MESSAGE_SIZE-strlen(message), "%012g / %-12d ", row_data[ndata], digits); } /* Stuff these bits into the row data spare space */ bitstuff(packed_row, ndata*bpp, digits, bpp, &subroutine); /* Move on to the next value */ ndata++; } /* If we log stuff out, print out four values per line to get clean logging output */ if (i%4==3 && log_message) { snprintf(message+strlen(message), MAX_MESSAGE_SIZE-strlen(message), "%3d",i); MO_syslog(VERBOSITY_MESSAGE, message, &subroutine); message[0]=0; } } /* If we logged, we may not have finished on a four-word boundary */ if (message[0]!=0 && log_message) { snprintf(message+strlen(message), MAX_MESSAGE_SIZE-strlen(message), "%3d",i); MO_syslog(VERBOSITY_MESSAGE, 
message, &subroutine); message[0]=0; } /* Calculate the WGDOS row header for this row now we have all the info */ wgdos_calc_row_header(wgdos_row_header, minval, bpp, ncols, zeros_count, mdis_count, &subroutine); /* finally, pack all this info into the field */ /* first the row header */ memcpy(&packed_data[offset], wgdos_row_header, sizeof(wgdos_row_header)); offset+=sizeof(wgdos_row_header); /* then the MDI bitmap */ if (mdis_count) { memcpy(&packed_data[offset], mdi_bitmap, bitmap_size); offset+=bitmap_size; } /* Then the zero bitmap */ if (zeros_count) { memcpy(&packed_data[offset], zero_bitmap, bitmap_size); offset+=bitmap_size; } /* Round off to the next word boundary */ offset=4*((offset+3)/4); /* lastly the data */ memcpy(&packed_data[offset], packed_row, 4*((bpp*ndata+31)/32)); size_of_packed_row+=((bpp*ndata+31)/32); offset+=(4*size_of_packed_row); snprintf(message, MAX_MESSAGE_SIZE, "Field length on row %d is %d packed row size %d",row, offset, size_of_packed_row*4); MO_syslog(VERBOSITY_INFO, message, &subroutine); } /* size of packed field is in bytes. 
Neet to store it as 32-bit words for WGDOS */ size_of_packed_field+=(offset/4); snprintf(message, MAX_MESSAGE_SIZE, "precision %d ncols %d nrows %d length %d\n", bpacc, ncols, nrows, size_of_packed_field); MO_syslog(VERBOSITY_MESSAGE, message, &subroutine); /* Fill in the WGDOS field header at the beginning of the field */ wgdos_field_header_pointer->total_length=htonl(size_of_packed_field); wgdos_field_header_pointer->precision=htonl(bpacc); wgdos_field_header_pointer->pts_in_row=htons(ncols); wgdos_field_header_pointer->rows_in_field=htons(nrows); *packed_length=size_of_packed_field; snprintf(message, MAX_MESSAGE_SIZE, "Packed field size %d", size_of_packed_field); MO_syslog(VERBOSITY_INFO, message, &subroutine); /* Free the work areas */ free(row_data); free(zero_bitmap); free(mdi_bitmap); return 0; } /* A routine written to test the bitstuff routine works properly */ static int test1_in[]={20,4,0,3,30,11,12,12}; static char test1_out[]={161,0,63,45,150}; static int test2_in[]={921,91,2491,1001,3275}; static char test2_out[]={57,144,91,155,179,233,204,176}; static unsigned char test_buffer[72]; int test_bitstuff() { int i,bitnumber, worked; for (i=0,bitnumber=0;i<8;i++,bitnumber+=5) { bitstuff(test_buffer, bitnumber, test1_in[i], 5, NULL); } worked=(0==memcmp(test_buffer, test1_out, 5)); memset(test_buffer, 0, 72); for (i=0,bitnumber=0;i<5;i++,bitnumber+=12) { bitstuff(test_buffer, bitnumber, test2_in[i], 12, NULL); } worked+=2*(0==memcmp(test_buffer, test2_out, 8)); return (worked); } /* The packing version of wgdos_pack_polybits */ int bitstuff(unsigned char* byte, int bitnum, unsigned int value, unsigned int nbits, function* parent) { unsigned long long int shifted; /* The bitshifted value, needs to be definitely bigger than an unsigned int32 */ int bitshift; /* How many bits need to get shifted to insert the next value correctly */ int bytes_used; /* How many bytes out of shifted were used to store the value */ int base_byte; /* The byte offset that we start 
putting bits from the value passed in to */ int n; function subroutine; set_function_name(__func__, &subroutine, parent); /* Just make sure we don't store more than can be fitted in */ if (nbits>31) { snprintf(message, MAX_MESSAGE_SIZE, "bpp value out of range (%d)",nbits); MO_syslog(VERBOSITY_ERROR, message, &subroutine); return (-1); } /* And another belt-and-braces check */ if (value>=(1<<nbits)) { snprintf(message, MAX_MESSAGE_SIZE, "Value %d too large for a %d bit number",value, nbits); MO_syslog(VERBOSITY_ERROR, message, &subroutine); return (-1); } bitshift=8-(bitnum+nbits)%8; /* How many bits to shift value so it occupies the right byte positions */ shifted=(unsigned long long int)value << bitshift; /* Cast value to long long int before shifting otherwise we run out of bits */ base_byte=bitnum/8; /* The byte that the first (highest) byte in shifted needs to go in to */ bytes_used=(nbits+bitshift+7)/8; /* How many bytes used (nbits are shifted up by bitshift, so add them) */ /* now stuff each used byte from shifted into the bytestream in the appropriate location */ for (n=0;n<bytes_used;n++) { byte[n+base_byte]+=shifted>>(8*(bytes_used-n-1)); /* first byte needs highest byte of shifted put in it */ } return(0); } /* Not used here, but ought to produce the opposite effect of bitstuff */ unsigned int bitsplit(unsigned char* byte, int bitnum, unsigned int nbits, function* parent) { unsigned long long int shifted; int bitshift; int n; int bytes_used; int base_byte; int mask; function subroutine; set_function_name(__func__, &subroutine, parent); if (nbits>31) { snprintf(message, MAX_MESSAGE_SIZE, "bpp out of range (%d)",nbits); MO_syslog(VERBOSITY_ERROR, message, &subroutine); return (-1); } mask=(1<<nbits)-1; /* Mask out bits from any earlier numbers in the bitstream */ bitshift=8-(bitnum+nbits)%8; /* How many bits to shift value so it occupies the right byte positions */ shifted=0; base_byte=bitnum/8; /* The byte that the first (highest) byte in shifted needs 
to come from */ bytes_used=(nbits+bitshift+7)/8; /* How many bytes used (nbits are shifted up by bitshift, so add them) */ for (n=0;n<bytes_used;n++) { shifted=shifted<<8; /* Any earlier bytes read go higher up the shifted number */ shifted+=byte[n+base_byte]; /* and add the number from this byte */ } return(mask&(shifted>>bitshift)); /* Shift back to remove later bits, mask to hide earier ones */ } /* Calculate the entries in the rather overcomplex WGDOS row header */ int wgdos_calc_row_header(int* wgdos_header, float minval, int bpp, int npts, int zeros, int mdis, function* parent) { int one=1; /* Needed because of the fortran-compatible convert_float... call takes pointers */ int header; /* The 32 bits of the row header, ready to run ntohl on... */ int mapsize=(npts+7)/8; int size_of_row=0; union { int i; float f; } basetemp; /* Used to convert floats to big endian floats */ function subroutine; set_function_name(__func__, &subroutine, parent); /* WGDOS requires IBM floats not IEEE and they need to be Big Endian. Make it so... */ basetemp.f=minval; basetemp.i=htonl(basetemp.i); convert_float_ieee32_to_ibm((int*)&minval, &basetemp.i, &one); basetemp.i=htonl(basetemp.i); npts-=(zeros+mdis); snprintf(message, MAX_MESSAGE_SIZE, "Zero: %d MDI: %d(%d) %d bits per value base value %f %d words taken for %d values", (zeros>0), (mdis>0), mdis, bpp, minval, (bpp*npts+31)/32, npts); MO_syslog(VERBOSITY_MESSAGE, message, &subroutine); /* Do the rest of the header flags, etc */ header=0; if (zeros>0) header+=128; if (mdis>0) header+=32; header+=(bpp&0x1f); header=header<<16; size_of_row=((bpp*npts+31)/32); size_of_row+=(((zeros>0)+(mdis>0))*mapsize+3)/4; header+=size_of_row; header=htonl(header); /* fill in the two-word array that is the WGDOS row header */ wgdos_header[0]=basetemp.i; wgdos_header[1]=header; return 0; }
<gh_stars>0 package main import ( "testing" "sigs.k8s.io/kustomize/kyaml/yaml" ) func TestStarlarkFunctionConfig(t *testing.T) { testcases := []struct { config string expectErrMsg string }{ { config: `apiVersion: fn.kpt.dev/v1alpha1 kind: StarlarkRun metadata: name: my-star-fn namespace: foo source: | def run(r, ns_value): for resource in r: resource["metadata"]["namespace"] = ns_value run(ctx.resource_list["items"], "baz") `, }, { config: `apiVersion: fn.kpt.dev/v1alpha1 kind: StarlarkRun source: | def run(r, ns_value): for resource in r: resource["metadata"]["namespace"] = ns_value run(ctx.resource_list["items"], "baz") `, expectErrMsg: "`metadata.name` must be set in starlark function config", }, { config: `apiVersion: fn.kpt.dev/v1alpha1 kind: StarlarkRun metadata: name: my-star-fn `, expectErrMsg: "`source` must not be empty", }, } for _, tc := range testcases { var sf StarlarkRun if err := yaml.Unmarshal([]byte(tc.config), &sf); err != nil { t.Errorf("unexpcted error: %v", err) continue } err := sf.Validate() switch { case err != nil && tc.expectErrMsg == "": t.Errorf("unexpected error: %v", err) case err == nil && tc.expectErrMsg != "": t.Errorf("expect error: %v, but got nothing", tc.expectErrMsg) case err != nil && err.Error() != tc.expectErrMsg: t.Errorf("expect error: %v, but got: %v", tc.expectErrMsg, err) } } }
# Reads a depth N and a list of per-depth leaf counts, then decides whether a
# rooted binary tree with exactly As[i] leaves at depth i can exist; prints the
# maximum total number of vertices, or -1 if impossible.
# (assumes the second input line holds N + 1 integers — TODO confirm)
N = int(input())
As = list(map(int, input().split()))
from itertools import accumulate
# cAs[i] = sum(As[i+1:]) — leaves still to be placed strictly below depth i.
# Built as suffix sums: accumulate the reversed list, reverse back, drop the
# grand total, and pad with a trailing 0 for the deepest level.
cAs = list(accumulate(As[::-1]))[::-1][1:] + [0]
ans = 0          # running total of nodes over all depths processed so far
now_nodes = 1    # nodes available at the current depth (the root level has 1)
for i, (cum, a) in enumerate(zip(cAs, As)):
    if i==N:
        # Deepest level: every remaining node must be one of the a leaves.
        if now_nodes==a:
            print(ans+a)
        else:
            print(-1)
    if now_nodes<=a and i!=N:
        # More leaves demanded than nodes available before the final depth.
        # NOTE(review): `<=` also rejects now_nodes == a here (a level that is
        # entirely leaves with nothing below) — confirm that is intended.
        print(-1)
        break
    # The (now_nodes - a) internal nodes have at most two children each, and
    # the next level never needs more nodes than the leaves left to place.
    num = min(cum, (now_nodes-a)*2)
    ans += now_nodes
    now_nodes = num
/**
 * Set the loginParameters property: Login parameters to send to the OpenID Connect authorization endpoint when a
 * user logs in. Each parameter must be in the form "key=value".
 *
 * @param loginParameters the loginParameters value to set.
 * @return the AzureActiveDirectoryLogin object itself.
 */
public AzureActiveDirectoryLogin withLoginParameters(List<String> loginParameters) {
    // Lazily create the inner properties bag the first time a setter is used.
    if (this.innerProperties() == null) {
        this.innerProperties = new AzureActiveDirectoryLoginProperties();
    }
    AzureActiveDirectoryLoginProperties properties = this.innerProperties();
    properties.withLoginParameters(loginParameters);
    return this;
}
package _time import "time" //获取时间戳 int64 func Stamp64() int64 { return time.Now().Unix() } //正确的使用方式 //time.Sleep(time.Second * time.Duration(common.C.CpuTempRest)) //获取指定时间的时间戳 //获取一个月之后的时间戳 //30 * 24 * 60 * 60 * time.Second //或者 //30 * 24 * time.Hour func AppointStamp64(t time.Duration) (timestamp int64) { return time.Now().Add(t).Unix() } //将时间戳转换为字符串 func Int64ToString(t int64) string { return time.Unix(t, 0).Format(TimeTemplate["timeTemplate1"]) }
/*
 * Compress SourceFile into TargetFile, reporting progress through the
 * CompressNotify callback and returning compression state in pLZI.
 * NOTE(review): presumably Rename selects the compressed-name convention
 * (e.g. trailing '_' in the target name) — confirm against the definition.
 */
INT
DiamondCompressFile(
    IN NOTIFYPROC CompressNotify,
    IN LPSTR SourceFile,
    IN LPSTR TargetFile,
    IN BOOL Rename,
    OUT PLZINFO pLZI
    );

/* Selected compression engine/type. */
TCOMP DiamondCompressionType;           // 0 if not diamond (ie, LZ)
(CNN) -- What is seen by some as the holy four-day weekend for geek culture at San Diego Comic-Con has gone mainstream in a big way for the past few years. Are you a Comic-Con beginner? Get the rules right! Starting today, there will be panels called "I Can't Write, I Can't Draw, But I Love Comics!" and "Indie Comics Marketing 101" taking place alongside "USA Network's 'Psych' " and "Aloha, Earth!" a panel about CBS' upcoming remake of "Hawaii Five-0." But this recent spate of panels about movie and TV properties with no sci-fi or comic book elements has some fans fuming. "The mainstream TV and film representation at Comic-Con has outstripped the original essence of the convention," said iReporter Brad Powers who attended Comic-Con the past two years, mainly for the panels on "Lost." He believed that this "mainstreaming" of the convention has played a hand in it being filled to capacity. Going to San Diego Comic-Con? For one panel, Powers said he arrived a full 45 minutes before it was set to begin. "Thirty minutes passed and I thought for sure I was getting closer to the front door. Finally, 10 minutes after the start, some nice Con attendee started walking back through the line to let everyone know that they were full up and no one else would be getting in." Powers is not attending the convention this year since "Lost" has ended. One iReporter, Michelle Cruz, has not attended in years. "I don't even try anymore," she said." "They should have a Commercialism-Con and bring back Comic-Con the way it used to be." "It's too crowded, too commercial, and, oh yea, it sells out before you even know who's gonna be there," she said. "I'd be really disappointed spending my money and seeing that I was gonna go to a panel discussion about [the Fox series] 'Glee.' " The fact that "Glee" attended last year (and is returning this year) certainly seemed to fuel anger among some Comic-Con devotees. 
"It used to be the coolest thing in the world and now it's just a reason not to drive downtown for a few days." Erin O'Donoghue, a hardcore fan of "Chuck," is one of those who disagrees. "I am not one of those who believe it should remain solely in the realm of comics and their related forms of media," she said. "I personally really enjoy the fact that SDCC has become a celebration of popular culture and every entertainment medium." Even those who have covered the convention have differing opinions about it. Vic Holtreman of Screenrant.com doesn't think that non-genre panels are a very good idea. "I guess the studios figure, well, as long as we're here we might as well throw this movie out there. ... Maybe we give this a little boost," he said. "Last year, at one of the studio panels, they had the Mike Judge film, 'Extract,' and people saw the footage. Then the movie completely tanked." Fanbolt.com's Emma Loggins, on the other hand, sees a place for these kinds of panels at Comic-Con. "I think that what it's really about is anything that has a strong fan base. It also brings in other people to the convention that otherwise wouldn't be interested," she said. Quite a few people she has talked to, however, disagree with her. She even recalls a quite vocal reaction last year to a movie that, on the surface, would seem like a good fit. "Con-goers were protesting at the Summit Entertainment booth saying ... with signs, 'Twilight Ruined Comic Con,' because fans couldn't get into see panels earlier in the day because of the thousands of 'Twilight' fans that descended on Hall H." This year, those fans can breathe a sigh of relief, since the cast of "The Twilight Saga" won't be there -- this year. But there are plenty of other panels, such as one for Will Ferrell's cop comedy "The Other Guys," which will ensure the debate continues.
New on Kickstarter is the Mono desktop 3D printer with a touchscreen interface. Looks interesting, but the launch has had teething troubles. Recently, ALL3DP ran an article about things to be aware of when backing crowdfunding campaigns. Supporting innovation is important, but so is protecting consumers from shoddy products. The past week has seen the launch of three 3D printing campaigns on Kickstarter; DIY Headphones Kit, CubeForme, and now the Mono Desktop 3D Printer from Mono Industries. We’re curious about the Mono because of its compact design and very attractive price-point — starting at 325 euro — and the inclusion of a touchscreen interface. The Mono Desktop 3D printer provides a build volume of 16 x 16 x 16 cm, and the 7 inch touchscreen offers access to all the essential functions, as well as providing visuals during the printing process. The 3D printer also supports cloud services like Google Drive and Dropbox, and is able to visualise GCode and simulate a 3D print before any filament has been extruded. It sounds amazing, right? But…. alarm bells are ringing. Digging Deeper into Mono Industries For starters, the website listed on the campaign page has been down for the past 72 hours. Not a good way to inspire confidence among potential backers. And while the domain is registered to an address in Poland, the stated location of the project is London, UK. We’ve also seen separate reports (here and here) that the company is actually a startup operating out of Berlin, Germany. What’s going on here? With some confusion about who exactly the Mono team is, we contacted Filip Finke, CEO of Mono Industries Europe, for a few words on the company and story. He explained that he has three business partners: Piotr Bojanowski, CTO; Pawel Moregiel, Sales Manager; and Daria Siwek, law student and management graduate. The team started on a university campus but is now spread around the UK, Germany and Poland. 
They hope this will help optimize their market and assist in creating a powerful supply chain. Something else is puzzling us, however. The crowdfunding campaign for the Mono Desktop 3D Printer is not confined to Kickstarter. An identical campaign, operated by the same team, was launched on Indiegogo nearly two weeks ago (and has raised scarcely any funding). There are no specific rules against running two crowdfunding campaigns simultaneously, but it’s widely considered bad practice. From the backer perspective, the limited exclusivity of the tiered rewards evaporates if, say, a tier is “all gone!” on one platform but still available on another. The Super Early Bird reward for a Mono on Kickstarter — where you can snap one up for 325 euro — is no longer available, but the equivalent award is still freely available on Indiegogo for 280 pounds. Another reason not to do it, from the entrepreneur perspective, is that you’re diluting the impact of your campaign by appealing to backers on separate platforms. Potential revenue streams are split, which is especially dangerous with Kickstarter; a product only becomes available to backers when it’s been fully funded. Last, but not least, running a crowdfunding campaign is extremely hard work. Why divide your focus across two platforms, when it’s difficult enough to manage one to a satisfactory level? Should you back the Mono Desktop 3D Printer? Sifting though the shaky foundations of this campaign, it seems the problems are less about fraudulent intentions than a series of unfortunate choices. The team have been unlucky with their website, perhaps, but created unnecessary confusion by not being clear about their base of operations. Worse is hopping between different crowdfunding platforms, making their backer rewards worthless in the process. 
Which is a shame, because the Mono Desktop 3D printer actually looks very promising, and has (according to both campaign pages) benefited from the European Social Fund in its development cycle. The lesson here is that clarity and planning are of the utmost importance in a crowdfunding project. You can still give Mono Industries the benefit of the doubt and bless them with your backing. But if you do, make sure you go in knowing all the facts. Questions or comments on this story? Let us know!
(Newser) – The Texas plumber whose truck infamously ended up in the hands of jihadists is suing a car dealership for more than $1 million, the Houston Chronicle reports. At issue: the dealership’s failure to remove a decal bearing the name and phone number of the Texas City man's business, which he says destroyed his livelihood and resulted in ongoing "shock, fear, anxiety, mental anguish, humiliation, and degradation" after a photo of his truck in Syria was widely circulated. In the suit filed last week, Mark Oberholtzer of Mark-1 Plumbing claims that, as he traded in the 2005 Ford F-250 in October 2013, he began to remove the decal, but a salesman said to "let them handle it." The suit traces the pickup's journey from a local auto auction to Mersin, Turkey. At some point, it entered Syria, as evidenced by a photo of the vehicle in Aleppo tweeted last December by an Ansar al-Deen militant. The photo showed the truck, now outfitted with an anti-aircraft weapon, with "plaintiffs' logo and phone number … still on the vehicle door," the suit says. Further, that photo reached millions of people through social media and TV (the final episode of the Colbert Report, for one). On Dec. 17, 2014, the suit says, Oberholtzer’s business and personal phones received more than 1,000 calls from people who variously yelled expletives, sang in Arabic, and made "threats of injury or death." In his suit, Oberholtzer alleges that AutoNation Ford Gulf Freeway admitted to having "never touched the truck"; said removing the decal wasn't its responsibility; and hung up on him. Oberholtzer, per Courthouse News, continues to get phone threats "whenever ISIS commits an atrocity that is reported nationally." (ISIS somehow keeps getting this particular kind of pickup truck.)
with open("../input/day3.txt", 'r') as inputFile:
    data = inputFile.readlines()


def determineCount(slope):
    """Count the trees ('#') hit while descending the grid at `slope`.

    The map repeats infinitely to the right, so the x coordinate wraps
    around the row length.  The starting cell (0, 0) is never counted.
    """
    dx, dy = slope
    x, y = 0, 0
    trees = 0
    while y < len(data):
        if (x, y) != (0, 0):
            row = data[y].rstrip()
            if row[x % len(row)] == "#":
                trees += 1
        x, y = x + dx, y + dy
    return trees


# Part 1: a single slope of right 3, down 1
print(determineCount((3, 1)))

# Part 2: product of the tree counts over all five candidate slopes
counts = [
    determineCount((1, 1)),
    determineCount((3, 1)),
    determineCount((5, 1)),
    determineCount((7, 1)),
    determineCount((1, 2))
]
part2 = 1
for treeTotal in counts:
    part2 *= treeTotal
print(part2)
/**
 * Start any class in the classpath as game
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
   // args[0] is a fully qualified class name; split it into package and simple name
   String fullName = args[0];
   int lastDot = fullName.lastIndexOf('.');
   String packageName = fullName.substring(0, lastDot);
   String className = fullName.substring(lastDot + 1);
   ClasspathCartridge cartridge = new ClasspathCartridge(fullName, packageName, className);
   launch(cartridge, false, args);
}
Information Transfer Capacity of Articulators in American Sign Language The ability to convey information is a fundamental property of communicative signals. For sign languages, which are overtly produced with multiple, completely visible articulators, the question arises as to how the various channels co-ordinate and interact with each other. We analyze motion capture data of American Sign Language (ASL) narratives, and show that the capacity of information throughput, mathematically defined, is highest on the dominant hand (DH). We further demonstrate that information transfer capacity is also significant for the non-dominant hand (NDH), and the head channel too, as compared to control channels (ankles). We discuss both redundancy and independence in articulator motion in sign language, and argue that the NDH and the head articulators contribute to the overall information transfer capacity, indicating that they are neither completely redundant to, nor completely independent of, the DH.
/** * Associated with a JFrame. * <br> * @author Charles Bentley * */ public class FrameScreenManager implements IStringable { private boolean isFullScreen = false; private int prevX, prevY, prevWidth, prevHeight; private CBentleyFrame f; private boolean isOnlyHeight; private int screenID; /** * The {@link CBentleyFrame} provides the context. * @param frame */ public FrameScreenManager(CBentleyFrame frame) { f = frame; } /** * Used only when asked for fullscreen * @param id */ public void setScreenID(int id) { screenID = id; } public void setOnlyHeight(boolean v) { isOnlyHeight = v; } public boolean isFullScreen() { return isFullScreen; } /** * * @param v */ public void setFullScreen(boolean v) { if (v) { if (!isFullScreen) { setFullScreen(); } } else { if (isFullScreen) { exitFS(); } } } public boolean toggleFullScreen() { if (isFullScreen == false) { setFullScreen(); return true; } else { exitFS(); return false; } } /** * */ protected void exitFS() { f.setVisible(true); f.setBounds(prevX, prevY, prevWidth, prevHeight); f.dispose(); f.setUndecorated(false); f.setVisible(true); isFullScreen = false; } protected void setFullScreen() { saveFrameBounds(); f.dispose(); //Destroys the whole JFrame but keeps organized every Component //Needed if you want to use Undecorated JFrame //dispose() is the reason that this trick doesn't work with videos f.setUndecorated(true); int fsWidth = prevWidth; int fsHeight = prevHeight; int fsX = 0; int fsY = 0; //decide on which screen to fullscreen GraphicsEnvironment ge = GraphicsEnvironment.getLocalGraphicsEnvironment(); GraphicsDevice[] gds = ge.getScreenDevices(); if (screenID >= 0 && screenID < gds.length) { Rectangle bounds = gds[screenID].getDefaultConfiguration().getBounds(); int bx = (int) bounds.getX(); int by = (int) bounds.getY(); if (bx <= prevX && prevX <= bx + bounds.getWidth()) { if (by <= prevY && prevY <= by + bounds.getHeight()) { fsX = bx; fsY = by; fsWidth = (int) bounds.getWidth(); fsHeight = (int) 
bounds.getHeight(); } } } else { //check relative to x,y position for (int i = 0; i < gds.length; i++) { GraphicsDevice gd = gds[i]; Rectangle bounds = gd.getDefaultConfiguration().getBounds(); int bx = (int) bounds.getX(); int by = (int) bounds.getY(); if (bx <= prevX && prevX <= bx + bounds.getWidth()) { if (by <= prevY && prevY <= by + bounds.getHeight()) { fsX = bx; fsY = by; fsWidth = (int) bounds.getWidth(); fsHeight = (int) bounds.getHeight(); break; } } } } if (isOnlyHeight) { fsWidth = prevWidth; } f.setBounds(fsX, fsY, fsWidth, fsHeight); f.setVisible(true); isFullScreen = true; } public void saveFrameBounds() { prevX = f.getX(); prevY = f.getY(); prevWidth = f.getWidth(); prevHeight = f.getHeight(); } public int getPrevX() { return prevX; } public int getPrevY() { return prevY; } public int getPrevWidth() { return prevWidth; } public int getPrevHeight() { return prevHeight; } public int getScreenID() { return screenID; } /** * Returns the screen on which the Frame is located. * <br> * -1 if none. 
* @param frame * @return */ public int getScreenIDFromXY() { GraphicsEnvironment ge = GraphicsEnvironment.getLocalGraphicsEnvironment(); GraphicsDevice[] gds = ge.getScreenDevices(); for (int i = 0; i < gds.length; i++) { GraphicsDevice gd = gds[i]; Rectangle bounds = gd.getDefaultConfiguration().getBounds(); int bx = (int) bounds.getX(); int by = (int) bounds.getY(); if (bx <= f.getX() && f.getX() <= bx + bounds.getWidth()) { if (by <= f.getY() && f.getY() <= by + bounds.getHeight()) { return i; } } } return -1; } /** * If frame is not visible, move it to 0,0 * @param frame */ public void verifyAndMove() { int screenID = getScreenIDFromXY(); if (screenID == -1) { f.setLocation(0, 0); } } //#mdebug public String toString() { return Dctx.toString(this); } public void toString(Dctx dc) { dc.root(this, "FrameScreenManager"); dc.appendVarWithSpace("isFullscreen", isFullScreen); dc.appendVarWithSpace("isOnlyHeight", isOnlyHeight); dc.appendVarWithSpace("screenID", screenID); dc.nl(); dc.appendVarWithSpace("prevX", prevX); dc.appendVarWithSpace("prevY", prevY); dc.appendVarWithSpace("prevWidth", prevWidth); dc.appendVarWithSpace("prevHeight", prevHeight); dc.nlLvl(f); } public UCtx toStringGetUCtx() { return f.getSc().getUCtx(); } public String toString1Line() { return Dctx.toString1Line(this); } public void toString1Line(Dctx dc) { dc.root1Line(this, "FrameScreenManager"); dc.appendVarWithSpace("isFullscreen", isFullScreen); dc.appendVarWithSpace("isOnlyHeight", isOnlyHeight); dc.appendVarWithSpace("screenID", screenID); dc.appendVarWithSpace("prevX", prevX); dc.appendVarWithSpace("prevY", prevY); dc.appendVarWithSpace("prevWidth", prevWidth); dc.appendVarWithSpace("prevHeight", prevHeight); } //#enddebug }
package net.osmand.plus; import android.annotation.SuppressLint; import android.graphics.Bitmap; import android.graphics.Matrix; import android.os.AsyncTask; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.core.content.ContextCompat; import net.osmand.CallbackWithObject; import net.osmand.GPXUtilities; import net.osmand.GPXUtilities.GPXFile; import net.osmand.GPXUtilities.GPXTrackAnalysis; import net.osmand.GPXUtilities.Route; import net.osmand.GPXUtilities.Track; import net.osmand.GPXUtilities.TrkSegment; import net.osmand.GPXUtilities.WptPt; import net.osmand.IProgress; import net.osmand.IndexConstants; import net.osmand.PlatformUtil; import net.osmand.data.LatLon; import net.osmand.plus.GPXDatabase.GpxDataItem; import net.osmand.plus.activities.SavingTrackHelper; import net.osmand.plus.helpers.GpxUiHelper; import net.osmand.plus.helpers.GpxUiHelper.GPXDataSetAxisType; import net.osmand.plus.helpers.GpxUiHelper.GPXDataSetType; import net.osmand.plus.helpers.GpxUiHelper.GPXInfo; import net.osmand.plus.helpers.SearchHistoryHelper; import net.osmand.plus.helpers.enums.MetricsConstants; import net.osmand.plus.mapcontextmenu.other.TrackDetailsMenu.ChartPointLayer; import net.osmand.plus.mapmarkers.MapMarkersGroup; import net.osmand.plus.mapmarkers.MapMarkersHelper; import net.osmand.plus.routing.GPXRouteParams.GPXRouteParamsBuilder; import net.osmand.plus.track.GpxSplitType; import net.osmand.util.Algorithms; import org.apache.commons.logging.Log; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.File; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.StringTokenizer; public class GpxSelectionHelper { private final static Log LOG = 
PlatformUtil.getLog(GpxSelectionHelper.class); public static final String CURRENT_TRACK = "currentTrack"; private static final String FILE = "file"; private static final String BACKUP = "backup"; private static final String BACKUPMODIFIEDTIME = "backupTime"; private static final String COLOR = "color"; private static final String SELECTED_BY_USER = "selected_by_user"; private static final String HIDDEN_GROUPS = "hidden_groups"; private final OsmandApplication app; private final SavingTrackHelper savingTrackHelper; @NonNull private List<SelectedGpxFile> selectedGPXFiles = new ArrayList<>(); private final Map<GPXFile, Long> selectedGpxFilesBackUp = new HashMap<>(); private SelectGpxTask selectGpxTask; public GpxSelectionHelper(OsmandApplication app, SavingTrackHelper trackHelper) { this.app = app; savingTrackHelper = trackHelper; } public void clearAllGpxFilesToShow(boolean backupSelection) { selectedGpxFilesBackUp.clear(); if (backupSelection) { for (SelectedGpxFile s : selectedGPXFiles) { selectedGpxFilesBackUp.put(s.gpxFile, s.modifiedTime); } } selectedGPXFiles = new ArrayList<>(); saveCurrentSelections(); } public void restoreSelectedGpxFiles() { for (Entry<GPXFile, Long> gpxEntry : selectedGpxFilesBackUp.entrySet()) { if (!Algorithms.isEmpty(gpxEntry.getKey().path)) { File file = new File(gpxEntry.getKey().path); if (file.exists() && !file.isDirectory()) { if (file.lastModified() > gpxEntry.getValue()) { new GpxFileLoaderTask(file, new CallbackWithObject<GPXFile>() { @Override public boolean processResult(GPXFile result) { if (result != null) { selectGpxFile(result, true, false); } return true; } }).execute(); } else { selectGpxFile(gpxEntry.getKey(), true, false); } } } saveCurrentSelections(); } } public static class GpxFileLoaderTask extends AsyncTask<Void, Void, GPXFile> { private final File fileToLoad; private final CallbackWithObject<GPXFile> callback; public GpxFileLoaderTask(File fileToLoad, CallbackWithObject<GPXFile> callback) { this.fileToLoad = 
fileToLoad; this.callback = callback; } @Override protected GPXFile doInBackground(Void... voids) { return GPXUtilities.loadGPXFile(fileToLoad); } @Override protected void onPostExecute(GPXFile gpxFile) { if (callback != null) { callback.processResult(gpxFile); } } } public boolean isShowingAnyGpxFiles() { return !selectedGPXFiles.isEmpty(); } @NonNull public List<SelectedGpxFile> getSelectedGPXFiles() { return selectedGPXFiles; } public Map<GPXFile, Long> getSelectedGpxFilesBackUp() { return selectedGpxFilesBackUp; } public static boolean isGpxFileSelected(@NonNull OsmandApplication app, @Nullable GPXFile gpxFile) { return gpxFile != null && ((gpxFile.showCurrentTrack && app.getSelectedGpxHelper().getSelectedCurrentRecordingTrack() != null) || (gpxFile.path != null && app.getSelectedGpxHelper().getSelectedFileByPath(gpxFile.path) != null)); } @SuppressLint({"StringFormatInvalid", "StringFormatMatches"}) public String getGpxDescription() { if (selectedGPXFiles.size() == 1) { GPXFile currentGPX = app.getSavingTrackHelper().getCurrentGpx(); if (selectedGPXFiles.get(0).getGpxFile() == currentGPX) { return app.getString(R.string.current_track); } File file = new File(selectedGPXFiles.get(0).getGpxFile().path); return Algorithms.getFileNameWithoutExtension(file).replace('_', ' '); } else if (selectedGPXFiles.size() == 0) { return null; } else { return app.getString(R.string.number_of_gpx_files_selected_pattern, selectedGPXFiles.size()); } } public SelectedGpxFile getSelectedGPXFile(WptPt point) { for (SelectedGpxFile selectedGpxFile : selectedGPXFiles) { GPXFile gpxFile = selectedGpxFile.getGpxFile(); if (gpxFile.containsPoint(point) || gpxFile.containsRoutePoint(point)) { return selectedGpxFile; } } return null; } public static boolean processSplit(OsmandApplication app) { if (app == null || app.isApplicationInitializing()) { return false; } List<GpxDataItem> items = app.getGpxDbHelper().getSplitItems(); for (GpxDataItem dataItem : items) { SelectedGpxFile 
selectedGpxFile = app.getSelectedGpxHelper().getSelectedFileByPath(dataItem.getFile().getAbsolutePath()); if (selectedGpxFile != null && selectedGpxFile.getGpxFile() != null) { GPXFile gpxFile = selectedGpxFile.getGpxFile(); List<GpxDisplayGroup> groups = app.getSelectedGpxHelper().collectDisplayGroups(gpxFile); GpxSplitType splitType = GpxSplitType.getSplitTypeByTypeId(dataItem.getSplitType()); if (splitType == GpxSplitType.NO_SPLIT) { for (GpxDisplayGroup model : groups) { model.noSplit(app); } } else if (splitType == GpxSplitType.DISTANCE) { for (GpxDisplayGroup model : groups) { model.splitByDistance(app, dataItem.getSplitInterval(), dataItem.isJoinSegments()); } } else if (splitType == GpxSplitType.TIME) { for (GpxDisplayGroup model : groups) { model.splitByTime(app, (int) dataItem.getSplitInterval(), dataItem.isJoinSegments()); } } selectedGpxFile.setDisplayGroups(groups, app); } } return true; } private String getString(int resId, Object... formatArgs) { return app.getString(resId, formatArgs); } public GpxDisplayGroup buildGeneralGpxDisplayGroup(GPXFile g, Track t) { GpxDisplayGroup group = new GpxDisplayGroup(g); String name = getGroupName(g); group.gpxName = name; group.color = t.getColor(g.getColor(0)); group.setType(GpxDisplayItemType.TRACK_SEGMENT); group.setTrack(t); group.setName(getString(R.string.gpx_selection_track, name, "")); String d = ""; if (t.name != null && t.name.length() > 0) { d = t.name + " " + d; } group.setDescription(d); group.setGeneralTrack(true); processGroupTrack(app, group); return group; } public GpxDisplayGroup buildGpxDisplayGroup(GPXFile g, int trackIndex, String name) { Track t = g.tracks.get(trackIndex); GpxDisplayGroup group = new GpxDisplayGroup(g); group.gpxName = name; group.color = t.getColor(g.getColor(0)); group.setType(GpxDisplayItemType.TRACK_SEGMENT); group.setTrack(t); String ks = (trackIndex + 1) + ""; group.setName(getString(R.string.gpx_selection_track, name, g.tracks.size() == 1 ? 
"" : ks)); String d = ""; if (t.name != null && t.name.length() > 0) { d = t.name + " " + d; } group.setDescription(d); group.setGeneralTrack(t.generalTrack); processGroupTrack(app, group); return group; } public String getGroupName(GPXFile g) { String name = g.path; if (g.showCurrentTrack) { name = getString(R.string.shared_string_currently_recording_track); } else if (Algorithms.isEmpty(name)) { name = getString(R.string.current_route); } else { int i = name.lastIndexOf('/'); if (i >= 0) { name = name.substring(i + 1); } i = name.lastIndexOf('\\'); if (i >= 0) { name = name.substring(i + 1); } if (name.toLowerCase().endsWith(IndexConstants.GPX_FILE_EXT)) { name = name.substring(0, name.length() - 4); } name = name.replace('_', ' '); } return name; } public List<GpxDisplayGroup> collectDisplayGroups(GPXFile g) { List<GpxDisplayGroup> dg = new ArrayList<>(); String name = getGroupName(g); if (g.tracks.size() > 0) { for (int i = 0; i < g.tracks.size(); i++) { GpxDisplayGroup group = buildGpxDisplayGroup(g, i, name); if (group.getModifiableList().size() > 0) { dg.add(group); } } } if (g.routes.size() > 0) { int k = 0; for (Route route : g.routes) { GpxDisplayGroup group = new GpxDisplayGroup(g); group.gpxName = name; group.setType(GpxDisplayItemType.TRACK_ROUTE_POINTS); String d = getString(R.string.gpx_selection_number_of_points, name, route.points.size()); if (route.name != null && route.name.length() > 0) { d = route.name + " " + d; } group.setDescription(d); String ks = (k++) + ""; group.setName(getString(R.string.gpx_selection_route_points, name, g.routes.size() == 1 ? 
"" : ks)); dg.add(group); List<GpxDisplayItem> list = group.getModifiableList(); int t = 0; for (WptPt r : route.points) { GpxDisplayItem item = new GpxDisplayItem(); item.group = group; item.description = r.desc; item.expanded = true; item.name = r.name; t++; if (Algorithms.isEmpty(item.name)) { item.name = getString(R.string.gpx_selection_point, t + ""); } item.locationStart = r; item.locationEnd = r; list.add(item); } } } if (!g.isPointsEmpty()) { GpxDisplayGroup group = new GpxDisplayGroup(g); group.gpxName = name; group.setType(GpxDisplayItemType.TRACK_POINTS); group.setDescription(getString(R.string.gpx_selection_number_of_points, g.getPointsSize())); group.setName(getString(R.string.gpx_selection_points, name)); dg.add(group); List<GpxDisplayItem> list = group.getModifiableList(); int k = 0; for (WptPt r : g.getPoints()) { GpxDisplayItem item = new GpxDisplayItem(); item.group = group; item.description = r.desc; item.name = r.name; k++; if (Algorithms.isEmpty(item.name)) { item.name = getString(R.string.gpx_selection_point, k + ""); } item.expanded = true; item.locationStart = r; item.locationEnd = r; list.add(item); } } return dg; } private static void processGroupTrack(@NonNull OsmandApplication app, @NonNull GpxDisplayGroup group) { processGroupTrack(app, group, false); } private static void processGroupTrack(@NonNull OsmandApplication app, @NonNull GpxDisplayGroup group, boolean joinSegments) { if (group.track == null) { return; } List<GpxDisplayItem> list = group.getModifiableList(); String timeSpanClr = Algorithms.colorToString(ContextCompat.getColor(app, R.color.gpx_time_span_color)); String speedClr = Algorithms.colorToString(ContextCompat.getColor(app, R.color.gpx_speed)); String ascClr = Algorithms.colorToString(ContextCompat.getColor(app, R.color.gpx_altitude_asc)); String descClr = Algorithms.colorToString(ContextCompat.getColor(app, R.color.gpx_altitude_desc)); String distanceClr = Algorithms.colorToString(ContextCompat.getColor(app, 
R.color.gpx_distance_color));
// Elevation differences below this many meters are treated as noise.
final float eleThreshold = 3;
for (int segmentIdx = 0; segmentIdx < group.track.segments.size(); segmentIdx++) {
    TrkSegment segment = group.track.segments.get(segmentIdx);
    if (segment.points.size() == 0) {
        continue;
    }
    GPXTrackAnalysis[] as;
    boolean split = true;
    // Split the segment according to the group's settings, or analyze it whole.
    if (group.splitDistance > 0) {
        List<GPXTrackAnalysis> trackSegments = segment.splitByDistance(group.splitDistance, joinSegments);
        as = trackSegments.toArray(new GPXTrackAnalysis[0]);
    } else if (group.splitTime > 0) {
        List<GPXTrackAnalysis> trackSegments = segment.splitByTime(group.splitTime, joinSegments);
        as = trackSegments.toArray(new GPXTrackAnalysis[0]);
    } else {
        split = false;
        as = new GPXTrackAnalysis[] {GPXTrackAnalysis.segment(0, segment)};
    }
    // One display item per analysis chunk.
    for (GPXTrackAnalysis analysis : as) {
        GpxDisplayItem item = new GpxDisplayItem();
        item.group = group;
        if (split) {
            item.splitMetric = analysis.metricEnd;
            item.secondarySplitMetric = analysis.secondaryMetricEnd;
            item.splitName = formatSplitName(analysis.metricEnd, group, app);
            item.splitName += " (" + formatSecondarySplitName(analysis.secondaryMetricEnd, group, app) + ") ";
        }
        if (!group.generalTrack && !split) {
            item.trackSegmentName = buildTrackSegmentName(group.gpx, group.track, segment, app);
        }
        item.description = GpxUiHelper.getDescription(app, analysis, true);
        item.analysis = analysis;
        // Assemble a comma-separated, colorized summary name for the item.
        String name = "";
        if (!group.isSplitDistance()) {
            name += GpxUiHelper.getColorValue(distanceClr, OsmAndFormatter.getFormattedDistance(analysis.totalDistance, app));
        }
        if ((analysis.timeSpan > 0 || analysis.timeMoving > 0) && !group.isSplitTime()) {
            // Prefer moving time; fall back to the whole time span when it is zero.
            long tm = analysis.timeMoving;
            if (tm == 0) {
                tm = analysis.timeSpan;
            }
            if (name.length() != 0)
                name += ", ";
            name += GpxUiHelper.getColorValue(timeSpanClr, Algorithms.formatDuration((int) (tm / 1000), app.accessibilityEnabled()));
        }
        if (analysis.isSpeedSpecified()) {
            if (name.length() != 0)
                name += ", ";
            name += GpxUiHelper.getColorValue(speedClr, OsmAndFormatter.getFormattedSpeed(analysis.avgSpeed,
app));
}
// add min/max elevation data to split track analysis to facilitate easier track/segment identification
if (analysis.isElevationSpecified()) {
    if (name.length() != 0)
        name += ", ";
    name += GpxUiHelper.getColorValue(descClr, OsmAndFormatter.getFormattedAlt(analysis.minElevation, app));
    name += " - ";
    name += GpxUiHelper.getColorValue(ascClr, OsmAndFormatter.getFormattedAlt(analysis.maxElevation, app));
}
// Append ascent/descent arrows only when above the noise threshold.
if (analysis.isElevationSpecified() && (analysis.diffElevationUp > eleThreshold ||
        analysis.diffElevationDown > eleThreshold)) {
    if (name.length() != 0)
        name += ", ";
    if (analysis.diffElevationDown > eleThreshold) {
        name += GpxUiHelper.getColorValue(descClr, " \u2193 " + OsmAndFormatter.getFormattedAlt(analysis.diffElevationDown, app));
    }
    if (analysis.diffElevationUp > eleThreshold) {
        name += GpxUiHelper.getColorValue(ascClr, " \u2191 " + OsmAndFormatter.getFormattedAlt(analysis.diffElevationUp, app));
    }
}
item.name = name;
item.locationStart = analysis.locationStart;
item.locationEnd = analysis.locationEnd;
list.add(item);
}
}
}

// Formats the secondary split value: time for distance splits, distance for time splits.
private static String formatSecondarySplitName(double metricEnd, GpxDisplayGroup group, OsmandApplication app) {
    if (group.isSplitDistance()) {
        return Algorithms.formatDuration((int) metricEnd, app.accessibilityEnabled());
    } else {
        return OsmAndFormatter.getFormattedDistance((float) metricEnd, app);
    }
}

// Builds a user-facing name for a segment: track title alone when each track has
// one segment, segment title alone when the file has a single original track,
// otherwise both. (Return statements continue on the next chunk line.)
@NonNull
public static String buildTrackSegmentName(GPXFile gpxFile, Track track, TrkSegment segment, OsmandApplication app) {
    String trackTitle = getTrackTitle(gpxFile, track, app);
    String segmentTitle = getSegmentTitle(segment, track.segments.indexOf(segment), app);
    boolean oneSegmentPerTrack = gpxFile.getNonEmptySegmentsCount() == gpxFile.getNonEmptyTracksCount();
    // The general track counts as one extra, hence the "== 2" branch.
    boolean oneOriginalTrack = gpxFile.hasGeneralTrack() && gpxFile.getNonEmptyTracksCount() == 2
            || !gpxFile.hasGeneralTrack() && gpxFile.getNonEmptyTracksCount() == 1;
    if (oneSegmentPerTrack) {
        return trackTitle;
    } else if (oneOriginalTrack) {
        return segmentTitle;
    } else
{
    return app.getString(R.string.ltr_or_rtl_combine_via_dash, trackTitle, segmentTitle);
}
}

// "Track: <name-or-index>"; the index skips the synthetic general track when present.
@NonNull
private static String getTrackTitle(GPXFile gpxFile, Track track, OsmandApplication app) {
    String trackName;
    if (Algorithms.isBlank(track.name)) {
        int trackIdx = gpxFile.tracks.indexOf(track);
        int visibleTrackIdx = gpxFile.hasGeneralTrack() ? trackIdx : trackIdx + 1;
        trackName = String.valueOf(visibleTrackIdx);
    } else {
        trackName = track.name;
    }
    String trackString = app.getString(R.string.shared_string_gpx_track);
    return app.getString(R.string.ltr_or_rtl_combine_via_colon, trackString, trackName);
}

// "Segment: <name-or-1-based-index>".
@NonNull
private static String getSegmentTitle(TrkSegment segment, int segmentIdx, OsmandApplication app) {
    String segmentName = Algorithms.isBlank(segment.name) ? String.valueOf(segmentIdx + 1) : segment.name;
    String segmentString = app.getString(R.string.gpx_selection_segment_title);
    return app.getString(R.string.ltr_or_rtl_combine_via_colon, segmentString, segmentName);
}

// Formats a split boundary value: distance for distance splits (with precision
// chosen from the split interval), duration otherwise. (Continues on next chunk line.)
private static String formatSplitName(double metricEnd, GpxDisplayGroup group, OsmandApplication app) {
    if (group.isSplitDistance()) {
        MetricsConstants mc = app.getSettings().METRIC_SYSTEM.get();
        if (mc == MetricsConstants.KILOMETERS_AND_METERS) {
            final double sd = group.getSplitDistance();
            int digits = sd < 100 ? 2 : (sd < 1000 ?
1 : 0);
// Raise precision when the value is not an exact multiple of 1000 m / 100 m.
int rem1000 = (int) (metricEnd + 0.5) % 1000;
if (rem1000 > 1 && digits < 1) {
    digits = 1;
}
int rem100 = (int) (metricEnd + 0.5) % 100;
if (rem100 > 1 && digits < 2) {
    digits = 2;
}
return OsmAndFormatter.getFormattedRoundDistanceKm((float) metricEnd, digits, app);
} else {
    return OsmAndFormatter.getFormattedDistance((float) metricEnd, app);
}
} else {
    return Algorithms.formatDuration((int) metricEnd, app.accessibilityEnabled());
}
}

// Finds a selected file by exact path; iterates over a snapshot of the list to
// avoid concurrent-modification issues.
@Nullable
public SelectedGpxFile getSelectedFileByPath(String path) {
    List<SelectedGpxFile> newList = new ArrayList<>(selectedGPXFiles);
    for (SelectedGpxFile s : newList) {
        if (s.getGpxFile().path.equals(path)) {
            return s;
        }
    }
    return null;
}

// Finds a selected file whose path ends with "/<path>" (file-name match).
@Nullable
public SelectedGpxFile getSelectedFileByName(String path) {
    for (SelectedGpxFile s : selectedGPXFiles) {
        if (s.getGpxFile().path.endsWith("/" + path)) {
            return s;
        }
    }
    return null;
}

// Returns the selected file flagged as the currently recorded track, if any.
@Nullable
public SelectedGpxFile getSelectedCurrentRecordingTrack() {
    for (SelectedGpxFile s : selectedGPXFiles) {
        if (s.isShowCurrentTrack()) {
            return s;
        }
    }
    return null;
}

// Returns the first visible way point at exactly the given coordinates.
@Nullable
public WptPt getVisibleWayPointByLatLon(@NonNull LatLon latLon) {
    for (SelectedGpxFile selectedGpx : selectedGPXFiles) {
        GPXFile gpx;
        if (selectedGpx != null && (gpx = selectedGpx.getGpxFile()) != null) {
            for (WptPt pt : gpx.getPoints()) {
                if (latLon.equals(new LatLon(pt.getLatitude(), pt.getLongitude()))) {
                    return pt;
                }
            }
        }
    }
    return null;
}

// Selects the given files for display and persists the selection.
// (Parameter list continues on the next chunk line.)
public void setGpxFileToDisplay(GPXFile...
gpxs) {
// special case for gpx current route
for (GPXFile gpx : gpxs) {
    selectGpxFile(gpx, true, false);
}
saveCurrentSelections();
}

// Restores the selection state persisted in settings (JSON array): loads each file,
// applies stored color and hidden groups, and re-registers the current track entry.
// Entries that failed to load or are not backups trigger a re-save.
public void loadGPXTracks(IProgress p) {
    String load = app.getSettings().SELECTED_GPX.get();
    if (!Algorithms.isEmpty(load)) {
        try {
            JSONArray ar = new JSONArray(load);
            boolean save = false;
            for (int i = 0; i < ar.length(); i++) {
                JSONObject obj = ar.getJSONObject(i);
                boolean selectedByUser = obj.optBoolean(SELECTED_BY_USER, true);
                if (obj.has(FILE)) {
                    File fl = new File(obj.getString(FILE));
                    if (p != null) {
                        p.startTask(getString(R.string.loading_smth, fl.getName()), -1);
                    }
                    GPXFile gpx = GPXUtilities.loadGPXFile(fl);
                    if (obj.has(COLOR)) {
                        int clr = parseColor(obj.getString(COLOR));
                        gpx.setColor(clr);
                    }
                    if (gpx.error != null) {
                        save = true;
                    } else if (obj.has(BACKUP)) {
                        selectedGpxFilesBackUp.put(gpx, gpx.modifiedTime);
                    } else {
                        save = true;
                        SelectedGpxFile file = selectGpxFile(gpx, true, false, true, selectedByUser, false, false, false);
                        if (obj.has(HIDDEN_GROUPS)) {
                            readHiddenGroups(file, obj.getString(HIDDEN_GROUPS));
                        }
                    }
                    gpx.addGeneralTrack();
                } else if (obj.has(CURRENT_TRACK)) {
                    SelectedGpxFile file = savingTrackHelper.getCurrentTrack();
                    file.selectedByUser = selectedByUser;
                    // Copy-on-write update of the selection list.
                    List<SelectedGpxFile> newSelectedGPXFiles = new ArrayList<>(selectedGPXFiles);
                    newSelectedGPXFiles.add(file);
                    selectedGPXFiles = newSelectedGPXFiles;
                }
            }
            if (save) {
                saveCurrentSelections();
            }
        } catch (Exception e) {
            // Corrupted persisted state: drop it rather than fail on every start.
            app.getSettings().SELECTED_GPX.set("");
            e.printStackTrace();
        }
    }
}

// Serializes the file's hidden point groups as a comma-separated string; a null
// group name is stored as a single space. (Continues on the next chunk line.)
private String saveHiddenGroups(SelectedGpxFile selectedGpxFile) {
    StringBuilder stringBuilder = new StringBuilder();
    Iterator<String> it = selectedGpxFile.hiddenGroups.iterator();
    while (it.hasNext()) {
        String name = it.next();
        stringBuilder.append(name != null ?
name : " "); if (it.hasNext()) { stringBuilder.append(","); } } return stringBuilder.toString(); } public void readHiddenGroups(SelectedGpxFile selectedGpxFile, String text) { StringTokenizer toks = new StringTokenizer(text, ","); Set<String> res = new HashSet<>(); while (toks.hasMoreTokens()) { String token = toks.nextToken(); if (!Algorithms.isBlank(token)) { res.add(token); } else { res.add(null); } } selectedGpxFile.hiddenGroups = res; } private int parseColor(String color) { try { return Algorithms.isEmpty(color) ? 0 : Algorithms.parseColor(color); } catch (IllegalArgumentException e) { return 0; } } private void saveCurrentSelections() { JSONArray ar = new JSONArray(); for (SelectedGpxFile s : selectedGPXFiles) { if (s.gpxFile != null && !s.notShowNavigationDialog) { JSONObject obj = new JSONObject(); try { if (s.isShowCurrentTrack()) { obj.put(CURRENT_TRACK, true); } else if (!Algorithms.isEmpty(s.gpxFile.path)) { obj.put(FILE, s.gpxFile.path); if (s.gpxFile.getColor(0) != 0) { obj.put(COLOR, Algorithms.colorToString(s.gpxFile.getColor(0))); } obj.put(HIDDEN_GROUPS, saveHiddenGroups(s)); } obj.put(SELECTED_BY_USER, s.selectedByUser); } catch (JSONException e) { e.printStackTrace(); } ar.put(obj); } } for (Map.Entry<GPXFile, Long> s : selectedGpxFilesBackUp.entrySet()) { if (s != null) { try { JSONObject obj = new JSONObject(); if (Algorithms.isEmpty(s.getKey().path)) { obj.put(CURRENT_TRACK, true); } else { obj.put(FILE, s.getKey().path); } obj.put(SELECTED_BY_USER, true); obj.put(BACKUP, true); obj.put(BACKUPMODIFIEDTIME, s.getValue()); ar.put(obj); } catch (JSONException e) { e.printStackTrace(); } } } app.getSettings().SELECTED_GPX.set(ar.toString()); } private SelectedGpxFile selectGpxFileImpl(GPXFile gpx, GpxDataItem dataItem, boolean show, boolean notShowNavigationDialog, boolean syncGroup, boolean selectedByUser, boolean addToHistory) { boolean displayed; SelectedGpxFile sf; if (gpx != null && gpx.showCurrentTrack) { sf = 
savingTrackHelper.getCurrentTrack();
sf.notShowNavigationDialog = notShowNavigationDialog;
displayed = selectedGPXFiles.contains(sf);
if (!displayed && show) {
    sf.selectedByUser = selectedByUser;
}
} else {
    assert gpx != null;
    sf = getSelectedFileByPath(gpx.path);
    displayed = sf != null;
    if (show) {
        if (sf == null) {
            sf = new SelectedGpxFile();
        }
        if (dataItem != null) {
            sf.setJoinSegments(dataItem.isJoinSegments());
        }
        sf.setGpxFile(gpx, app);
        sf.notShowNavigationDialog = notShowNavigationDialog;
        sf.selectedByUser = selectedByUser;
    }
}
// Add or remove from the visible list only when the desired state changed.
if (sf != null && sf.isLoaded()) {
    if (displayed != show) {
        addRemoveSelected(show, sf);
    }
}
if (syncGroup) {
    syncGpxWithMarkers(gpx);
}
if (sf != null) {
    sf.splitProcessed = false;
}
// Record user-driven selections in the search history.
if (show && selectedByUser && addToHistory) {
    String relativePath = GpxUiHelper.getGpxFileRelativePath(app, gpx.path);
    GPXInfo gpxInfo = GpxUiHelper.getGpxInfoByFileName(app, relativePath);
    if (gpxInfo != null) {
        SearchHistoryHelper.getInstance(app).addNewItemToHistory(gpxInfo);
    }
}
return sf;
}

// Copy-on-write add/remove of a file in the visible selection list.
private void addRemoveSelected(boolean show, SelectedGpxFile sf) {
    List<SelectedGpxFile> newSelectedGPXFiles = new ArrayList<>(selectedGPXFiles);
    if (show) {
        if (!newSelectedGPXFiles.contains(sf)) {
            newSelectedGPXFiles.add(sf);
        }
    } else {
        newSelectedGPXFiles.remove(sf);
    }
    selectedGPXFiles = newSelectedGPXFiles;
}

// Re-persists the selection when the given file is currently selected.
public void updateSelectedGpxFile(SelectedGpxFile selectedGpxFile) {
    if (selectedGPXFiles.contains(selectedGpxFile)) {
        saveCurrentSelections();
    }
}

// Shorthand overload with sync, user-selection and marker support enabled.
public SelectedGpxFile selectGpxFile(GPXFile gpx, boolean show, boolean notShowNavigationDialog) {
    return selectGpxFile(gpx, show, notShowNavigationDialog, true, true, true);
}

// Full overload taking an explicit GpxDataItem; optionally persists the selection.
// (Body continues on the next chunk line.)
public SelectedGpxFile selectGpxFile(GPXFile gpx, GpxDataItem dataItem, boolean show,
                                     boolean notShowNavigationDialog, boolean syncGroup,
                                     boolean selectedByUser, boolean addToHistory, boolean saveSelection) {
    SelectedGpxFile sf = selectGpxFileImpl(gpx, dataItem, show, notShowNavigationDialog, syncGroup, selectedByUser, addToHistory);
if (saveSelection) {
    saveCurrentSelections();
}
return sf;
}

// Overload that always adds to history and persists the selection.
public SelectedGpxFile selectGpxFile(GPXFile gpx, boolean show, boolean notShowNavigationDialog,
                                     boolean syncGroup, boolean selectedByUser, boolean canAddToMarkers) {
    return selectGpxFile(gpx, show, notShowNavigationDialog, syncGroup, selectedByUser, canAddToMarkers, true, true);
}

// Looks up the file's database item and, when allowed, enables its marker group
// before delegating to the GpxDataItem overload.
public SelectedGpxFile selectGpxFile(GPXFile gpx, boolean show, boolean notShowNavigationDialog,
                                     boolean syncGroup, boolean selectedByUser, boolean canAddToMarkers,
                                     boolean addToHistory, boolean saveSelection) {
    GpxDataItem dataItem = app.getGpxDbHelper().getItem(new File(gpx.path));
    if (canAddToMarkers && show && dataItem != null && dataItem.isShowAsMarkers()) {
        app.getMapMarkersHelper().addOrEnableGroup(gpx);
    }
    return selectGpxFile(gpx, dataItem, show, notShowNavigationDialog, syncGroup, selectedByUser, addToHistory, saveSelection);
}

// Removes all points from the file and re-syncs its marker group.
public void clearPoints(GPXFile gpxFile) {
    gpxFile.clearPoints();
    syncGpxWithMarkers(gpxFile);
}

// Adds a single point to the file and re-syncs its marker group.
public void addPoint(WptPt point, GPXFile gpxFile) {
    gpxFile.addPoint(point);
    syncGpxWithMarkers(gpxFile);
}

// Adds multiple points to the file and re-syncs its marker group.
// (Parameter list continues on the next chunk line.)
public void addPoints(Collection<?
extends WptPt> collection, GPXFile gpxFile) {
gpxFile.addPoints(collection);
syncGpxWithMarkers(gpxFile);
}

// Deletes a point from the file, re-syncs markers, and reports success.
public boolean removePoint(WptPt point, GPXFile gpxFile) {
    boolean res = gpxFile.deleteWptPt(point);
    syncGpxWithMarkers(gpxFile);
    return res;
}

// Triggers marker-group synchronization for the file, if it has a marker group.
private void syncGpxWithMarkers(GPXFile gpxFile) {
    MapMarkersHelper mapMarkersHelper = app.getMapMarkersHelper();
    MapMarkersGroup group = mapMarkersHelper.getMarkersGroup(gpxFile);
    if (group != null) {
        mapMarkersHelper.runSynchronization(group);
    }
}

// Wrapper around a GPXFile selected for display: caches analysis, processed
// segments and display groups, invalidated via the file's modifiedTime.
public static class SelectedGpxFile {

    public boolean notShowNavigationDialog = false;
    public boolean selectedByUser = true;

    private GPXFile gpxFile;
    private GPXTrackAnalysis trackAnalysis;     // cached; rebuilt in update()
    private Set<String> hiddenGroups = new HashSet<>();
    private List<TrkSegment> processedPointsToDisplay = new ArrayList<>();
    private List<GpxDisplayGroup> displayGroups;
    private int color;                          // color of the first track, if any
    private long modifiedTime = -1;             // gpxFile.modifiedTime at last update()
    private boolean routePoints;                // segments came from route points
    private boolean joinSegments;
    private boolean showCurrentTrack;
    private boolean splitProcessed = false;

    // Attaches the file, captures its first track's color and processes its points.
    public void setGpxFile(GPXFile gpxFile, OsmandApplication app) {
        this.gpxFile = gpxFile;
        if (gpxFile.tracks.size() > 0) {
            this.color = gpxFile.tracks.get(0).getColor(0);
        }
        processPoints(app);
    }

    // A file is considered loaded once it carries a real modification time.
    public boolean isLoaded() {
        return gpxFile.modifiedTime != -1;
    }

    // Returns the cached analysis, refreshing it when the file changed.
    public GPXTrackAnalysis getTrackAnalysis(OsmandApplication app) {
        if (modifiedTime != gpxFile.modifiedTime) {
            update(app);
        }
        return trackAnalysis;
    }

    // Recomputes the analysis and invalidates derived caches.
    // (Body continues on the next chunk line.)
    private void update(OsmandApplication app) {
        modifiedTime = gpxFile.modifiedTime;
        trackAnalysis = gpxFile.getAnalysis(
                Algorithms.isEmpty(gpxFile.path) ?
System.currentTimeMillis() : new File(gpxFile.path).lastModified());
displayGroups = null;
splitProcessed = GpxSelectionHelper.processSplit(app);
}

// Rebuilds the displayable segments; falls back to route points when the file
// has no track points.
public void processPoints(OsmandApplication app) {
    update(app);
    this.processedPointsToDisplay = gpxFile.proccessPoints();
    if (this.processedPointsToDisplay.isEmpty()) {
        this.processedPointsToDisplay = gpxFile.processRoutePoints();
        routePoints = !this.processedPointsToDisplay.isEmpty();
    }
}

// True when the displayed segments were derived from route points.
public boolean isRoutePoints() {
    return routePoints;
}

// Segments to render: the general (joined) track when joining is on, otherwise
// the processed per-segment list.
public List<TrkSegment> getPointsToDisplay() {
    if (joinSegments) {
        if (gpxFile != null && gpxFile.getGeneralTrack() != null) {
            return gpxFile.getGeneralTrack().segments;
        } else {
            return Collections.emptyList();
        }
    }
    return processedPointsToDisplay;
}

public List<TrkSegment> getModifiablePointsToDisplay() {
    return processedPointsToDisplay;
}

// Read-only view of the hidden point-group names.
public Set<String> getHiddenGroups() {
    return Collections.unmodifiableSet(hiddenGroups);
}

public void addHiddenGroups(String group) {
    hiddenGroups.add(group);
}

public void removeHiddenGroups(String group) {
    hiddenGroups.remove(group);
}

public GPXFile getGpxFile() {
    return gpxFile;
}

public GPXFile getModifiableGpxFile() {
    // call process points after
    return gpxFile;
}

public boolean isShowCurrentTrack() {
    return showCurrentTrack;
}

public void setShowCurrentTrack(boolean showCurrentTrack) {
    this.showCurrentTrack = showCurrentTrack;
}

public boolean isJoinSegments() {
    return joinSegments;
}

public void setJoinSegments(boolean joinSegments) {
    this.joinSegments = joinSegments;
}

public int getColor() {
    return color;
}

// Forces the next getDisplayGroups() call to re-run split processing.
public void resetSplitProcessed() {
    splitProcessed = false;
}

// Returns the cached display groups, refreshing when the file changed or split
// processing is pending.
public List<GpxDisplayGroup> getDisplayGroups(OsmandApplication app) {
    if (modifiedTime != gpxFile.modifiedTime || !splitProcessed) {
        update(app);
    }
    return displayGroups;
}

public void setDisplayGroups(List<GpxDisplayGroup> displayGroups, OsmandApplication app) {
    if (modifiedTime != gpxFile.modifiedTime) {
        update(app);
    }
    this.displayGroups = displayGroups;
}

public
boolean isFollowTrack(OsmandApplication app) {
// True when this file is the one currently used for GPX route following.
GPXRouteParamsBuilder routeParams = app.getRoutingHelper().getCurrentGPXRoute();
if (routeParams != null) {
    return gpxFile.path.equals(routeParams.getFile().path);
}
return false;
}
}

// Kind of content a display group represents.
public enum GpxDisplayItemType {
    TRACK_SEGMENT,
    TRACK_POINTS,
    TRACK_ROUTE_POINTS
}

// A displayable grouping of GPX content (one track, one route, or way points)
// together with its split configuration and rendered items.
public static class GpxDisplayGroup {

    private GpxDisplayItemType type = GpxDisplayItemType.TRACK_SEGMENT;
    private List<GpxDisplayItem> list = new ArrayList<>();
    private final GPXFile gpx;
    private String gpxName;
    private String name;
    private String description;
    private Track track;
    private double splitDistance = -1;   // meters; -1 = no distance split
    private int splitTime = -1;          // seconds; -1 = no time split
    private int color;
    private boolean generalTrack;

    public GpxDisplayGroup(GPXFile gpx) {
        this.gpx = gpx;
    }

    public void setTrack(Track track) {
        this.track = track;
    }

    public GPXFile getGpx() {
        return gpx;
    }

    public Track getTrack() {
        return track;
    }

    // Shallow clone sharing the track/gpx but with its own item list copy.
    public GpxDisplayGroup cloneInstance() {
        GpxDisplayGroup group = new GpxDisplayGroup(gpx);
        group.type = type;
        group.name = name;
        group.description = description;
        group.track = track;
        group.list = new ArrayList<>(list);
        return group;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getGpxName() {
        return gpxName;
    }

    public String getName() {
        return name;
    }

    public List<GpxDisplayItem> getModifiableList() {
        return list;
    }

    public GpxDisplayItemType getType() {
        return type;
    }

    public void setType(GpxDisplayItemType type) {
        this.type = type;
    }

    public boolean isSplitDistance() {
        return splitDistance > 0;
    }

    public double getSplitDistance() {
        return splitDistance;
    }

    public boolean isSplitTime() {
        return splitTime > 0;
    }

    public int getSplitTime() {
        return splitTime;
    }

    public String getGroupName() {
        return name;
    }

    // Clears any split and rebuilds the items as one unsplit track.
    // (Call continues on the next chunk line.)
    public void noSplit(OsmandApplication app) {
        list.clear();
        splitDistance = -1;
        splitTime = -1;
        processGroupTrack(app,
this);
}

// Rebuilds the items split every `meters` of distance.
public void splitByDistance(OsmandApplication app, double meters, boolean joinSegments) {
    list.clear();
    splitDistance = meters;
    splitTime = -1;
    processGroupTrack(app, this, joinSegments);
}

// Rebuilds the items split every `seconds` of time.
public void splitByTime(OsmandApplication app, int seconds, boolean joinSegments) {
    list.clear();
    splitDistance = -1;
    splitTime = seconds;
    processGroupTrack(app, this, joinSegments);
}

public int getColor() {
    return color;
}

public void setColor(int color) {
    this.color = color;
}

public boolean isGeneralTrack() {
    return generalTrack;
}

public void setGeneralTrack(boolean generalTrack) {
    this.generalTrack = generalTrack;
}
}

// One row in a GPX display list: analysis data plus presentation state (name,
// description, chart configuration, map highlight position).
public static class GpxDisplayItem {

    public GPXTrackAnalysis analysis;
    public GpxDisplayGroup group;
    public WptPt locationStart;
    public WptPt locationEnd;
    public double splitMetric = -1;
    public double secondarySplitMetric = -1;
    public String trackSegmentName;
    public String splitName;
    public String name;
    public String description;
    public String url;
    public Bitmap image;
    public boolean expanded;
    public boolean wasHidden = true;
    public WptPt locationOnMap;
    public GPXDataSetType[] chartTypes;
    public GPXDataSetAxisType chartAxisType = GPXDataSetAxisType.DISTANCE;
    public ChartPointLayer chartPointLayer = ChartPointLayer.GPX;
    public Matrix chartMatrix;
    public float chartHighlightPos = -1f;

    public boolean isGeneralTrack() {
        return group != null && group.isGeneralTrack();
    }
}

// Starts an async (de)selection of multiple files, cancelling any run in progress.
public void runSelection(Map<String, Boolean> selectedItems, SelectGpxTaskListener gpxTaskListener) {
    if (selectGpxTask != null && (selectGpxTask.getStatus() == AsyncTask.Status.RUNNING)) {
        selectGpxTask.cancel(false);
    }
    selectGpxTask = new SelectGpxTask(selectedItems, gpxTaskListener);
    selectGpxTask.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
}

// Progress callbacks for SelectGpxTask.
public interface SelectGpxTaskListener {

    void gpxSelectionInProgress();

    void gpxSelectionStarted();

    void gpxSelectionFinished();
}

// Background task that loads and selects the files chosen in `selectedItems`.
// (Field declaration continues on the next chunk line.)
public class SelectGpxTask extends AsyncTask<Void, Void, String> {

    private final Set<GPXFile>
originalSelectedItems = new HashSet<>();
private final Map<String, Boolean> selectedItems;   // path (or CURRENT_TRACK) -> visible
private final SelectGpxTaskListener gpxTaskListener;

SelectGpxTask(Map<String, Boolean> selectedItems, SelectGpxTaskListener gpxTaskListener) {
    this.selectedItems = selectedItems;
    this.gpxTaskListener = gpxTaskListener;
}

// Loads each file made visible (off the UI thread) and selects it.
@Override
protected String doInBackground(Void... params) {
    for (GPXFile gpxFile : originalSelectedItems) {
        if (isCancelled()) {
            break;
        }
        if (!gpxFile.showCurrentTrack) {
            gpxFile = GPXUtilities.loadGPXFile(new File(gpxFile.path));
        }
        selectGpxFile(gpxFile, true, false);
        publishProgress();
    }
    return "";
}

@Override
protected void onProgressUpdate(Void... values) {
    gpxTaskListener.gpxSelectionInProgress();
}

@Override
protected void onPreExecute() {
    collectSelectedItems();
    gpxTaskListener.gpxSelectionStarted();
}

// Resolves each requested path to a SelectedGpxFile (creating placeholders for
// unknown paths), applies visibility immediately, and queues visible files for
// loading in doInBackground().
private void collectSelectedItems() {
    for (String filePath : selectedItems.keySet()) {
        SelectedGpxFile sf;
        if (!filePath.equals(CURRENT_TRACK)) {
            sf = getSelectedFileByPath(filePath);
            if (sf == null) {
                sf = new SelectedGpxFile();
                sf.setGpxFile(new GPXFile(null), app);
            }
            sf.getGpxFile().path = filePath;
        } else {
            sf = getSelectedCurrentRecordingTrack();
            if (sf == null) {
                sf = savingTrackHelper.getCurrentTrack();
            }
        }
        boolean visible = false;
        if (selectedItems.get(filePath) != null) {
            visible = selectedItems.get(filePath);
        }
        if (visible) {
            // Mark as not loaded so doInBackground() reloads it from disk.
            if (!sf.isShowCurrentTrack()) {
                sf.getGpxFile().modifiedTime = -1;
            }
            originalSelectedItems.add(sf.getGpxFile());
        }
        addRemoveSelected(visible, sf);
    }
}

@Override
protected void onPostExecute(String result) {
    if (gpxTaskListener != null) {
        gpxTaskListener.gpxSelectionFinished();
    }
}
}
}
// SPDX-License-Identifier: GPL-2.0 /* * Timer events oriented CPU idle governor * * Copyright (C) 2018 Intel Corporation * Author: Rafael J. Wysocki <[email protected]> * * The idea of this governor is based on the observation that on many systems * timer events are two or more orders of magnitude more frequent than any * other interrupts, so they are likely to be the most significant source of CPU * wakeups from idle states. Moreover, information about what happened in the * (relatively recent) past can be used to estimate whether or not the deepest * idle state with target residency within the time to the closest timer is * likely to be suitable for the upcoming idle time of the CPU and, if not, then * which of the shallower idle states to choose. * * Of course, non-timer wakeup sources are more important in some use cases and * they can be covered by taking a few most recent idle time intervals of the * CPU into account. However, even in that case it is not necessary to consider * idle duration values greater than the time till the closest timer, as the * patterns that they may belong to produce average values close enough to * the time till the closest timer (sleep length) anyway. * * Thus this governor estimates whether or not the upcoming idle time of the CPU * is likely to be significantly shorter than the sleep length and selects an * idle state for it in accordance with that, as follows: * * - Find an idle state on the basis of the sleep length and state statistics * collected over time: * * o Find the deepest idle state whose target residency is less than or equal * to the sleep length. * * o Select it if it matched both the sleep length and the observed idle * duration in the past more often than it matched the sleep length alone * (i.e. the observed idle duration was significantly shorter than the sleep * length matched by it). * * o Otherwise, select the shallower state with the greatest matched "early" * wakeups metric. 
* * - If the majority of the most recent idle duration values are below the * target residency of the idle state selected so far, use those values to * compute the new expected idle duration and find an idle state matching it * (which has to be shallower than the one selected so far). */ #include <linux/cpuidle.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/sched/clock.h> #include <linux/tick.h> /* * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value * is used for decreasing metrics on a regular basis. */ #define PULSE 1024 #define DECAY_SHIFT 3 /* * Number of the most recent idle duration values to take into consideration for * the detection of wakeup patterns. */ #define INTERVALS 8 /** * struct teo_idle_state - Idle state data used by the TEO cpuidle governor. * @early_hits: "Early" CPU wakeups "matching" this state. * @hits: "On time" CPU wakeups "matching" this state. * @misses: CPU wakeups "missing" this state. * * A CPU wakeup is "matched" by a given idle state if the idle duration measured * after the wakeup is between the target residency of that state and the target * residency of the next one (or if this is the deepest available idle state, it * "matches" a CPU wakeup when the measured idle duration is at least equal to * its target residency). * * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if * it occurs significantly earlier than the closest expected timer event (that * is, early enough to match an idle state shallower than the one matching the * time till the closest timer event). Otherwise, the wakeup is "on time", or * it is a "hit". * * A "miss" occurs when the given state doesn't match the wakeup, but it matches * the time till the closest timer event used for idle state selection. */ struct teo_idle_state { unsigned int early_hits; unsigned int hits; unsigned int misses; }; /** * struct teo_cpu - CPU data used by the TEO cpuidle governor. 
* @time_span_ns: Time between idle state selection and post-wakeup update. * @sleep_length_ns: Time till the closest timer event (at the selection time). * @states: Idle states data corresponding to this CPU. * @interval_idx: Index of the most recent saved idle interval. * @intervals: Saved idle duration values. */ struct teo_cpu { s64 time_span_ns; s64 sleep_length_ns; struct teo_idle_state states[CPUIDLE_STATE_MAX]; int interval_idx; u64 intervals[INTERVALS]; }; static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); /** * teo_update - Update CPU data after wakeup. * @drv: cpuidle driver containing state data. * @dev: Target CPU. */ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); int i, idx_hit = 0, idx_timer = 0; unsigned int hits, misses; u64 measured_ns; if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) { /* * One of the safety nets has triggered or the wakeup was close * enough to the closest timer event expected at the idle state * selection time to be discarded. */ measured_ns = U64_MAX; } else { u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns; /* * The computations below are to determine whether or not the * (saved) time till the next timer event and the measured idle * duration fall into the same "bin", so use last_residency_ns * for that instead of time_span_ns which includes the cpuidle * overhead. */ measured_ns = dev->last_residency_ns; /* * The delay between the wakeup and the first instruction * executed by the CPU is not likely to be worst-case every * time, so take 1/2 of the exit latency as a very rough * approximation of the average of it. */ if (measured_ns >= lat_ns) measured_ns -= lat_ns / 2; else measured_ns /= 2; } /* * Decay the "early hits" metric for all of the states and find the * states matching the sleep length and the measured idle duration. 
*/ for (i = 0; i < drv->state_count; i++) { unsigned int early_hits = cpu_data->states[i].early_hits; cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT; if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) { idx_timer = i; if (drv->states[i].target_residency_ns <= measured_ns) idx_hit = i; } } /* * Update the "hits" and "misses" data for the state matching the sleep * length. If it matches the measured idle duration too, this is a hit, * so increase the "hits" metric for it then. Otherwise, this is a * miss, so increase the "misses" metric for it. In the latter case * also increase the "early hits" metric for the state that actually * matches the measured idle duration. */ hits = cpu_data->states[idx_timer].hits; hits -= hits >> DECAY_SHIFT; misses = cpu_data->states[idx_timer].misses; misses -= misses >> DECAY_SHIFT; if (idx_timer == idx_hit) { hits += PULSE; } else { misses += PULSE; cpu_data->states[idx_hit].early_hits += PULSE; } cpu_data->states[idx_timer].misses = misses; cpu_data->states[idx_timer].hits = hits; /* * Save idle duration values corresponding to non-timer wakeups for * pattern detection. */ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns; if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } static bool teo_time_ok(u64 interval_ns) { return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC; } /** * teo_find_shallower_state - Find shallower idle state matching given duration. * @drv: cpuidle driver containing state data. * @dev: Target CPU. * @state_idx: Index of the capping idle state. * @duration_ns: Idle duration value to match. 
*/ static int teo_find_shallower_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, int state_idx, s64 duration_ns) { int i; for (i = state_idx - 1; i >= 0; i--) { if (dev->states_usage[i].disable) continue; state_idx = i; if (drv->states[i].target_residency_ns <= duration_ns) break; } return state_idx; } /** * teo_select - Selects the next idle state to enter. * @drv: cpuidle driver containing state data. * @dev: Target CPU. * @stop_tick: Indication on whether or not to stop the scheduler tick. */ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, bool *stop_tick) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); s64 latency_req = cpuidle_governor_latency_req(dev->cpu); int max_early_idx, prev_max_early_idx, constraint_idx, idx0, idx, i; unsigned int hits, misses, early_hits; ktime_t delta_tick; s64 duration_ns; if (dev->last_state_idx >= 0) { teo_update(drv, dev); dev->last_state_idx = -1; } cpu_data->time_span_ns = local_clock(); duration_ns = tick_nohz_get_sleep_length(&delta_tick); cpu_data->sleep_length_ns = duration_ns; hits = 0; misses = 0; early_hits = 0; max_early_idx = -1; prev_max_early_idx = -1; constraint_idx = drv->state_count; idx = -1; idx0 = idx; for (i = 0; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; if (dev->states_usage[i].disable) { /* * Ignore disabled states with target residencies beyond * the anticipated idle duration. */ if (s->target_residency_ns > duration_ns) continue; /* * This state is disabled, so the range of idle duration * values corresponding to it is covered by the current * candidate state, but still the "hits" and "misses" * metrics of the disabled state need to be used to * decide whether or not the state covering the range in * question is good enough. 
*/ hits = cpu_data->states[i].hits; misses = cpu_data->states[i].misses; if (early_hits >= cpu_data->states[i].early_hits || idx < 0) continue; /* * If the current candidate state has been the one with * the maximum "early hits" metric so far, the "early * hits" metric of the disabled state replaces the * current "early hits" count to avoid selecting a * deeper state with lower "early hits" metric. */ if (max_early_idx == idx) { early_hits = cpu_data->states[i].early_hits; continue; } /* * The current candidate state is closer to the disabled * one than the current maximum "early hits" state, so * replace the latter with it, but in case the maximum * "early hits" state index has not been set so far, * check if the current candidate state is not too * shallow for that role. */ if (teo_time_ok(drv->states[idx].target_residency_ns)) { prev_max_early_idx = max_early_idx; early_hits = cpu_data->states[i].early_hits; max_early_idx = idx; } continue; } if (idx < 0) { idx = i; /* first enabled state */ hits = cpu_data->states[i].hits; misses = cpu_data->states[i].misses; idx0 = i; } if (s->target_residency_ns > duration_ns) break; if (s->exit_latency_ns > latency_req && constraint_idx > i) constraint_idx = i; idx = i; hits = cpu_data->states[i].hits; misses = cpu_data->states[i].misses; if (early_hits < cpu_data->states[i].early_hits && teo_time_ok(drv->states[i].target_residency_ns)) { prev_max_early_idx = max_early_idx; early_hits = cpu_data->states[i].early_hits; max_early_idx = i; } } /* * If the "hits" metric of the idle state matching the sleep length is * greater than its "misses" metric, that is the one to use. Otherwise, * it is more likely that one of the shallower states will match the * idle duration observed after wakeup, so take the one with the maximum * "early hits" metric, but if that cannot be determined, just use the * state selected so far. 
*/ if (hits <= misses) { /* * The current candidate state is not suitable, so take the one * whose "early hits" metric is the maximum for the range of * shallower states. */ if (idx == max_early_idx) max_early_idx = prev_max_early_idx; if (max_early_idx >= 0) { idx = max_early_idx; duration_ns = drv->states[idx].target_residency_ns; } } /* * If there is a latency constraint, it may be necessary to use a * shallower idle state than the one selected so far. */ if (constraint_idx < idx) idx = constraint_idx; if (idx < 0) { idx = 0; /* No states enabled. Must use 0. */ } else if (idx > idx0) { unsigned int count = 0; u64 sum = 0; /* * The target residencies of at least two different enabled idle * states are less than or equal to the current expected idle * duration. Try to refine the selection using the most recent * measured idle duration values. * * Count and sum the most recent idle duration values less than * the current expected idle duration value. */ for (i = 0; i < INTERVALS; i++) { u64 val = cpu_data->intervals[i]; if (val >= duration_ns) continue; count++; sum += val; } /* * Give up unless the majority of the most recent idle duration * values are in the interesting range. */ if (count > INTERVALS / 2) { u64 avg_ns = div64_u64(sum, count); /* * Avoid spending too much time in an idle state that * would be too shallow. */ if (teo_time_ok(avg_ns)) { duration_ns = avg_ns; if (drv->states[idx].target_residency_ns > avg_ns) idx = teo_find_shallower_state(drv, dev, idx, avg_ns); } } } /* * Don't stop the tick if the selected state is a polling one or if the * expected idle duration is shorter than the tick period length. */ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) { *stop_tick = false; /* * The tick is not going to be stopped, so if the target * residency of the state to be returned is not within the time * till the closest timer including the tick, try to correct * that. 
*/ if (idx > idx0 && drv->states[idx].target_residency_ns > delta_tick) idx = teo_find_shallower_state(drv, dev, idx, delta_tick); } return idx; } /** * teo_reflect - Note that governor data for the CPU need to be updated. * @dev: Target CPU. * @state: Entered state. */ static void teo_reflect(struct cpuidle_device *dev, int state) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); dev->last_state_idx = state; /* * If the wakeup was not "natural", but triggered by one of the safety * nets, assume that the CPU might have been idle for the entire sleep * length time. */ if (dev->poll_time_limit || (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) { dev->poll_time_limit = false; cpu_data->time_span_ns = cpu_data->sleep_length_ns; } else { cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns; } } /** * teo_enable_device - Initialize the governor's data for the target CPU. * @drv: cpuidle driver (not used). * @dev: Target CPU. */ static int teo_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); int i; memset(cpu_data, 0, sizeof(*cpu_data)); for (i = 0; i < INTERVALS; i++) cpu_data->intervals[i] = U64_MAX; return 0; } static struct cpuidle_governor teo_governor = { .name = "teo", .rating = 19, .enable = teo_enable_device, .select = teo_select, .reflect = teo_reflect, }; static int __init teo_governor_init(void) { return cpuidle_register_governor(&teo_governor); } postcore_initcall(teo_governor_init);
import React from 'react'

/** Props for the embedded Spotify player. */
interface SpotifyPlayerProps {
  /** Spotify embed URL loaded in the iframe. */
  url: string
  /** Height attribute for the iframe (width is always 100%). */
  height: string
}

/**
 * Renders a Spotify embed (playlist/track) inside an iframe.
 *
 * Fixes over the previous revision:
 * - removed the stray `<gh_stars>` scaffolding line that made the module
 *   invalid TypeScript;
 * - the iframe `title` is now a stable, descriptive label instead of
 *   `Date.now().toString()`, which produced a new, meaningless accessible
 *   name on every render.
 */
const SpotifyPlayer = ({ url, height }: SpotifyPlayerProps) => {
  return (
    <div className='spotify-playlist'>
      <iframe
        src={url}
        title='Spotify player'
        width='100%'
        height={height}
        frameBorder='0'
        allowTransparency
        allow='encrypted-media'
      />
    </div>
  )
}

export default SpotifyPlayer
/**
 * Return a {@link NumericDoubleValues} instance that can be used to sort root documents
 * with this mode, the provided values and filters for root/inner documents.
 *
 * For every root document, the values of its inner documents will be aggregated.
 * If none of the inner documents has a value, then <code>missingValue</code> is returned.
 *
 * Allowed Modes: SUM, AVG, MIN, MAX
 *
 * NOTE: Calling the returned instance on docs that are not root docs is illegal.
 * The returned instance can only evaluate the current and upcoming docs.
 */
public NumericDoubleValues select(final SortedNumericDoubleValues values, final double missingValue, final BitSet rootDocs, final DocIdSetIterator innerDocs, int maxDoc) throws IOException {
    // Without both filters there is no root/inner distinction: fall back to
    // selecting over an empty value source so every doc yields missingValue.
    if (rootDocs == null || innerDocs == null) {
        return select(FieldData.emptySortedNumericDoubles(maxDoc), missingValue);
    }
    return new NumericDoubleValues() {

        // Root doc most recently evaluated; get() may only be called with
        // doc ids >= this one (innerDocs is a forward-only iterator).
        int lastSeenRootDoc = 0;
        // Cached aggregate for lastSeenRootDoc, so repeated calls are cheap.
        double lastEmittedValue = missingValue;

        @Override
        public double get(int rootDoc) {
            assert rootDocs.get(rootDoc) : "can only sort root documents";
            assert rootDoc >= lastSeenRootDoc : "can only evaluate current and upcoming root docs";
            if (rootDoc == lastSeenRootDoc) {
                return lastEmittedValue;
            }
            try {
                // The inner docs of rootDoc lie strictly between the previous
                // set bit in rootDocs and rootDoc itself.
                final int prevRootDoc = rootDocs.prevSetBit(rootDoc - 1);
                final int firstNestedDoc;
                if (innerDocs.docID() > prevRootDoc) {
                    // Iterator has already moved past the previous root doc.
                    firstNestedDoc = innerDocs.docID();
                } else {
                    firstNestedDoc = innerDocs.advance(prevRootDoc + 1);
                }

                lastSeenRootDoc = rootDoc;
                // pick(...) aggregates the inner docs' values per this mode
                // (SUM/AVG/MIN/MAX per the javadoc above).
                lastEmittedValue = pick(values, missingValue, innerDocs, firstNestedDoc, rootDoc);
                return lastEmittedValue;
            } catch (IOException e) {
                // NumericDoubleValues.get cannot declare checked exceptions.
                throw new RuntimeException(e);
            }
        }
    };
}
Effect of Slip and Convective Boundary Conditions on Entropy Generation in a Porous Channel due to Micropolar Fluid Flow Abstract This article presents the effect of convective heating and velocity slip on flow generation of an incompressible micropolar fluid through a porous channel. The flow is induced by a constant axial pressure gradient applied in the flow direction. The non-linear governing equations are linearized using the quasilinearization technique and then solved by Chebyshev spectral collocation method. The numerical values of the velocity, microrotation and temperature are used to derive the corresponding entropy generation number and Bejan number within the porous channel. The influences of pertinent parameters on velocity, microrotation, temperature, entropy generation and Bejan number are discussed through graphs. It is observed that the convective heating tends to increase the entropy generation within the channel.
import { Terminal } from '../src' import readline from 'readline' describe('Terminal', () => { test('Can write to the cli', () => { const action = jest.spyOn(process.stdout, 'write').mockImplementation(() => true) const t = new Terminal() t.write('foo') expect(action).toHaveBeenCalledWith('foo') }) test('Can write to the error cli', () => { const action = jest.spyOn(process.stderr, 'write').mockImplementation(() => true) const t = new Terminal() t.writeError('foo') expect(action).toHaveBeenCalledWith('foo') }) test('Can clear the console contents', () => { const action = jest.spyOn(console, 'clear') const t = new Terminal() t.clear() expect(action).toHaveBeenCalled() }) test('Can clear a single line of contents', () => { const action = jest.spyOn(readline, 'clearLine') const t = new Terminal() t.clearLine() expect(action).toHaveBeenCalledWith(process.stdout, -1) }) test('Can change the position of the cursor', () => { const action = jest.spyOn(readline, 'cursorTo') const t = new Terminal() t.cursorReset() expect(action).toHaveBeenCalledWith(process.stdout, 0) }) test('Can exit the current program', () => { const action = jest.spyOn(process, 'exit').mockImplementation(() => { // tslint:disable-next-line:no-string-throw throw 'EXIT' }) const t = new Terminal() expect(() => t.exit()).toThrow() expect(action).toHaveBeenCalled() }) test('Can exit the current program with an exitcode', () => { const action = jest.spyOn(process, 'exit').mockImplementation(() => { // tslint:disable-next-line:no-string-throw throw 'EXIT' }) const t = new Terminal() expect(() => t.exit(0)).toThrow() expect(action).toHaveBeenCalledWith(0) }) test('Get the enviroment', () => { ;(process.env as any).NODE_ENV = 'foobar' const t = new Terminal() expect(t.mode()).toBe('foobar') }) test('Can promt the user', async () => { jest.spyOn(process.stdout, 'write').mockImplementation(() => true) const action = jest.spyOn(readline, 'createInterface').mockImplementation(() => { return { close: () => null, 
question: (question: string, result: (answer: string) => void) => { result('y') }, } as any }) const t = new Terminal() expect(await t.question('Foo')).toBe('y') expect(action).toHaveBeenCalled() }) test('Can promt the user hidden', async () => { jest.spyOn(process.stdout, 'write').mockImplementation(() => true) const action = jest.spyOn(readline, 'createInterface').mockImplementation(() => { return { close: () => null, question: (question: string, result: (answer: string) => void) => { result('y') }, } as any }) const t = new Terminal() expect(await t.hiddenQuestion('Foo')).toBe('y') expect(action).toHaveBeenCalled() }) test('Get the size of the cli', () => { ;(process.stdout as any).columns = 123 ;(process.stdout as any).rows = 321 const t = new Terminal() expect(t.width()).toBe(123) expect(t.height()).toBe(321) }) })
module Main where

import Lib

-- | A value of type 'A', built with its (nullary) constructor.
--   NOTE(review): 'A' and its constructor are assumed to come from "Lib" —
--   confirm, since this module imports nothing else.
a :: A
a = A

-- | A value of type 'B', built with its (nullary) constructor.
b :: B
b = B

-- | An 'A' produced by 'f' (imported from "Lib").
x :: A
x = f

-- | Program entry point; delegates entirely to 'someFunc' from "Lib".
main :: IO ()
main = someFunc
<gh_stars>1-10 /** Copyright 2010-2019 Red Anchor Trading Co. Ltd. Distributed under the Boost Software License, Version 1.0. See <http://www.boost.org/LICENSE_1_0.txt> */ #include "fost-cli.hpp" #include <f5/cord/iostream.hpp> #include <fost/log> #include <iostream> namespace { const char *nl_space = "\n "; struct disp { typedef void result_type; std::ostream &channel; template<typename T> void operator()(T const &t) const { channel << nl_space << t << '\n' << std::endl; } void operator()(const fostlib::json::object_p &o) const { if (o->find(fostlib::string()) != o->end()) { fostlib::json copy = *o; fostlib::string m = fostlib::coerce<fostlib::string>(copy[""]); fostlib::jcursor("").del_key(copy); channel << nl_space << m << '\n'; if (copy.size()) channel << copy; channel << std::endl; } else { channel << '\n' << *o << std::endl; } } void operator()(const fostlib::json::array_p &a) const { if (a->size() > 0 && (*a)[0].isatom()) { channel << nl_space << fostlib::coerce<fostlib::string>((*a)[0]) << '\n'; for (std::size_t i(1); i != a->size(); ++i) { channel << (*a)[i] << '\n'; } channel << std::endl; } else { channel << '\n' << *a << std::endl; } } }; class ostream_logger { const std::size_t log_level; const bool colour; std::ostream &channel; public: ostream_logger(const fostlib::json &conf) : log_level(fostlib::coerce<fostlib::nullable<int>>(conf["log-level"]) .value_or(fostlib::log::error_level_tag::level())), colour(fostlib::coerce<fostlib::nullable<bool>>(conf["color"]) .value_or(false)), channel(conf["channel"] == fostlib::json("stderr") ? 
std::cerr : std::cout) {} bool operator()(const fostlib::log::message &m) { if (colour) { if (m.level() <= fostlib::log::debug_level_tag::level() + 0x100) { channel << "\33[0;37m"; } else if ( m.level() <= fostlib::log::info_level_tag::level() + 0x300) { channel << "\33[0;32m"; } else if ( m.level() <= fostlib::log::warning_level_tag::level() + 0x2000) { channel << "\33[1;33m"; } else if ( m.level() <= fostlib::log::error_level_tag::level() + 0x20000) { channel << "\33[0;31m"; } else { channel << "\33[1;31m"; } } if (m.level() >= log_level) { channel << m.when() << " " << m.name() << " " << m.module(); if (colour) { disp d{channel}; m.body().apply_visitor(d); } else { channel << '\n' << m.body() << std::endl; } } if (colour) { channel << "\33[0;39m"; } return true; } }; const fostlib::log::global_sink<ostream_logger> std_out("stdout"); }
<commit_msg>Allow editing amount field in expensenote <commit_before>from django.contrib import admin from expense.models import ExpenseNote class ExpenseNoteAdmin(admin.ModelAdmin): list_display = ['date', 'save_in_ledger', 'details', 'contact', 'credit_account', 'debit_account', 'amount'] list_filter = ['date', 'save_in_ledger'] readonly_fields = ['amount'] fields = ('date', 'contact', 'number', 'details', 'credit_account', 'debit_account', 'amount', 'save_in_ledger') admin.site.register(ExpenseNote, ExpenseNoteAdmin) <commit_after>from django.contrib import admin from expense.models import ExpenseNote class ExpenseNoteAdmin(admin.ModelAdmin): list_display = ['date', 'save_in_ledger', 'details', 'contact', 'credit_account', 'debit_account', 'amount'] list_filter = ['date', 'save_in_ledger'] fields = ('date', 'contact', 'number', 'details', 'credit_account', 'debit_account', 'amount', 'save_in_ledger') admin.site.register(ExpenseNote, ExpenseNoteAdmin)
def store( self, sitename ):
    """Prompt for and persist credentials for ``sitename``.

    Asks the user for a user name and password (via ``getpass``, so
    nothing is echoed), stores both encrypted in the in-memory database,
    then writes the whole database — serialized to JSON and encrypted —
    to the cache file and restricts its filesystem permissions.

    If credentials for ``sitename`` already exist, the user is asked to
    confirm before they are overwritten; declining aborts without changes.

    NOTE(review): assumes ``getpass`` and ``json_dumps`` are imported at
    module level, and that ``self.__crypto.encrypt`` accepts both str and
    bytes input — confirm against the rest of the class.
    """
    if sitename in self.__db['sitenames']:
        # Existing entry: require explicit confirmation before overwriting.
        if not self.__user_select( 'Overwrite existing credentials for "{}"? [Y/n] '.format( sitename ) ):
            return
    else:
        self.__db['sitenames'].append( sitename )

    print( 'Please enter credentials for "{}":'.format( sitename ) )

    # Encrypt each secret immediately and drop the plaintext reference
    # (`del value`) so it does not linger in this frame.
    key = '{}_user'.format( sitename )
    value = getpass( 'User name:' )
    self.__db[key] = self.__crypto.encrypt( value ).decode('utf-8')
    del value

    key = '{}_pwd'.format( sitename )
    value = getpass( 'Password:' )
    self.__db[key] = self.__crypto.encrypt( value ).decode('utf-8')
    del value

    # Persist the whole database: JSON-serialize, encrypt, write as bytes.
    with open( self.cache_file_name, 'wb' ) as f:
        serialized_content = json_dumps( self.__db )
        encrypted_content = self.__crypto.encrypt( serialized_content )
        f.write( encrypted_content )
    # Tighten file permissions so only the owner can read the cache.
    self.__file_permissions.restrict_access( self.cache_file_name )
#!python3
"""Solution for LeetCode 0118 - Pascal's Triangle.

Fix: removed the stray ``<filename>`` scaffolding line that made the
module invalid Python.
"""


class Solution(object):
    def generate(self, numRows):
        """Return the first ``numRows`` rows of Pascal's triangle.

        :type numRows: int
        :rtype: List[List[int]]

        ``numRows <= 0`` yields an empty list.
        """
        triangle = []
        for row in range(numRows):
            # Every row starts and ends with 1; each interior entry is the
            # sum of the two entries above it in the previous row.
            cur = [1] * (row + 1)
            for j in range(1, row):
                cur[j] = triangle[row - 1][j - 1] + triangle[row - 1][j]
            triangle.append(cur)
        return triangle


if __name__ == "__main__":
    print(Solution().generate(5))
import { useMemo } from "react";
import useGameContext from "../components/GameContext";

// Recovery screen shown when loading the saved game fails (corrupt or
// version-incompatible save). Lets the player copy out the raw save data,
// then hard-reset the game from scratch.
export default function Reset() {
  const context = useGameContext();
  // Snapshot the (possibly corrupt) save payload once on mount so the player
  // can back it up before wiping. NOTE(review): `saveAs(true)` presumably
  // returns the serialized save as a string — confirm against GameContext.
  const saveData = useMemo(() => context.saveAs(true), []);

  // Wipes all progress after an explicit confirmation, then reloads the page
  // so the app restarts with a clean state.
  function resetGame() {
    if (confirm("Are you sure you want to reset? This cannot be undone.")) {
      context.reset();
      window.location.reload();
    }
  }

  return (
    <div>
      <div className="panel options">
        <div className="title-bar">Load Failure!</div>
        <div className="full">
          Looks like something went very wrong in loading the game. It's
          possible that the game version is out of date, or that the save data
          is corrupt.
        </div>
        <hr className="separator" />
        <div className="full">
          For your records, here is the current contents of the save state.
          Keep this somewhere before resetting the game:
        </div>
        <div className="full center">
          {/* Read-only dump of the save so the player can copy it out. */}
          <textarea value={saveData} readOnly={true} />
        </div>
      </div>
      <div className="panel options reset">
        <div className="title-bar warning">Game Reset</div>
        <div className="full">
          WARNING: this will completely reset your game and delete all saved
          progress and settings. This will hard reset the game from beginning.
        </div>
        <div className="full right">
          <input
            type="button"
            value="Hard Reset Everything"
            onClick={resetGame}
          />
        </div>
      </div>
    </div>
  );
}
<filename>src/frames/file/add_directory_dialog.cpp ///////////////////////////////////////////////////////////////////////////// // Name: add_directory_dialog.cpp // Purpose: // Author: // Modified by: // Created: 05/09/2018 20:13:34 // RCS-ID: // Copyright: // Licence: ///////////////////////////////////////////////////////////////////////////// // Generated by DialogBlocks (unregistered), 05/09/2018 20:13:34 // For compilers that support precompilation, includes "wx/wx.h". ////@begin includes ////@end includes #include "add_directory_dialog.h" ////@begin XPM images ////@end XPM images /* * AddDirectoryDialog type definition */ IMPLEMENT_DYNAMIC_CLASS(AddDirectoryDialog, wxDialog) /* * AddDirectoryDialog event table definition */ BEGIN_EVENT_TABLE(AddDirectoryDialog, wxDialog) ////@begin AddDirectoryDialog event table entries ////@end AddDirectoryDialog event table entries END_EVENT_TABLE() /* * AddDirectoryDialog constructors */ AddDirectoryDialog::AddDirectoryDialog() { Init(); } AddDirectoryDialog::AddDirectoryDialog(wxWindow* parent, wxWindowID id, const wxString& caption, const wxPoint& pos, const wxSize& size, long style) { Init(); Create(parent, id, caption, pos, size, style); } /* * AddDirectoryDialog creator */ bool AddDirectoryDialog::Create(wxWindow* parent, wxWindowID id, const wxString& caption, const wxPoint& pos, const wxSize& size, long style) { ////@begin AddDirectoryDialog creation SetExtraStyle(wxWS_EX_VALIDATE_RECURSIVELY | wxWS_EX_BLOCK_EVENTS); wxDialog::Create(parent, id, caption, pos, size, style); CreateControls(); Centre(); ////@end AddDirectoryDialog creation return true; } /* * AddDirectoryDialog destructor */ AddDirectoryDialog::~AddDirectoryDialog() { ////@begin AddDirectoryDialog destruction ////@end AddDirectoryDialog destruction } /* * Member initialisation */ void AddDirectoryDialog::Init() { ////@begin AddDirectoryDialog member initialisation ////@end AddDirectoryDialog member initialisation } /* * Control creation for 
AddDirectoryDialog */ void AddDirectoryDialog::CreateControls() { ////@begin AddDirectoryDialog content construction // Generated by DialogBlocks, 05/09/2018 20:13:34 (unregistered) AddDirectoryDialog* itemDialog1 = this; wxBoxSizer* itemBoxSizer2 = new wxBoxSizer(wxVERTICAL); itemDialog1->SetSizer(itemBoxSizer2); wxBoxSizer* itemBoxSizer1 = new wxBoxSizer(wxVERTICAL); itemBoxSizer2->Add(itemBoxSizer1, 1, wxGROW, 5); wxBoxSizer* itemBoxSizer3 = new wxBoxSizer(wxHORIZONTAL); itemBoxSizer1->Add(itemBoxSizer3, 1, wxGROW | wxALL, 5); wxBoxSizer* itemBoxSizer4 = new wxBoxSizer(wxHORIZONTAL); itemBoxSizer3->Add(itemBoxSizer4, 1, wxALIGN_CENTER_VERTICAL | wxALL, 5); wxStaticText* itemStaticText5 = new wxStaticText(itemDialog1, wxID_STATIC, _("New Directory Name"), wxDefaultPosition, wxDefaultSize, 0); itemBoxSizer4->Add(itemStaticText5, 0, wxALIGN_CENTER_VERTICAL | wxALL, 5); directoryNameInput = new wxTextCtrl(itemDialog1, wxID_ANY, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0); directoryNameInput->SetMaxLength(128); itemBoxSizer4->Add(directoryNameInput, 1, wxALIGN_CENTER_VERTICAL | wxALL, 5); wxStdDialogButtonSizer* itemStdDialogButtonSizer7 = new wxStdDialogButtonSizer; itemBoxSizer1->Add(itemStdDialogButtonSizer7, 0, wxALIGN_RIGHT | wxALL, 5); wxButton* itemButton8 = new wxButton(itemDialog1, wxID_OK, _("&OK"), wxDefaultPosition, wxDefaultSize, 0); itemStdDialogButtonSizer7->AddButton(itemButton8); wxButton* itemButton9 = new wxButton(itemDialog1, wxID_CANCEL, _("&Cancel"), wxDefaultPosition, wxDefaultSize, 0); itemStdDialogButtonSizer7->AddButton(itemButton9); itemStdDialogButtonSizer7->Realize(); ////@end AddDirectoryDialog content construction } /* * Should we show tooltips? 
*/ bool AddDirectoryDialog::ShowToolTips() { return true; } /* * Get bitmap resources */ wxBitmap AddDirectoryDialog::GetBitmapResource(const wxString& name) { // Bitmap retrieval ////@begin AddDirectoryDialog bitmap retrieval wxUnusedVar(name); return wxNullBitmap; ////@end AddDirectoryDialog bitmap retrieval } /* * Get icon resources */ wxIcon AddDirectoryDialog::GetIconResource(const wxString& name) { // Icon retrieval ////@begin AddDirectoryDialog icon retrieval wxUnusedVar(name); return wxNullIcon; ////@end AddDirectoryDialog icon retrieval } wxString AddDirectoryDialog::GetUserInput() { return directoryNameInput->GetValue(); }
/*
 * vec_alignment_plus
 * GetSymmetricAlignmentLength
 *
 * Length of the alignment symmetrized over the two sequences: the
 * arithmetic mean (truncating integer division) of the aligned span on
 * sequence 1 and the aligned span on sequence 2.
 */
int vec_alignment_plus::GetSymmetricAlignmentLength( int align_id ) const
{
  const int align_pos = all_aligns_ids_[align_id];
  // Span covered on each of the two aligned sequences.
  const int span_on_first = alignments_[align_pos].a.Pos1( ) - alignments_[align_pos].a.pos1( );
  const int span_on_second = alignments_[align_pos].a.Pos2( ) - alignments_[align_pos].a.pos2( );
  return ( span_on_first + span_on_second ) / 2;
}
<reponame>velocity-9/v9_worker // I'd like the most pedantic warning level #![warn( clippy::cargo, clippy::needless_borrow, clippy::pedantic, clippy::redundant_clone )] // But I don't care about these ones #![allow( clippy::cast_precision_loss, // There is no way to avoid this precision loss clippy::module_name_repetitions, // Sometimes clear naming calls for repetition clippy::multiple_crate_versions // There is no way to easily fix this without modifying our dependencies )] #[macro_use] extern crate failure; #[macro_use] extern crate log; #[macro_use] extern crate serde; mod component; mod docker; mod error; mod fs_utils; mod model; mod named_pipe; mod request_handler; mod server; use std::env; use std::sync::Arc; use std::thread; use std::time::Duration; use crate::request_handler::HttpRequestHandler; const HEARTBEAT_PERIODICITY: Duration = Duration::from_secs(1); fn main() { // TODO: Graceful shutdown on control-c / API call would be good // Initialize logging let log_spec = "debug, hyper=info, mio=info, tokio_reactor=info, tokio_threadpool=info"; flexi_logger::Logger::with_str(log_spec).start().unwrap(); info!("worker starting... 
(logging initialized)"); // Parse command line arguments let development_mode = env::args().any(|arg| arg == "--development"); if development_mode { info!("running in development mode"); } // Pre-initialize idle container creation lazy_static::initialize(&docker::idle_container_creator::GLOBAL_IDLE_CONTAINER_CREATOR); // Create handler to deal with HTTP requests let http_request_handler = Arc::new(HttpRequestHandler::new()); // Create a heartbeat thread for the ComponentManager // (We want a periodic signal to check on our components, and perhaps shut them down) let heartbeat_handler_ref = http_request_handler.clone(); thread::spawn(move || loop { heartbeat_handler_ref.component_manager().read().heartbeat(); thread::sleep(HEARTBEAT_PERIODICITY); }); // Start up a server to respond to REST requests server::start_server( development_mode, http_request_handler, request_handler::global_request_entrypoint, ); warn!("Sever loop finished, shutting down..."); }
def _init_model(self): if self._random_state is not None: torch.manual_seed(self._random_state) torch.backends.cudnn.deterministic = True if not isinstance(self._model, nn.Module): raise TypeError("`model` must be a `torch.nn.Module`.") self._model = self._model.to(self._device)
<gh_stars>1-10 import {ITaskQueue, TaskItem, ActiveTask, ActiveQueues, ActiveQueue} from './index.d'; export default class Pubus { static throttle = 30; // The default throttle interval 30ms private throttle: number; // the time of waitting every task between, the unit is 'ms' private holdQueue:ITaskQueue<TaskItem> = {}; // This queue is for registered listener private activeQueues:ActiveQueues; // This quesu is for working or will work listenter /** * @constructor * @param throttle the time of waitting every task between, the unit is 'ms' */ constructor(throttle = Pubus.throttle) { this.throttle = throttle; this.activeQueues = {} as ActiveQueues; } /** * Register an event listenter * @param eventName Event name * @param cbFunc callback function * @param tag event tag */ public addListener(eventName: string, cbFunc: Function, tag?: string){ if(!this.holdQueue[eventName]){ this.holdQueue[eventName] = []; } const task:TaskItem = {cb: cbFunc, tag}; this.holdQueue[eventName].push(task); } /** * Registe an event listenter * @param eventName Event name * @param cbFunc Callback function * @param tag event tag */ public on(eventName: string, cbFunc: Function, tag?: string) { this.addListener(eventName, cbFunc, tag); } public emit(eventName: string, ...payload:any[]){ if(this.holdQueue[eventName]){ if(!this.activeQueues[eventName]){ this.activeQueues[eventName] = {running: false, activeTasks:[]} as ActiveQueue; } // Construct active task OBJECTS const holdTasks = [...this.holdQueue[eventName]]; // Check hold tasks is vaild if(holdTasks.length === 0) {return false;} else { holdTasks.map(holdTask => { const activeTask:ActiveTask = { payload, task: holdTask, timestamp: (new Date).getTime() } this.activeQueues[eventName].activeTasks.push(activeTask); }); // Start run event loop this.emitEvent(eventName); } }else { // Slince } } /** * Remove the listener for the event * @param eventName Event name * @param tag The tag for special callback function */ public off(eventName: 
string, tag?:string) { if(this.holdQueue[eventName]) { if(tag){ this.holdQueue[eventName].forEach((task, index) => { if(task.tag && task.tag === tag) {this.holdQueue[eventName].splice(index,1);} }) } else { delete this.holdQueue[eventName]; } } else { // Silnce } } /** * * @param eventName */ private emitEvent(eventName:string) { if(!this.activeQueues[eventName].running) { this.activeQueues[eventName].running = true; this.runloop(this.activeQueues[eventName], this.throttle); } } /** * * @param activeTasks */ private runloop(activeQueue: ActiveQueue, delay: number) { if(activeQueue.activeTasks.length > 0) { const at:ActiveTask = activeQueue.activeTasks.shift() as ActiveTask; const cb = at.task.cb; const payload = at.payload; cb(...payload); setTimeout(() => {this.runloop(activeQueue, delay)}, delay); }else { activeQueue.running = false; } } }
// Read reads the file specified by the path. func Read(ctx context.Context, path string) ([]byte, error) { if strings.HasPrefix(path, gcsBucketPrefix) { return readGCSFile(ctx, path) } return readLocalFile(path) }
/** * Creates a new acceleration that is linked to the given {@code referenceAcceleration} as follows: * * <pre> * newAcceleration = scale * referenceAcceleration * </pre> * * where the scale is obtained from the given {@code scaleSupplier}. * * @param scaleSupplier the supplier to get the scale. * @param referenceAcceleration the reference acceleration. Not modified. * @return the new acceleration linked to the {@code referenceAcceleration}. */ public static SpatialAccelerationReadOnly newSpatialAccelerationVectorReadOnly(DoubleSupplier scaleSupplier, SpatialAccelerationReadOnly referenceAcceleration) { return new SpatialAccelerationReadOnly() { private final FrameVector3DReadOnly angularPart = EuclidFrameFactories.newLinkedFrameVector3DReadOnly(scaleSupplier, referenceAcceleration.getAngularPart()); private final FrameVector3DReadOnly linearPart = EuclidFrameFactories.newLinkedFrameVector3DReadOnly(scaleSupplier, referenceAcceleration.getLinearPart()); @Override public ReferenceFrame getBodyFrame() { return referenceAcceleration.getBodyFrame(); } @Override public ReferenceFrame getBaseFrame() { return referenceAcceleration.getBaseFrame(); } @Override public ReferenceFrame getReferenceFrame() { return referenceAcceleration.getReferenceFrame(); } @Override public FrameVector3DReadOnly getAngularPart() { return angularPart; } @Override public FrameVector3DReadOnly getLinearPart() { return linearPart; } @Override public boolean equals(Object object) { if (object == this) return true; else if (object instanceof SpatialAccelerationReadOnly) return equals((SpatialAccelerationReadOnly) object); else return false; } @Override public String toString() { return MecanoIOTools.getSpatialAccelerationString(this); } }; }
<gh_stars>0
package graph

// NOTE(review): this appears to be Kiota-generated model code — prefer
// regenerating from the OpenAPI description over hand-editing.
import (
    i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)

// AccessReviewNotificationRecipientItem models one notification recipient entry of an access review.
type AccessReviewNotificationRecipientItem struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{};
    // Determines the recipient of the notification email.
    notificationRecipientScope *AccessReviewNotificationRecipientScope;
    // Indicates the type of access review email to be sent. Supported template type is CompletedAdditionalRecipients, which sends review completion notifications to the recipients.
    notificationTemplateType *string;
}

// NewAccessReviewNotificationRecipientItem instantiates a new accessReviewNotificationRecipientItem and sets the default values.
func NewAccessReviewNotificationRecipientItem()(*AccessReviewNotificationRecipientItem) {
    m := &AccessReviewNotificationRecipientItem{
    }
    m.SetAdditionalData(make(map[string]interface{}));
    return m
}

// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AccessReviewNotificationRecipientItem) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    } else {
        return m.additionalData
    }
}

// GetNotificationRecipientScope gets the notificationRecipientScope property value. Determines the recipient of the notification email.
func (m *AccessReviewNotificationRecipientItem) GetNotificationRecipientScope()(*AccessReviewNotificationRecipientScope) {
    if m == nil {
        return nil
    } else {
        return m.notificationRecipientScope
    }
}

// GetNotificationTemplateType gets the notificationTemplateType property value. Indicates the type of access review email to be sent. Supported template type is CompletedAdditionalRecipients, which sends review completion notifications to the recipients.
func (m *AccessReviewNotificationRecipientItem) GetNotificationTemplateType()(*string) {
    if m == nil {
        return nil
    } else {
        return m.notificationTemplateType
    }
}

// GetFieldDeserializers the deserialization information for the current model
func (m *AccessReviewNotificationRecipientItem) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
    res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error))
    // "notificationRecipientScope" is parsed as a nested object.
    res["notificationRecipientScope"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewAccessReviewNotificationRecipientScope() })
        if err != nil {
            return err
        }
        if val != nil {
            m.SetNotificationRecipientScope(val.(*AccessReviewNotificationRecipientScope))
        }
        return nil
    }
    // "notificationTemplateType" is parsed as a plain string.
    res["notificationTemplateType"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetNotificationTemplateType(val)
        }
        return nil
    }
    return res
}

func (m *AccessReviewNotificationRecipientItem) IsNil()(bool) {
    return m == nil
}

// Serialize serializes information the current object
func (m *AccessReviewNotificationRecipientItem) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
    {
        err := writer.WriteObjectValue("notificationRecipientScope", m.GetNotificationRecipientScope())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("notificationTemplateType", m.GetNotificationTemplateType())
        if err != nil {
            return err
        }
    }
    {
        // Unknown properties captured at deserialization time are round-tripped.
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}

// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AccessReviewNotificationRecipientItem) SetAdditionalData(value map[string]interface{})() {
    if m != nil {
        m.additionalData = value
    }
}

// SetNotificationRecipientScope sets the notificationRecipientScope property value. Determines the recipient of the notification email.
func (m *AccessReviewNotificationRecipientItem) SetNotificationRecipientScope(value *AccessReviewNotificationRecipientScope)() {
    if m != nil {
        m.notificationRecipientScope = value
    }
}

// SetNotificationTemplateType sets the notificationTemplateType property value. Indicates the type of access review email to be sent. Supported template type is CompletedAdditionalRecipients, which sends review completion notifications to the recipients.
func (m *AccessReviewNotificationRecipientItem) SetNotificationTemplateType(value *string)() {
    if m != nil {
        m.notificationTemplateType = value
    }
}
<gh_stars>0
package loom

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestSyntaxRules exercises syntax-rules macro matching for the classic
// Scheme derived forms `and`, `begin`, and `cond`, checking both structural
// equality of the expansions and their printed form.
func TestSyntaxRules(t *testing.T) {
	// (define-syntax and
	//   (syntax-rules ()
	//     ((and) #t)
	//     ((and test) test)
	//     ((and test1 test2 ...)
	//      (if test1 (and test2 ...) #f))))
	x, err := ParseString(
		`(define-syntax and (syntax-rules () ((and) #t) ((and test) test) ((and test1 test2 ...) (if test1 (and test2 ...) #f))))`)
	require.NoError(t, err)
	scope := globalScope.push()
	// Evaluating the define-syntax registers the rules in the scope's syntax table.
	_ = eval(x, scope, false)
	rules, ok := scope.syntax["and"]
	require.True(t, ok)

	// parse is a helper that parses a form and fails the test on error.
	parse := func(s string) *Pair {
		v, err := ParseString(s)
		require.NoError(t, err)
		return v.(*Pair)
	}

	// (and) expands to #t.
	v, ok := rules.match(parse("(and)"), globalScope)
	if assert.True(t, ok) {
		assert.True(t, equal(v, Boolean(true), map[Value]struct{}{}))
	}
	// (and test) expands to the test itself.
	v, ok = rules.match(parse("(and #t)"), globalScope)
	if assert.True(t, ok) {
		assert.True(t, equal(v, Boolean(true), map[Value]struct{}{}))
	}
	// Two or more tests expand to a nested if.
	v, ok = rules.match(parse("(and #t #f)"), globalScope)
	if assert.True(t, ok) {
		assert.True(t, equal(v, parse("(if #t #f #f)"), map[Value]struct{}{}))
		assert.Equal(t, "(if #t #f #f)", EncodeToString(v))
	}

	// begin is expressed as an immediately-applied lambda.
	begin, err := ParseString(
		`(define-syntax begin (syntax-rules () ((begin exp ...) ((lambda () exp ...)))))`)
	require.NoError(t, err)
	_ = eval(begin, scope, false)
	rules, ok = scope.syntax["begin"]
	require.True(t, ok)

	// cond with the full set of R7RS clauses, including else and => forms.
	cond, err := ParseString(
		`(define-syntax cond (syntax-rules (else =>) ((cond (else result1 result2 ...)) (begin result1 result2 ...)) ((cond (test => result)) (let ((temp test)) (if temp (result temp)))) ((cond (test => result) clause1 clause2 ...) (let ((temp test)) (if temp (result temp) (cond clause1 clause2 ...)))) ((cond (test)) test) ((cond (test) clause1 clause2 ...) (let ((temp test)) (if temp temp (cond clause1 clause2 ...)))) ((cond (test result1 result2 ...)) (if test (begin result1 result2 ...))) ((cond (test result1 result2 ...) clause1 clause2 ...) (if test (begin result1 result2 ...) (cond clause1 clause2 ...)))))`)
	require.NoError(t, err)
	_ = eval(cond, scope, false)
	rules, ok = scope.syntax["cond"]
	require.True(t, ok)

	// A two-clause cond expands into nested ifs wrapping begin blocks.
	v, ok = rules.match(parse("(cond ((> 3 2) 'greater) ((< 3 2) 'less))"), globalScope)
	if assert.True(t, ok) {
		assert.True(t, equal(v, parse("(if (> 3 2) (begin (quote greater)) (if (< 3 2) (begin (quote less))))"), map[Value]struct{}{}))
		assert.Equal(t, "(if (> 3 2) (begin (quote greater)) (if (< 3 2) (begin (quote less))))", EncodeToString(v))
	}
}
<filename>common/primitives/json.go // Copyright 2017 Factom Foundation // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package primitives import ( "encoding/json" "fmt" ) type JSON2Request struct { JSONRPC string `json:"jsonrpc"` ID interface{} `json:"id"` Params interface{} `json:"params,omitempty"` Method string `json:"method,omitempty"` } func (e *JSON2Request) JSONByte() ([]byte, error) { return EncodeJSON(e) } func (e *JSON2Request) JSONString() (string, error) { return EncodeJSONString(e) } func (e *JSON2Request) String() string { str, _ := e.JSONString() return str } func NewJSON2RequestBlank() *JSON2Request { j := new(JSON2Request) j.JSONRPC = "2.0" return j } func NewJSON2Request(method string, id, params interface{}) *JSON2Request { j := new(JSON2Request) j.JSONRPC = "2.0" j.ID = id j.Params = params j.Method = method return j } func ParseJSON2Request(request string) (*JSON2Request, error) { j := new(JSON2Request) err := json.Unmarshal([]byte(request), j) if err != nil { return nil, err } if j.JSONRPC != "2.0" { return nil, fmt.Errorf("Invalid JSON RPC version - `%v`, should be `2.0`", j.JSONRPC) } return j, nil } type JSON2Response struct { JSONRPC string `json:"jsonrpc"` ID interface{} `json:"id"` Error *JSONError `json:"error,omitempty"` Result interface{} `json:"result,omitempty"` } func (e *JSON2Response) JSONByte() ([]byte, error) { return EncodeJSON(e) } func (e *JSON2Response) JSONString() (string, error) { return EncodeJSONString(e) } func (e *JSON2Response) String() string { str, _ := e.JSONString() return str } func NewJSON2Response() *JSON2Response { j := new(JSON2Response) j.JSONRPC = "2.0" return j } func (j *JSON2Response) AddError(code int, message string, data interface{}) { e := NewJSONError(code, message, data) j.Error = e } type JSONError struct { Code int `json:"code"` Message string `json:"message"` Data interface{} `json:"data,omitempty"` } func NewJSONError(code int, message 
string, data interface{}) *JSONError { j := new(JSONError) j.Code = code j.Message = message j.Data = data return j } func (j *JSONError) Error() string { str, ok := j.Data.(string) if ok == false { return j.Message } return j.Message + ": " + str }
Methyltetrahydrofolic Acid Mediates N- and O-Methylation of Biogenic Amines A variety of mammalian and avian tissues N- and O-methylate indoleamines and phenylethylamines, with methyltetrahydrofolic acid as the methyl donor. Because it is considerably more efficient than S-adenosylmethionine, methyltetrahydrofolic acid may be the natural methyl donor in this reaction. With methyltetrahydrofolic acid, serotonin is O-methylated to 5-methoxytryptamine, a novel indoleamine in mammalian brain.
// See the cfg-if crate. #[allow(unused_macro_rules)] macro_rules! cfg_if { // match if/else chains with a final `else` ($( if #[cfg($($meta:meta),*)] { $($it:item)* } ) else * else { $($it2:item)* }) => { cfg_if! { @__items () ; $( ( ($($meta),*) ($($it)*) ), )* ( () ($($it2)*) ), } }; // match if/else chains lacking a final `else` ( if #[cfg($($i_met:meta),*)] { $($i_it:item)* } $( else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } )* ) => { cfg_if! { @__items () ; ( ($($i_met),*) ($($i_it)*) ), $( ( ($($e_met),*) ($($e_it)*) ), )* ( () () ), } }; // Internal and recursive macro to emit all the items // // Collects all the negated cfgs in a list at the beginning and after the // semicolon is all the remaining items (@__items ($($not:meta,)*) ; ) => {}; (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { // Emit all items within one block, applying an approprate #[cfg]. The // #[cfg] will require all `$m` matchers specified and must also negate // all previous matchers. cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } // Recurse to emit all other items in `$rest`, and when we do so add all // our `$m` matchers to the list of `$not` matchers as future emissions // will have to negate everything we just matched as well. cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } }; // Internal macro to Apply a cfg attribute to a list of items (@__apply $m:meta, $($it:item)*) => { $(#[$m] $it)* }; } // Helper macro for specialization. This also helps avoid parse errors if the // default fn syntax for specialization changes in the future. #[cfg(feature = "nightly")] macro_rules! default_fn { (#[$($a:tt)*] $($tt:tt)*) => { #[$($a)*] default $($tt)* } } #[cfg(not(feature = "nightly"))] macro_rules! default_fn { ($($tt:tt)*) => { $($tt)* } }
<reponame>sheyll/isobmff-builder
-- | The payload of AAC DASH streams.
module Data.ByteString.Mp4.Dash.Aac
  ( DashAacMedia (..),
    BinaryDashAacMedia (..),
    buildDashAacMedia,
    dashAacMediaBuilder,
    module X,
  )
where

import qualified Data.ByteString as BS
import Data.ByteString.IsoBaseFileFormat.Box
import Data.ByteString.IsoBaseFileFormat.Boxes
import Data.ByteString.IsoBaseFileFormat.Brands.Dash as X
import Data.ByteString.IsoBaseFileFormat.MediaFile as X
import Data.ByteString.IsoBaseFileFormat.ReExports as X
import Data.ByteString.IsoBaseFileFormat.Util.BoxFields as X
import Data.ByteString.IsoBaseFileFormat.Util.Time as X
import qualified Data.ByteString.Lazy as BL

-- | Media fragment segment parameters of an aac audio stream mp4 file, as well as a
-- list of audio segments consisting of a duration in pcm samples and aac encoded audio.
data DashAacMedia = DashAacMedia
  { -- | sequence number of this media fragment
    dashAacMediaSeqNum :: !Word32,
    -- | base media decode time of the fragment (wrapped in 'TSv1' below)
    dashAacMediaDecodeTime :: !Word64,
    -- | (duration in PCM samples, AAC encoded audio) per segment
    dashAacMediaAudioSegments :: ![(Word32, BS.ByteString)]
  }

-- | Binary version of `DashAacMedia`.
newtype BinaryDashAacMedia = BinaryDashAacMedia {binaryDashAacMediaData :: BS.ByteString}

instance Show BinaryDashAacMedia where
  show (BinaryDashAacMedia d) = printf "DASH AAC MEDIA - size: %14d" (BS.length d)

-- | Convert a 'DashAacMedia' record to a 'BinaryDashAacMedia'.
buildDashAacMedia :: DashAacMedia -> BinaryDashAacMedia
buildDashAacMedia x =
  BinaryDashAacMedia
    { binaryDashAacMediaData =
        BL.toStrict $ toLazyByteString $ dashAacMediaBuilder x
    }

-- | Convert a 'DashAacMedia' record to a binary builder.
dashAacMediaBuilder :: DashAacMedia -> Builder
dashAacMediaBuilder DashAacMedia {..} =
  -- Box layout: styp, then a moof (mfhd + traf (tfhd + tfdt + trun)),
  -- followed by the mdat carrying the concatenated AAC payloads.
  mediaBuilder
    dash
    ( styp
        :. movieFragment
          ( mfhd
              :| trackFragment
                ( tfhd
                    :. tfdt
                    :| trun
                )
          )
        :| mdat
    )
  where
    -- 'msdh' segment type with 'msdh' and 'dash' as compatible brands.
    !styp = segmentTypeBox (SegmentType "msdh" 0 ["msdh", "dash"])
    !mfhd = movieFragmentHeader (MovieFragmentHeader (Scalar dashAacMediaSeqNum))
    !tfhd = trackFragmentHeader def
    !tfdt = trackFragBaseMediaDecodeTime (TSv1 dashAacMediaDecodeTime)
    !trun = trackRunIso5 mdatOffset dashAacMediaAudioSegments
      where
        -- Offset to the media data, computed from the static sizes of the
        -- boxes that precede it (moof chain with 64-bit decode time).
        -- NOTE(review): must stay in sync with the box layout above — if a
        -- box is added or the tfdt width changes, update this sum.
        !mdatOffset =
          movieFragmentStaticSize
            + movieFragmentHeaderStaticSize
            + trackFragmentStaticSize
            + trackFragmentHeaderStaticSize
            + trackFragBaseMediaDecodeTimeStaticSize64
    !mdat = mediaData (MediaData (BS.concat (snd <$> dashAacMediaAudioSegments)))