package com.swust.client;

import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.DefaultThreadFactory;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;

import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

/**
 * 2020/4/20 13:42
 */
@Slf4j
public class ClientManager {

    public static NioEventLoopGroup PROXY_WORK = new NioEventLoopGroup(
            Runtime.getRuntime().availableProcessors() << 2,
            new DefaultThreadFactory("client-proxy-work"));

    /**
     * Lock registry. Guards against a race under high concurrency where a packet from the
     * public network arrives before the intranet proxy client has connected, in which case
     * no matching intranet client would be found.
     */
    public static final Map<String, InnerLock> MONITOR_MAP = new ConcurrentHashMap<>();

    /**
     * key:   channel id of the connection to the public proxy server
     * value: channel of the client connected to the intranet server
     */
    public static final Map<String, ChannelHandlerContext> ID_SERVICE_CHANNEL_MAP = new ConcurrentHashMap<>();

    /**
     * key:   channel connected to the public server
     * value: intranet clients opened by this client; each connects to a given intranet
     *        service, and there may be several of them
     */
    public static final ConcurrentHashMap<Channel, List<IntranetClient>> CHANNEL_MAP = new ConcurrentHashMap<>();

    /**
     * key:   id of the opened intranet channel
     * value: timestamp
     */
    public static final ConcurrentHashMap<String, Long> CHANNEL_TIME_MAP = new ConcurrentHashMap<>();

    /**
     * lock
     */
    private final static Object MONITOR = new Object();

    public static void lock(final String channelId) {
        synchronized (MONITOR) {
            System.out.println("lock:" + channelId);
            InnerLock innerLock = MONITOR_MAP.get(channelId);
            if (Objects.nonNull(innerLock) && innerLock.success) {
                return;
            }
            // 2s
            long waitTime = 2000;
            CHANNEL_TIME_MAP.put(channelId, System.currentTimeMillis());
            while (true) {
                try {
                    MONITOR.wait(waitTime);
                    InnerLock newLock = MONITOR_MAP.get(channelId);
                    if (Objects.nonNull(newLock) && newLock.success) {
                        break;
                    }
                    if (System.currentTimeMillis() - CHANNEL_TIME_MAP.get(channelId) > 1000 * 60) {
                        System.out.println("data for channel id " + channelId
                                + " has not been handled for over 1 min and will be dropped");
                        break;
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
            CHANNEL_TIME_MAP.remove(channelId);
        }
    }

    public static void unlock(final String channelId) {
        synchronized (MONITOR) {
            MONITOR_MAP.put(channelId, new InnerLock().setDateTime(LocalDateTime.now()).setSuccess(true));
            MONITOR.notifyAll();
        }
    }

    public static void add2ChannelMap(Channel key, IntranetClient target) {
        // computeIfAbsent guarantees a freshly created list is registered in the map
        // before the client is added, so added clients are never silently lost
        CHANNEL_MAP.computeIfAbsent(key, k -> Collections.synchronizedList(new ArrayList<>(16)))
                .add(target);
    }

    /**
     * Remove the channel mapping for the given channel id.
     */
    public static void removeChannelMapByProxyClient(Channel channel, String channelId) {
        List<IntranetClient> intranetClients = CHANNEL_MAP.get(channel);
        if (Objects.nonNull(intranetClients)) {
            intranetClients.removeIf(intranetClient ->
                    intranetClient.getChannel().id().asLongText().equals(channelId));
        }
    }

    /**
     * Reset the containers.
     */
    public static void reset() {
    }

    @Accessors(chain = true)
    @Setter
    @Getter
    private static class InnerLock {
        private boolean success;
        private boolean dataFast;
        private LocalDateTime dateTime;
    }
}
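A minimal sketch of how the lock/unlock pair above would be used, assuming a hypothetical handler class; ProxyDataHandler, onProxyData, and onIntranetClientConnected are illustrative names that do not appear in the original code, and the sketch assumes the same io.netty imports as the class above.

public class ProxyDataHandler {

    /** Data path: parks until the intranet client for this channel id has connected. */
    void onProxyData(String channelId, byte[] data) {
        ClientManager.lock(channelId); // returns once unlock(channelId) runs, or after the 1 min timeout
        ChannelHandlerContext ctx = ClientManager.ID_SERVICE_CHANNEL_MAP.get(channelId);
        if (ctx != null) {
            ctx.writeAndFlush(io.netty.buffer.Unpooled.wrappedBuffer(data));
        }
    }

    /** Connection path: registers the channel, then releases any thread parked in lock(). */
    void onIntranetClientConnected(String channelId, ChannelHandlerContext ctx) {
        ClientManager.ID_SERVICE_CHANNEL_MAP.put(channelId, ctx);
        ClientManager.unlock(channelId);
    }
}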
/// Adds a new entry to the fill output.
pub fn entry(&mut self, entry: &dyn Format<Context>) -> &mut Self {
    self.result = self.result.and_then(|_| {
        let mut buffer = VecBuffer::new(self.fmt.state_mut());
        write!(buffer, [entry])?;

        let item = buffer.into_element();
        if !item.is_empty() {
            self.items.push(item);
        }
        Ok(())
    });
    self
}
package com.example.tinyhousemonitoring;

import android.content.DialogInterface;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.view.MenuItem;

import com.example.tinyhousemonitoring.fragments.HealthFragment;
import com.example.tinyhousemonitoring.fragments.HomeFragment;
import com.example.tinyhousemonitoring.iot_environment.ApplicationEnvironment;
import com.google.android.material.bottomnavigation.BottomNavigationView;

import be.kuleuven.msec.iot.iotframework.*;

import java.util.concurrent.CountDownLatch;

import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import androidx.fragment.app.FragmentTransaction;
import be.kuleuven.msec.iot.iotframework.callbackinterfaces.OnRequestCompleted;

public class MainActivity extends AppCompatActivity {

    ApplicationEnvironment environment;

    private BottomNavigationView.OnNavigationItemSelectedListener mOnNavigationItemSelectedListener
            = new BottomNavigationView.OnNavigationItemSelectedListener() {
        @Override
        public boolean onNavigationItemSelected(@NonNull MenuItem item) {
            switch (item.getItemId()) {
                case R.id.navigation_home:
                    getSupportFragmentManager().beginTransaction()
                            .replace(R.id.frame_layout, new HomeFragment()).commit();
                    return true;
                case R.id.navigation_health:
                    getSupportFragmentManager().beginTransaction()
                            .replace(R.id.frame_layout, new HealthFragment()).commit();
                    return true;
                case R.id.navigation_switch_room:
                    //selectedFragment = SwitchFragment.newInstance();
                    SharedPreferences values = getSharedPreferences(getString(R.string.app_values), MODE_PRIVATE);
                    SharedPreferences.Editor editor = values.edit();
                    editor.clear();
                    editor.commit();

                    Intent intent = new Intent(getApplicationContext(), SwitchRoomActivity.class);
                    intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
                    startActivity(intent);
                    finishAffinity();
                    return true;
            }
            return false;
        }
    };

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        loadView();
    }

    private void loadView() {
        final String roomID = getSharedPreferences(getString(R.string.app_values), MODE_PRIVATE)
                .getString(getString(R.string.room_id), null);
        System.out.println(roomID);
        if (roomID != null) {
            // Note: awaiting the latch below blocks the calling (UI) thread until the
            // room configuration has been fetched and the environment is loaded.
            final CountDownLatch latch = new CountDownLatch(1);
            environment = new ApplicationEnvironment(this);
            environment.getConfigurationFromServer(this, roomID + ".json", new OnRequestCompleted<Boolean>() {
                @Override
                public void onSuccess(Boolean response) {
                    environment.loadEnvironment(new OnRequestCompleted<Boolean>() {
                        @Override
                        public void onSuccess(Boolean response) {
                            latch.countDown();
                        }
                    });
                }
            });
            try {
                latch.await();
            } catch (InterruptedException e1) {
                e1.printStackTrace();
            }
        } else {
            Intent intent = new Intent(this, SwitchRoomActivity.class);
            this.startActivity(intent);
        }

        setContentView(R.layout.activity_main);

        BottomNavigationView navigation = (BottomNavigationView) findViewById(R.id.navigation);
        navigation.setOnNavigationItemSelectedListener(mOnNavigationItemSelectedListener);

        getSupportActionBar().setTitle("Tiny House Monitoring - " + roomID);
        getSupportActionBar().setDisplayShowHomeEnabled(true);
        getSupportActionBar().setLogo(R.drawable.distrinet);
        getSupportActionBar().setDisplayUseLogoEnabled(true);

        // Manually displaying the first fragment - one time only
        FragmentTransaction transaction = getSupportFragmentManager().beginTransaction();
        transaction.replace(R.id.frame_layout, new HomeFragment());
        transaction.commitAllowingStateLoss();
    }
}
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
#            ___   __    __    __    ___
#           /     |  \  |  \  |  \  /              Automatic
#           \__   |__/  |__/  |___| \__            Annotation
#              \  |     |     |   |    \           of
#           ___/  |     |     |   | ___/           Speech
#
#
#                           http://www.sppas.org/
#
# ---------------------------------------------------------------------------
#            Laboratoire Parole et Langage, Aix-en-Provence, France
#                   Copyright (C) 2011-2016  <NAME>
#
#                   This banner notice must not be removed
# ---------------------------------------------------------------------------
# Use of this software is governed by the GNU Public License, version 3.
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: basestats.py
# ---------------------------------------------------------------------------

import wx
import codecs

# ------------------------------------------------------------------------


def writecsv(filename, rows, separator="\t", encoding="utf-8-sig"):
    """Write the rows to the file.

    Args:
        filename (string):
        rows (list):
        separator (string):
        encoding (string):

    """
    with codecs.open(filename, "w+", encoding) as f:
        for row in rows:
            tmp = []
            for s in row:
                if isinstance(s, (float, int)):
                    s = str(s)
                else:
                    s = '"%s"' % s
                tmp.append(s)
            f.write('%s\n' % separator.join(tmp))

# ----------------------------------------------------------------------------
# Base Stat Panel
# ----------------------------------------------------------------------------


class BaseStatPanel(wx.Panel):
    """
    @author:  <NAME>
    @contact: <EMAIL>
    @license: GPL
    @summary: Base stat panel.
    """

    def __init__(self, parent, prefsIO, name):
        wx.Panel.__init__(self, parent)
        self.preferences = prefsIO
        self.name = name.lower()
        self.rowdata = []

        self.sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.SetSizer(self.sizer)
        self.ShowNothing()
        self.sizer.FitInside(self)
        self.SetMinSize((320, 200))

    # ------------------------------------------------------------------------

    def ShowNothing(self):
        """Method to show a message in the panel."""
        self.sizer.DeleteWindows()
        self.sizer.Add(wx.StaticText(self, -1, "Nothing to view!"), 1,
                       flag=wx.ALL | wx.EXPAND, border=5)
        self.SendSizeEvent()

    # ------------------------------------------------------------------------

    def ShowStats(self, tier):
        """Base method to show a tier in the panel."""
        self.ShowNothing()

    # ------------------------------------------------------------------------

    def SaveAs(self, outfilename="stats.csv"):
        dlg = wx.FileDialog(self, "Save as", "Save as", outfilename,
                            "UTF-16 (*.csv)|*.csv |UTF-8 (*.csv)|*.csv",
                            wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if dlg.ShowModal() != wx.ID_OK:
            dlg.Destroy()
            return
        path, index = dlg.GetPath(), dlg.GetFilterIndex()
        dlg.Destroy()

        encoding = "utf-16" if index == 0 else "utf-8"
        self.rowdata.insert(0, self.cols)
        writecsv(path, self.rowdata, separator=";", encoding=encoding)
        self.rowdata.pop(0)

    # ------------------------------------------------------------------------

    def AppendRow(self, i, row, listctrl):
        # append the row to the given list control
        pos = listctrl.InsertStringItem(i, row[0])
        for j in range(1, len(row)):
            s = row[j]
            if isinstance(s, float):
                s = str(round(s, 4))
            elif isinstance(s, int):
                s = str(s)
            listctrl.SetStringItem(pos, j, s)
/**
 * Note on parallelization: the permutations are split into different tasks.
 * Rotations + point placements are not.
 */
public class ParallelBruteForcePackager extends AbstractBruteForcePackager {

    public static ParallelBruteForcePackagerBuilder newBuilder() {
        return new ParallelBruteForcePackagerBuilder();
    }

    public static class ParallelBruteForcePackagerBuilder {

        protected List<Container> containers;
        protected int checkpointsPerDeadlineCheck = 1;
        protected int threads = -1;
        protected int parallelizationCount = -1;
        protected ExecutorService executorService;

        public ParallelBruteForcePackagerBuilder withThreads(int threads) {
            if (threads < 1) {
                throw new IllegalArgumentException("Unexpected thread count " + threads);
            }
            this.threads = threads;
            return this;
        }

        /**
         * Number of units to split the work into. This number should be an order of
         * magnitude larger than the number of threads.
         *
         * @param parallelizationCount number of pieces to split the workload into
         * @return this builder
         */
        public ParallelBruteForcePackagerBuilder withParallelizationCount(int parallelizationCount) {
            if (parallelizationCount < 1) {
                throw new IllegalArgumentException("Unexpected parallelization count " + parallelizationCount);
            }
            this.parallelizationCount = parallelizationCount;
            return this;
        }

        public ParallelBruteForcePackagerBuilder withContainers(Container... containers) {
            if (this.containers == null) {
                this.containers = new ArrayList<>();
            }
            for (Container container : containers) {
                this.containers.add(container);
            }
            return this;
        }

        public ParallelBruteForcePackagerBuilder withContainers(List<Container> containers) {
            this.containers = containers;
            return this;
        }

        public ParallelBruteForcePackagerBuilder withCheckpointsPerDeadlineCheck(int n) {
            this.checkpointsPerDeadlineCheck = n;
            return this;
        }

        public ParallelBruteForcePackagerBuilder withExecutorService(ExecutorService executorService) {
            this.executorService = executorService;
            return this;
        }

        public ParallelBruteForcePackagerBuilder withAvailableProcessors(int factor) {
            this.threads = Runtime.getRuntime().availableProcessors() / factor;
            return this;
        }

        public ParallelBruteForcePackager build() {
            if (containers == null) {
                throw new IllegalStateException("Expected containers");
            }
            if (executorService == null) {
                if (threads == -1) {
                    threads = Runtime.getRuntime().availableProcessors();
                }
                executorService = Executors.newFixedThreadPool(threads);
                if (parallelizationCount == -1) {
                    parallelizationCount = 16 * threads;
                }
            } else {
                if (threads != -1) {
                    throw new IllegalArgumentException("Not expecting both thread count and executor service");
                }
                if (parallelizationCount == -1) {
                    // auto detect
                    if (executorService instanceof ThreadPoolExecutor) {
                        ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) executorService;
                        parallelizationCount = 16 * threadPoolExecutor.getMaximumPoolSize();
                    } else {
                        throw new IllegalArgumentException("Expected a parallelization count for custom executor service");
                    }
                }
            }
            return new ParallelBruteForcePackager(containers, executorService, parallelizationCount, checkpointsPerDeadlineCheck);
        }
    }

    private final ExecutorCompletionService<BruteForcePackagerResult> executorCompletionService;
    private final int parallelizationCount;
    private final ExecutorService executorService;

    public ParallelBruteForcePackager(List<Container> containers, ExecutorService executorService,
            int parallelizationCount, int checkpointsPerDeadlineCheck) {
        super(containers, checkpointsPerDeadlineCheck);

        this.parallelizationCount = parallelizationCount;
        this.executorService = executorService;
        this.executorCompletionService = new ExecutorCompletionService<BruteForcePackagerResult>(executorService);
    }

    private class RunnableAdapter implements Callable<BruteForcePackagerResult> {

        private Container container;
        private PermutationRotationIterator iterator;
        private List<StackPlacement> placements;
        private ExtremePoints3DStack extremePoints3D;
        private BooleanSupplier interrupt;

        public RunnableAdapter(int placementsCount, long minStackableItemVolume, long minStackableArea) {
            this.placements = getPlacements(placementsCount);
            this.extremePoints3D = new ExtremePoints3DStack(1, 1, 1, placementsCount + 1);
        }

        public void setContainer(Container container) {
            this.container = container;
        }

        public void setIterator(PermutationRotationIterator iterator) {
            this.iterator = iterator;
        }

        public void setInterrupt(BooleanSupplier interrupt) {
            this.interrupt = interrupt;
        }

        @Override
        public BruteForcePackagerResult call() {
            return ParallelBruteForcePackager.this.pack(extremePoints3D, placements, container, iterator, interrupt);
        }
    }

    private class ParallelAdapter implements Adapter<BruteForcePackagerResult> {

        private final List<Container> containers;
        private final ParallelPermutationRotationIterator[] iterators; // per container
        private final RunnableAdapter[] runnables; // per thread
        private final BooleanSupplier[] interrupts;

        protected ParallelAdapter(List<StackableItem> stackables, List<Container> containers, BooleanSupplier interrupt) {
            this.containers = containers;
            this.interrupts = new BooleanSupplier[parallelizationCount];

            // clone nth interrupts so that everything is not slowed down by sharing a single counter
            if (interrupt instanceof ClonableBooleanSupplier) {
                ClonableBooleanSupplier c = (ClonableBooleanSupplier) interrupt;
                for (int i = 0; i < parallelizationCount; i++) {
                    this.interrupts[i] = (BooleanSupplier) c.clone();
                }
            } else {
                for (int i = 0; i < parallelizationCount; i++) {
                    this.interrupts[i] = interrupt;
                }
            }

            int count = 0;
            for (StackableItem stackable : stackables) {
                count += stackable.getCount();
            }

            long minStackableItemVolume = getMinStackableItemVolume(stackables);
            long minStackableArea = getMinStackableItemArea(stackables);

            runnables = new RunnableAdapter[parallelizationCount];
            for (int i = 0; i < parallelizationCount; i++) {
                runnables[i] = new RunnableAdapter(count, minStackableItemVolume, minStackableArea);
            }

            iterators = new ParallelPermutationRotationIterator[containers.size()];
            for (int i = 0; i < containers.size(); i++) {
                Container container = containers.get(i);
                ContainerStackValue[] stackValues = container.getStackValues();

                iterators[i] = new ParallelPermutationRotationIterator(
                        new Dimension(stackValues[0].getLoadDx(), stackValues[0].getLoadDy(), stackValues[0].getLoadDz()),
                        stackables, parallelizationCount);
            }
        }

        @Override
        public BruteForcePackagerResult attempt(int i, BruteForcePackagerResult currentBest) {
            // run on single thread for a small amount of combinations
            ParallelPermutationRotationIterator parallelPermutationRotationIterator = iterators[i];
            if (parallelPermutationRotationIterator.countPermutations() * parallelPermutationRotationIterator.countRotations()
                    > parallelizationCount * 2) { // somewhat conservative, as the number of rotations is unknown

                // interrupt needs not be accurate (i.e. atomic boolean)
                Boolean[] localInterrupt = new Boolean[32]; // add padding to avoid false sharing

                List<Future<BruteForcePackagerResult>> futures = new ArrayList<>(runnables.length);
                for (int j = 0; j < runnables.length; j++) {
                    RunnableAdapter runnableAdapter = runnables[j];
                    runnableAdapter.setContainer(containers.get(i));
                    runnableAdapter.setIterator(new ParallelPermutationRotationIteratorAdapter(iterators[i], j));

                    BooleanSupplier interruptBooleanSupplier = interrupts[i];
                    BooleanSupplier booleanSupplier = () -> localInterrupt[15] != null || interruptBooleanSupplier.getAsBoolean();
                    runnableAdapter.setInterrupt(booleanSupplier);

                    futures.add(executorCompletionService.submit(runnableAdapter));
                }

                try {
                    BruteForcePackagerResult best = null;
                    for (int j = 0; j < runnables.length; j++) {
                        try {
                            Future<BruteForcePackagerResult> future = executorCompletionService.take();
                            BruteForcePackagerResult result = future.get();
                            if (result != null) {
                                if (best == null || result.isBetterThan(best)) {
                                    best = result;

                                    if (best.containsLastStackable()) { // will not match any better than this
                                        // cancel others
                                        localInterrupt[15] = Boolean.TRUE;
                                        // don't break, so we're waiting for all the remaining threads to finish
                                    }
                                }
                            }
                        } catch (InterruptedException e1) {
                            // ignore
                            localInterrupt[15] = Boolean.TRUE;
                            return null;
                        } catch (Exception e) {
                            localInterrupt[15] = Boolean.TRUE;
                            throw new PackagerException(e);
                        }
                    }
                    // was the search interrupted?
                    if (interrupts[i].getAsBoolean()) {
                        return null;
                    }
                    return best;
                } finally {
                    for (Future<BruteForcePackagerResult> future : futures) {
                        future.cancel(true);
                    }
                }
            }
            // no need to split this job
            // run with linear approach
            return ParallelBruteForcePackager.this.pack(runnables[0].extremePoints3D, runnables[0].placements,
                    containers.get(i), parallelPermutationRotationIterator, interrupts[i]);
        }

        @Override
        public Container accept(BruteForcePackagerResult bruteForceResult) {
            Container container = bruteForceResult.getContainer();

            if (!bruteForceResult.containsLastStackable()) {
                // this result does not consume all placements
                // remove consumed items from the iterators
                int size = container.getStack().getSize();

                PermutationRotationIterator iterator = bruteForceResult.getPermutationRotationIteratorForState();
                int[] permutations = iterator.getPermutations();
                List<Integer> p = new ArrayList<>(size);
                for (int i = 0; i < size; i++) {
                    p.add(permutations[i]);
                }

                for (PermutationRotationIterator it : iterators) {
                    if (it == bruteForceResult.getPermutationRotationIteratorForState()) {
                        it.removePermutations(size);
                    } else {
                        it.removePermutations(p);
                    }
                }

                for (RunnableAdapter runner : runnables) {
                    runner.placements = runner.placements.subList(size, runner.placements.size());
                }
            } else {
                for (RunnableAdapter runner : runnables) {
                    runner.placements = Collections.emptyList();
                }
            }
            return container;
        }
    }

    public void shutdown() {
        executorService.shutdownNow();
    }

    public ExecutorService getExecutorService() {
        return executorService;
    }

    @Override
    protected Adapter<BruteForcePackagerResult> adapter(List<StackableItem> boxes, List<Container> containers, BooleanSupplier interrupt) {
        return new ParallelAdapter(boxes, containers, interrupt);
    }
}
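A hedged usage sketch of the builder defined above: containers must be supplied before build(), and shutdown() stops the pool the builder creates internally. The containers list is assumed to be populated elsewhere, since Container's construction is not shown in this excerpt.

List<Container> containers = new ArrayList<>(); // assumed populated elsewhere
ParallelBruteForcePackager packager = ParallelBruteForcePackager.newBuilder()
        .withContainers(containers)
        .withAvailableProcessors(2) // threads = available cores / 2
        .build();
try {
    // packing calls omitted; the pack(...) entry points are not part of this excerpt
} finally {
    packager.shutdown(); // shuts down the executor the builder created
}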
package virtualrouter

import (
	"context"

	appmesh "github.com/aws/aws-app-mesh-controller-for-k8s/apis/appmesh/v1beta2"
	"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/aws/services"
	"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/references"
	"github.com/aws/aws-app-mesh-controller-for-k8s/pkg/virtualrouter"
	"github.com/aws/aws-app-mesh-controller-for-k8s/test/e2e/framework/k8s"
	"github.com/aws/aws-app-mesh-controller-for-k8s/test/e2e/framework/utils"
	"github.com/aws/aws-sdk-go/aws"
	appmeshsdk "github.com/aws/aws-sdk-go/service/appmesh"
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type Manager interface {
	WaitUntilVirtualRouterActive(ctx context.Context, vr *appmesh.VirtualRouter) (*appmesh.VirtualRouter, error)
	WaitUntilVirtualRouterDeleted(ctx context.Context, vr *appmesh.VirtualRouter) error
	CheckVirtualRouterInAWS(ctx context.Context, ms *appmesh.Mesh, vr *appmesh.VirtualRouter) error
}

func NewManager(k8sClient client.Client, appMeshSDK services.AppMesh) Manager {
	return &defaultManager{
		k8sClient:  k8sClient,
		appMeshSDK: appMeshSDK,
	}
}

type defaultManager struct {
	k8sClient  client.Client
	appMeshSDK services.AppMesh
}

func (m *defaultManager) WaitUntilVirtualRouterActive(ctx context.Context, vr *appmesh.VirtualRouter) (*appmesh.VirtualRouter, error) {
	observedVR := &appmesh.VirtualRouter{}
	return observedVR, wait.PollImmediateUntil(utils.PollIntervalShort, func() (bool, error) {
		if err := m.k8sClient.Get(ctx, k8s.NamespacedName(vr), observedVR); err != nil {
			return false, err
		}

		for _, condition := range observedVR.Status.Conditions {
			if condition.Type == appmesh.VirtualRouterActive && condition.Status == corev1.ConditionTrue {
				return true, nil
			}
		}

		return false, nil
	}, ctx.Done())
}

func (m *defaultManager) WaitUntilVirtualRouterDeleted(ctx context.Context, vr *appmesh.VirtualRouter) error {
	observedVR := &appmesh.VirtualRouter{}
	return wait.PollImmediateUntil(utils.PollIntervalShort, func() (bool, error) {
		if err := m.k8sClient.Get(ctx, k8s.NamespacedName(vr), observedVR); err != nil {
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			return false, err
		}
		return false, nil
	}, ctx.Done())
}

func (m *defaultManager) CheckVirtualRouterInAWS(ctx context.Context, ms *appmesh.Mesh, vr *appmesh.VirtualRouter) error {
	// TODO: handle aws throttling
	desiredSDKVRSpec, err := virtualrouter.BuildSDKVirtualRouterSpec(vr)
	if err != nil {
		return err
	}

	resp, err := m.appMeshSDK.DescribeVirtualRouterWithContext(ctx, &appmeshsdk.DescribeVirtualRouterInput{
		MeshName:          ms.Spec.AWSName,
		MeshOwner:         ms.Spec.MeshOwner,
		VirtualRouterName: vr.Spec.AWSName,
	})
	if err != nil {
		return err
	}

	opts := cmpopts.EquateEmpty()
	if !cmp.Equal(desiredSDKVRSpec, resp.VirtualRouter.Spec, opts) {
		return errors.New(cmp.Diff(desiredSDKVRSpec, resp.VirtualRouter.Spec, opts))
	}

	vnByKey := make(map[types.NamespacedName]*appmesh.VirtualNode)
	vnRefs := virtualrouter.ExtractVirtualNodeReferences(vr)
	for _, vnRef := range vnRefs {
		vn := &appmesh.VirtualNode{}
		if err := m.k8sClient.Get(ctx, references.ObjectKeyForVirtualNodeReference(vr, vnRef), vn); err != nil {
			return err
		}
		vnByKey[k8s.NamespacedName(vn)] = vn
	}

	for _, route := range vr.Spec.Routes {
		desiredRouteSpec, err := virtualrouter.BuildSDKRouteSpec(vr, route, vnByKey)
		if err != nil {
			return err
		}

		resp, err := m.appMeshSDK.DescribeRouteWithContext(ctx, &appmeshsdk.DescribeRouteInput{
			MeshName:          ms.Spec.AWSName,
			MeshOwner:         ms.Spec.MeshOwner,
			VirtualRouterName: vr.Spec.AWSName,
			RouteName:         aws.String(route.Name),
		})
		if err != nil {
			return err
		}

		if !cmp.Equal(desiredRouteSpec, resp.Route.Spec, opts) {
			return errors.New(cmp.Diff(desiredRouteSpec, resp.Route.Spec, opts))
		}
	}

	return nil
}
Attorneys for the state argued that Utah's law promotes the state's interest in "responsible procreation."

Seth Anderson, left, and Michael Ferguson were one of the first couples married after a Utah judge declared the state's ban on same-sex marriage unconstitutional on Dec. 20. (Photo: Kim Raff, AP)

Story Highlights
Judge says state failed to show that allowing same-sex marriages would affect opposite-sex marriages
Salt Lake began issuing marriage licenses to gay couples Friday
State filed both a notice of appeal of the ruling and a request for an emergency stay

SALT LAKE CITY (AP) — Elisa Noel rushed to the county clerk's office with her partner immediately after learning that a federal judge overturned Utah's ban on gay marriage. They waited in line for a wedding license and were married in an impromptu ceremony punctuated with Noel giving the officiant a high-five.

"I can't believe this is Utah," Noel said moments after a ceremony that took place about 3 miles from the headquarters of the Mormon church.

Others had a similar reaction after a ruling by U.S. District Judge Robert J. Shelby that declared Utah's voter-approved ban on gay marriage unconstitutional. The recent appointee by President Obama said the ban violates the constitutional rights of gay couples and ruled Utah failed to show that allowing same-sex marriages would affect opposite-sex marriages in any way.

The ruling prompted a frenzy of activity by lawyers and gay couples. The Republican governor blasted the ruling as going against the will of the people.

Gay couples rushed to the Salt Lake County Clerk's office en masse to secure marriage licenses, waiting in line by the dozens and getting married on the spot by the mayor and ministers. It was a jubilant affair as cheers broke out after ceremonies were completed.

A gay bar in Salt Lake quickly made plans for a Friday night party to mark the event. Some made plans to march on the capitol Monday.

"I am very disappointed an activist federal judge is attempting to override the will of the people of Utah. I am working with my legal counsel and the acting attorney general to determine the best course to defend traditional marriage within the borders of Utah," Gov. Gary Herbert said.

Late Friday, the state filed both a notice of appeal of the ruling and a request for an emergency stay that would stop marriage licenses from being issued to same-sex couples. It's unknown when the judge will make a decision on whether to grant the stay.

The ruling has thrust the judge into the national spotlight less than two years after Congress approved his nomination to the federal bench. Shelby was appointed by Obama after GOP Sen. Orrin Hatch recommended him in November 2011.

Shelby served in the Utah Army National Guard from 1988 to 1996 and was a combat engineer in Operation Desert Storm. He graduated from the University of Virginia law school in 1998 and clerked for U.S. District Judge J. Thomas Greene in Utah, then spent about 12 years in private practice before he became a judge.

Many similar challenges to same-sex marriage bans are pending in other states, but the Utah case has been closely watched because of the state's history of steadfast opposition to gay marriage as the home of The Church of Jesus Christ of Latter-day Saints.

The church said in a statement Friday that it stands by its support for "traditional marriage."

"We continue to believe that voters in Utah did the right thing by providing clear direction in the state constitution that marriage should be between a man and a woman, and we are hopeful that this view will be validated by a higher court," the church said.

Not all Mormons were disappointed. A group called Mormons for Equality applauded the ruling, saying it was particularly sweet coming in "the heartland of our faith." The group has been among the leaders of a growing movement among Mormons to push the church to teach that homosexuality isn't a sin.

The Mormon church's stance has softened considerably since it was one of the leading forces behind California's short-lived same-sex-marriage ban, Proposition 8, in 2008. A church website launched this year encourages more compassion toward gays, and church leaders backed the Boy Scouts' recent policy allowing gay youth.

The Utah ruling comes the same week New Mexico's highest court legalized gay marriage after declaring it unconstitutional to deny marriage licenses to same-sex couples. A new law passed in Hawaii last month now allows gay couples to marry there.

If the ruling stands, Utah would become the 18th state to allow gay marriages, said Jon Davidson, director of Lambda Legal, which pursues litigation on LGBT issues nationwide. That's up from six before the U.S. Supreme Court last summer struck down part of the Defense of Marriage Act that defined marriage as between a man and a woman. The District of Columbia also allows same-sex marriage.

Deputy Salt Lake County Clerk Dahnelle Burton-Lee said the district attorney authorized her office to begin issuing the licenses, but she couldn't immediately say how many had been issued. It was clear from the line at the clerk's office, though, that it was several dozen deep.

"The momentum we are seeing is unprecedented in any human rights struggle," Davidson said. "To have this fast a change in the law and in public opinion is quite remarkable."

State Sen. Jim Dabakis, chairman of the Utah Democratic Party, was one of the first to get married in Salt Lake City with his longtime partner, Stephen Justesen. "Do you, Jim, take Steven, to be your lawfully wedded spouse?" the mayor asked.

But at the Utah County clerk's office in Provo, same-sex couples were still denied marriage licenses. Patsy Carter, 42, and her partner of eight years, 39-year-old Raylynn Marvel, said they went to the office immediately after hearing about the ruling, but the clerk said the office was still reviewing the ruling and consulting with the county attorney.

Carter said the ruling was still a positive step and she believes Utah County, considered one of Utah's most conservative, will eventually have to start granting the licenses. "If my marriage license could say, 'Provo, Utah,' that's probably the most epic contradiction ever," she said.

Utah's lawsuit was brought by three gay and lesbian couples, including one that was legally married in Iowa and just wants that license recognized in Utah.

During a nearly four-hour hearing on the case earlier this month, attorneys for the state argued that Utah's law promotes the state's interest in "responsible procreation" and the "optimal mode of child-rearing." They also asserted it's not the courts' role to determine how a state defines marriage, and that the U.S. Supreme Court's ruling doesn't give same-sex couples the universal right to marry.

In the ruling, Shelby wrote that the right to marry is a fundamental right protected by the U.S. Constitution.

"These rights would be meaningless if the Constitution did not also prevent the government from interfering with the intensely personal choices an individual makes when that person decides to make a solemn commitment to another human being," Shelby wrote.

Copyright 2013 The Associated Press. All rights reserved. This material may not be published, broadcast, rewritten or redistributed.
import { useState, useEffect } from 'react';

/** Returns a function that registers the years whose data needs to be refreshed. */
export default (refetch: () => void, currentYearState: number) => {
  const [yearsToUpdate, setYearsToUpdate] = useState<Set<number>>();

  useEffect(() => {
    if (yearsToUpdate?.has(currentYearState)) {
      refetch();
      const years = new Set(yearsToUpdate);
      years.delete(currentYearState);
      setYearsToUpdate(years);
    }
  }, [currentYearState, yearsToUpdate]);

  const _setYearsToUpdate = (years: number[]) => {
    const newYears = new Set(yearsToUpdate?.size ? yearsToUpdate : []);
    years.forEach((y) => newYears.add(y));
    setYearsToUpdate(newYears);
  };

  return _setYearsToUpdate;
};
package com.medusa.gruul.platform.model.dto;

import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import org.hibernate.validator.constraints.Range;

import javax.validation.constraints.NotNull;

/**
 * @author whh
 * @description
 * @date 2020/8/8
 */
@Data
public class ShopPackageOrderDto {

    @ApiModelProperty(value = "Operation type: 1-subscribe 2-renew 3-upgrade")
    @NotNull(message = "operation type is empty", groups = {User.class, Admin.class})
    @Range(min = 1, max = 3, message = "invalid operation type", groups = {User.class, Admin.class})
    private Integer optionType;

    @ApiModelProperty(value = "Payment method: 1-balance 2-WeChat 3-Alipay 4-remittance 5-system")
    @NotNull(message = "payment method is empty", groups = User.class)
    @Range(min = 1, max = 4, message = "invalid payment method", groups = User.class)
    private Integer payType;

    @ApiModelProperty(value = "Payer info as JSON, filled in for remittance payment")
    private String payInfo;

    @ApiModelProperty(value = "Purchase period, in days")
    private Integer buyPeriod;

    @ApiModelProperty(value = "Id of the selected package")
    @NotNull(message = "no package selected", groups = {User.class, Admin.class})
    private Long packageId;

    @ApiModelProperty(value = "Shop id")
    @NotNull(message = "shop id must not be empty", groups = {User.class, Admin.class})
    private Long shopId;

    @ApiModelProperty(value = "Auto-renew: 0-no 1-yes")
    private Integer autoDeduct;

    @ApiModelProperty(value = "Agreement to the terms: 1-agreed")
    @Range(min = 1, max = 1, message = "terms not agreed", groups = {User.class})
    @NotNull(message = "terms not agreed", groups = {User.class})
    private Integer agreeProtocol;

    /**
     * Order source: 0-user purchase, 1-admin console purchase (shop list: buy, renew),
     * 2-platform gift (granted automatically when a user creates a shop),
     * 3-platform created (shop with a given package created directly for a merchant),
     * 4-agent paid
     */
    @ApiModelProperty(hidden = true)
    private Integer orderSource;

    public interface User {
    }

    public interface Admin {
    }
}
package testutils

import (
	"ethereum-service/model"
	"log"
	"math/big"
	"time"

	"github.com/CHainGate/backend/pkg/enum"
	"github.com/DATA-DOG/go-sqlmock"
	"github.com/google/uuid"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

var (
	chaingateAcc     *model.Account
	merchantAcc      *model.Account
	emptyPayment     *model.Payment
	waitingPayment   *model.Payment
	partiallyPayment *model.Payment
	paidPayment      *model.Payment
)

func createEmptyPayment(acc model.Account, mAcc model.Account) *model.Payment {
	return &model.Payment{
		Account:       &acc,
		Mode:          "Test",
		Base:          model.Base{ID: uuid.New()},
		AccountID:     acc.ID,
		PriceAmount:   100,
		PriceCurrency: "USD",
		UserWallet:    mAcc.Address,
	}
}

func CreatePaymentState(paymentID uuid.UUID, accountID uuid.UUID, state enum.State, amountReceived *big.Int) model.PaymentState {
	// each field receives the value matching its parameter name, which is what every
	// caller (and the mock expectations below) relies on
	return model.PaymentState{
		Base:           model.Base{ID: uuid.New()},
		StatusName:     state.String(),
		AccountID:      accountID,
		AmountReceived: model.NewBigInt(amountReceived),
		PayAmount:      model.NewBigIntFromInt(100000000000000),
		PaymentID:      paymentID,
	}
}

func addWaitingPaymentState(payment model.Payment) *model.Payment {
	state := CreatePaymentState(payment.ID, payment.AccountID, enum.StateWaiting, big.NewInt(0))
	payment.CurrentPaymentStateId = &state.ID
	payment.CurrentPaymentState = state
	payment.PaymentStates = append(payment.PaymentStates, state)
	return &payment
}

func addPartiallyPaidPaymentState(payment model.Payment) *model.Payment {
	state := CreatePaymentState(payment.ID, payment.AccountID, enum.StatePartiallyPaid, big.NewInt(10))
	payment.CurrentPaymentStateId = &state.ID
	payment.CurrentPaymentState = state
	payment.PaymentStates = append(payment.PaymentStates, state)
	return &payment
}

func addPaidPaymentState(payment model.Payment) *model.Payment {
	state := CreatePaymentState(payment.ID, payment.AccountID, enum.StatePaid, big.NewInt(100000000000000))
	payment.CurrentPaymentStateId = &state.ID
	payment.CurrentPaymentState = state
	payment.PaymentStates = append(payment.PaymentStates, state)
	return &payment
}

func GetChaingateAcc() model.Account {
	if chaingateAcc == nil {
		chaingateAcc = model.CreateAccount()
		chaingateAcc.ID = uuid.New()
	}
	return *chaingateAcc
}

func GetMerchantAcc() model.Account {
	if merchantAcc == nil {
		merchantAcc = model.CreateAccount()
		merchantAcc.ID = uuid.New()
	}
	return *merchantAcc
}

func GetEmptyPayment() model.Payment {
	if emptyPayment == nil {
		emptyPayment = createEmptyPayment(GetChaingateAcc(), GetMerchantAcc())
	}
	return *emptyPayment
}

func GetWaitingPayment() model.Payment {
	if waitingPayment == nil {
		waitingPayment = addWaitingPaymentState(GetEmptyPayment())
	}
	return *waitingPayment
}

func GetPartiallyPayment() model.Payment {
	if partiallyPayment == nil {
		partiallyPayment = addPartiallyPaidPaymentState(GetWaitingPayment())
	}
	return *partiallyPayment
}

func GetPaidPayment() model.Payment {
	if paidPayment == nil {
		paidPayment = addPaidPaymentState(GetWaitingPayment())
	}
	return *paidPayment
}

func getPaymentRow(p model.Payment) *sqlmock.Rows {
	return sqlmock.NewRows([]string{"id", "user_wallet", "mode", "price_amount", "price_currency", "current_payment_state_id"}).
		AddRow(p.ID, GetMerchantAcc().Address, "Test", "100", "USD", p.CurrentPaymentStateId)
}

func getAccountRow(a model.Account) *sqlmock.Rows {
	return sqlmock.NewRows([]string{"id", "created_at", "updated_at", "deleted_at", "private_key", "address", "nonce", "used", "remainder"}).
		AddRow(a.ID, time.Now(), time.Now(), time.Now(), a.PrivateKey, a.Address, a.Nonce, true, a.Remainder)
}

func getFreeAccountRow(a model.Account) *sqlmock.Rows {
	return sqlmock.NewRows([]string{"id", "created_at", "updated_at", "deleted_at", "private_key", "address", "nonce", "used", "remainder"}).
		AddRow(a.ID, time.Now(), time.Now(), time.Now(), a.PrivateKey, a.Address, a.Nonce, false, a.Remainder)
}

func getPaymentStatesRow(a model.Account, p model.Payment) *sqlmock.Rows {
	return sqlmock.NewRows([]string{"id", "created_at", "updated_at", "deleted_at", "account_id", "pay_amount", "amount_received", "status_name", "payment_id"}).
		AddRow(p.CurrentPaymentStateId, time.Now(), time.Now(), time.Now(), a.ID, p.CurrentPaymentState.PayAmount, p.CurrentPaymentState.AmountReceived, p.CurrentPaymentState.StatusName, p.ID)
}

func SetupCreatePayment(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	ep := GetEmptyPayment()
	wp := GetWaitingPayment()
	ma := GetMerchantAcc()
	ca := GetChaingateAcc()
	paymentRows := getPaymentRow(ep)
	accRows := getAccountRow(ca)
	stateRows := getPaymentStatesRow(ca, wp)
	mock.ExpectBegin()
	mock.ExpectQuery("INSERT INTO \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, true, ca.Remainder, ca.ID).
		WillReturnRows(accRows)
	mock.ExpectQuery("INSERT INTO \"payment_states\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, wp.CurrentPaymentState.PayAmount, "0", enum.StateWaiting.String(), wp.ID).
		WillReturnRows(stateRows)
	mock.ExpectQuery("INSERT INTO \"payments\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, ma.Address, ep.Mode, ep.PriceAmount, ep.PriceCurrency, wp.CurrentPaymentStateId, wp.ID).
		WillReturnRows(paymentRows)
	mock.ExpectCommit()
	return mock
}

func SetupCreatePaymentWithoutIdCheck(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	ep := GetEmptyPayment()
	wp := GetWaitingPayment()
	ca := GetChaingateAcc()
	paymentRows := getPaymentRow(ep)
	accRows := getAccountRow(ca)
	stateRows := getPaymentStatesRow(ca, wp)
	mock.ExpectBegin()
	mock.ExpectQuery("INSERT INTO \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, true, ca.Remainder, ca.ID).
		WillReturnRows(accRows)
	mock.ExpectQuery("INSERT INTO \"payment_states\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, wp.CurrentPaymentState.PayAmount, "0", enum.StateWaiting.String(), sqlmock.AnyArg()).
		WillReturnRows(stateRows)
	mock.ExpectQuery("INSERT INTO \"payments\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, sqlmock.AnyArg(), ep.Mode, ep.PriceAmount, ep.PriceCurrency, sqlmock.AnyArg(), sqlmock.AnyArg()).
		WillReturnRows(paymentRows)
	mock.ExpectCommit()
	return mock
}

func SetupAllPaymentIntents(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	wp := GetWaitingPayment()
	ma := GetMerchantAcc()
	ca := GetChaingateAcc()
	paymentRows := sqlmock.NewRows([]string{"id", "created_at", "updated_at", "deleted_at", "account_id", "user_wallet", "mode", "price_amount", "price_currency", "current_payment_state_id",
		"CurrentPaymentState__id", "CurrentPaymentState__created_at", "CurrentPaymentState__updated_at", "CurrentPaymentState__deleted_at", "CurrentPaymentState__account_id",
		"CurrentPaymentState__pay_amount", "CurrentPaymentState__amount_received", "CurrentPaymentState__status_name", "CurrentPaymentState__payment_id"}).
		AddRow(wp.ID, time.Now(), time.Now(), time.Now(), ca.ID, ma.Address, wp.Mode, wp.PriceAmount, wp.PriceCurrency, wp.CurrentPaymentStateId,
			wp.CurrentPaymentStateId, time.Now(), time.Now(), time.Now(), ca.ID, wp.CurrentPaymentState.PayAmount, wp.CurrentPaymentState.AmountReceived, wp.CurrentPaymentState.StatusName, wp.ID)
	mock.ExpectQuery("SELECT (.+) FROM \"payments\"").
		WithArgs("waiting", "partially_paid").
		WillReturnRows(paymentRows)
	accRows := getAccountRow(ca)
	mock.ExpectQuery("SELECT (.+) FROM \"accounts\"").
		WithArgs(chaingateAcc.ID).
		WillReturnRows(accRows)
	stateRows := getPaymentStatesRow(ca, wp)
	mock.ExpectQuery("SELECT (.+) FROM \"payment_states\"").
		WithArgs(wp.CurrentPaymentStateId).
		WillReturnRows(stateRows)
	return mock
}

func SetupUpdatePaymentState(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	pp := GetPartiallyPayment()
	wp := GetWaitingPayment()
	ca := GetChaingateAcc()
	stateRows := getPaymentStatesRow(ca, pp)
	accRows := getAccountRow(ca)
	mock.ExpectBegin()
	mock.ExpectQuery("INSERT INTO \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, true, ca.Remainder, ca.ID).
		WillReturnRows(accRows)
	mock.ExpectQuery("INSERT INTO \"payment_states\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, pp.CurrentPaymentState.PayAmount, pp.CurrentPaymentState.AmountReceived, pp.CurrentPaymentState.StatusName, pp.ID).
		WillReturnRows(stateRows)
	mock.ExpectExec("UPDATE").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, pp.UserWallet, pp.Mode, pp.PriceAmount, pp.PriceCurrency, wp.CurrentPaymentStateId, pp.ID).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
	return mock
}

func SetupUpdatePaymentStateToPaid(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	pp := GetPaidPayment()
	ca := GetChaingateAcc()
	stateRows := getPaymentStatesRow(ca, pp)
	accRows := getAccountRow(ca)
	mock.ExpectBegin()
	mock.ExpectQuery("INSERT INTO \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, true, ca.Remainder, ca.ID).
		WillReturnRows(accRows)
	mock.ExpectQuery("INSERT INTO \"payment_states\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, pp.CurrentPaymentState.PayAmount, pp.CurrentPaymentState.AmountReceived, pp.CurrentPaymentState.StatusName, pp.ID).
		WillReturnRows(stateRows)
	mock.ExpectExec("UPDATE").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.ID, pp.UserWallet, pp.Mode, pp.PriceAmount, pp.PriceCurrency, pp.CurrentPaymentStateId, pp.ID).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
	return mock
}

func SetupCreateAccount(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	ca := GetChaingateAcc()
	accRows := getAccountRow(ca)
	mock.ExpectBegin()
	mock.ExpectQuery("INSERT INTO \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, true, ca.Remainder, ca.ID).
		WillReturnRows(accRows)
	mock.ExpectCommit()
	return mock
}

func SetupUpdateAccount(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	ca := GetChaingateAcc()
	mock.ExpectBegin()
	mock.ExpectExec("UPDATE \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, true, ca.Remainder, ca.ID).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
	return mock
}

func SetupUpdateAccountFree(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	ca := GetChaingateAcc()
	ca.Nonce = ca.Nonce + 1
	ca.Used = false
	ca.Remainder = model.NewBigIntFromInt(1000000000000)
	mock.ExpectBegin()
	mock.ExpectExec("UPDATE \"accounts\"").
		WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg(), ca.PrivateKey, ca.Address, ca.Nonce, ca.Used, ca.Remainder, ca.ID).
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectCommit()
	return mock
}

func SetupGetFreeAccount(mock sqlmock.Sqlmock) sqlmock.Sqlmock {
	ca := GetChaingateAcc()
	accRows := getFreeAccountRow(ca)
	mock.ExpectQuery("SELECT (.+) FROM \"accounts\"").
		WithArgs("false").
		WillReturnRows(accRows)
	return mock
}

func NewMock() (sqlmock.Sqlmock, *gorm.DB) {
	db, mock, err := sqlmock.New()
	if err != nil {
		log.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}

	dialector := postgres.New(postgres.Config{
		Conn:       db,
		DriverName: "postgres",
	})
	gormDb, err := gorm.Open(dialector, &gorm.Config{})
	return mock, gormDb
}
/**
 * Harit Kapadia, Jack Farley
 * Ms. Krasteva
 * 2019/May/26
 */
public class QuestCompletionListener {
    /** Wakes every thread currently waiting on this listener's monitor. */
    public void finishQuest() {
        synchronized (this) {
            notifyAll();
        }
    }
}
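The class above only shows the notifying side. A sketch of a hypothetical waiting counterpart, which blocks until finishQuest() fires; QuestAwaiter and the questFinished flag are assumptions added for illustration, not part of the original code.

public class QuestAwaiter {

    private final QuestCompletionListener listener;
    private volatile boolean questFinished = false;

    public QuestAwaiter(QuestCompletionListener listener) {
        this.listener = listener;
    }

    /** Called by the quest code just before listener.finishQuest(). */
    public void markFinished() {
        questFinished = true;
    }

    /** Blocks until finishQuest() calls notifyAll(); the loop guards against spurious wakeups. */
    public void awaitQuest() throws InterruptedException {
        synchronized (listener) {
            while (!questFinished) {
                listener.wait();
            }
        }
    }
}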
Feasibility of an exercise intervention for women with postnatal depression: a pilot randomised controlled trial.

BACKGROUND
Postnatal depression is a serious mental health problem that may be reduced by exercise.

AIM
This study examined the feasibility of an exercise intervention for women with postnatal depression, and assessed which methods of recruitment are most effective.

DESIGN OF STUDY
Randomised controlled trial.

SETTING
General practice and the community.

METHOD
Participants were recruited from various sources and randomised to an exercise intervention or usual care, with follow-up at 12 weeks. As well as assessing feasibility, other trial outcomes included exercise participation and self-efficacy for exercise. Levels of depression were assessed, but the study was not powered to show a difference in this.

RESULTS
The recruitment rate of eligible patients was 23.1%. The highest recruitment rate was via referral from the psychiatric mother and baby unit (9/28; 32.1%), followed by invitation letters from GPs (24/93; 25.8%). Thirty-eight eligible participants were randomised. At follow-up there was no significant difference in exercise participation between groups. The intervention group reported significantly higher self-efficacy for exercise compared to usual care, but depression scores did not differ.

CONCLUSION
Exercise participation over the 12-week period was not significantly increased, possibly because it is difficult to motivate women with postnatal depression to exercise, or because the intervention was not sufficiently intensive. Eligible patients were recruited into this study, but response rates were low. Optimum methods of recruitment in this difficult-to-reach population are required prior to a substantive trial. Further research is imperative given poorly-evidenced recommendations by the National Institute for Health and Clinical Excellence to consider this treatment.
/**
 * Stores the given meta information for the stored object.
 *
 * @param properties properties to store
 * @throws IOException in case of an IO error
 */
public void storeProperties(Map<String, String> properties) throws IOException {
    Properties props = new Properties();
    properties.forEach(props::setProperty);
    try (FileOutputStream out = new FileOutputStream(getPropertiesFile())) {
        props.store(out, "");
    }
}
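A small counterpart sketch, assuming the surrounding class exposes the same getPropertiesFile() used above; loadProperties is a hypothetical helper added only to show the round trip, and it needs java.io.FileInputStream and java.util.HashMap alongside the imports the class already uses.

/** Hypothetical counterpart to storeProperties(): reads the same file back into a map. */
public Map<String, String> loadProperties() throws IOException {
    Properties props = new Properties();
    try (FileInputStream in = new FileInputStream(getPropertiesFile())) {
        props.load(in);
    }
    Map<String, String> result = new HashMap<>();
    for (String key : props.stringPropertyNames()) {
        result.put(key, props.getProperty(key));
    }
    return result;
}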
// Import Node module
import request from 'supertest';

// Import express app
import app from '../src/app';

// Test root API endpoint
// API root should return a 404; this will be updated at some point to 501 Not Implemented
describe('GET /find-tenant', () => {
  it('Responds with a 404 message', async () => {
    const result = await request(app).get('/find-tenant');
    expect(result.statusCode).toEqual(404);
  });
});

// Test by-domain API endpoint - invalid tenant
describe('GET /find-tenant/by-domain/:tenant', () => {
  it('Responds with a 404 message', async () => {
    const result = await request(app).get('/find-tenant/by-domain/invalidTenant');
    expect(result.statusCode).toEqual(404);
    expect(result.body).toEqual({
      statusCode: 404,
      statusMessage: 'Failed',
      data: { errorMessage: 'Tenant not found', invalidTenant: 'invalidTenant' },
    });
  });
});

// Test by-domain API endpoint - microsoft.com
describe('GET /find-tenant/by-domain/:tenant', () => {
  it('Responds with a 200 message', async () => {
    const result = await request(app).get('/find-tenant/by-domain/microsoft.com');
    expect(result.statusCode).toEqual(200);
    expect(result.body).toEqual({
      statusCode: 200,
      statusMessage: 'Success',
      data: { tenantId: '72f988bf-86f1-41af-91ab-2d7cd011db47', tenantRegion: 'WW' },
    });
  });
});
import logging
import os
import re

from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


class HtmlLinkReplacer:
    """Replaces links in html.

    Used to keep links working when including dropbox .html files in pages.
    Uses BeautifulSoup html parser.
    """

    def __init__(self):
        pass

    def replace_links(self, html, baseURL, currentpath):
        """Prepend baseURL to all relative links in <a> and <img> elements in html.

        Keyword arguments:
        html        -- string with html content
        baseURL     -- prepend to each link, cannot be traversed up
        currentpath -- path to prepend, this can be traversed up by links using ../
        """
        soup = BeautifulSoup(html, "html.parser")
        soup = self.replace_a(soup, baseURL, currentpath)
        soup = self.replace_img(soup, baseURL, currentpath)
        return soup.renderContents()

    def replace_a(self, soup, baseURL, currentpath):
        for a in soup.findAll("a"):
            a = self.fix_url(a, baseURL, currentpath)
        return soup

    def replace_img(self, soup, baseURL, currentpath):
        for a in soup.findAll("img"):
            a = self.fix_url(a, baseURL, currentpath)
        return soup

    def refers_to_file(self, url):
        (start, ext) = os.path.splitext(url)
        if ext:
            return True
        else:
            return False

    def get_url(self, a):
        if a.has_attr("src"):
            return a["src"]
        elif a.has_attr("href"):
            return a["href"]
        else:
            return ""

    def fix_url(self, a, baseURL, currentpath):
        """Only rewrite links to files; links to paths are never an included
        file and can just be used as is.
        """
        url = self.get_url(a)
        if url and self.refers_to_file(url):
            return self.replace_url(a, baseURL, currentpath)
        else:
            return a

    def replace_url(self, a, baseURL, currentpath):
        """Problem I am trying to solve:
        Files from the project's file folder can be included. These included
        files can have links in them to other files in the file folder, or to
        other pages on comic.

        Desiderata:
        * Html files in a folder structure in the file folder should work as
          expected, with relative, absolute and relative absolute links.
        * Security should remain: there should be no way to serve any file or
          content if the project's organizer does not want this.

        Solution:
        We need to treat file urls and page urls differently.
        * For each file in a project's file folder (anything with an extension),
          replace any link like href="/image/img1.jpg" by prepending baseURL
          and currentpath. baseURL cannot be traversed upward, so it is always
          prepended regardless of the url requested; (../../../file) will not
          end up outside this URL. currentpath can be traversed upward by ../

        Handles root-relative urls (e.g. "/admin/index.html") and regular
        relative urls (e.g. "images/test.png") correctly.
        """
        if a.has_attr("src"):
            url = a["src"]
        elif a.has_attr("href"):
            url = a["href"]
        else:
            raise AttributeError(
                "Trying to replace a link which has no src and no href "
                "attribute. This should never happen."
            )

        # leave absolute links alone
        if re.match("http://", url) or re.match("https://", url):
            pass
        # for root-relative links
        elif re.match("/", url):
            url = baseURL + url
        # regular relative links
        elif re.match(r"\w", url):  # match matches start of string, \w = any alphanumeric
            url = baseURL + currentpath + url
        # go up path if ../ are in link
        else:
            if currentpath.endswith("/"):
                # remove trailing slash to make first path.dirname actually go
                # up one dir
                currentpath = currentpath[:-1]

            # while re.match(r'\.\.', url):  # remove "../"
            #     url = url[3:]
            #     # go up one in currentpath
            #     if currentpath == "":
            #         pass
            #         # going up the path would go outside COMIC dropbox bounds.
            #         # TODO: maybe throw some kind of outsidescope error?
            #     else:
            #         currentpath = os.path.dirname(currentpath)

            if currentpath.endswith("/"):
                pass
            else:
                if not currentpath == "":
                    currentpath = currentpath + "/"
            url = baseURL + currentpath + url

        # remove double slashes because this can mess up django's url system
        url = url.replace("//", "/")
        # but this also removes double slashes in http://. Reverse this.
        url = re.sub(r"http:/(?=\w)", "http://", url)

        if a.has_attr("src"):
            a["src"] = url
        elif a.has_attr("href"):
            a["href"] = url
        else:
            logger.warning(
                "Trying to replace a link which has no src and no href "
                "attribute. This should never happen."
            )

        return a
import logging

import numpy as np

log = logging.getLogger(__name__)


def infer_scene_from_dataset(x, y):
    """Infer the GOES-R scene id and nadir resolution from fixed-grid coordinate arrays."""
    from lmatools.grid.fixed import goesr_conus, goesr_meso, goesr_full, goesr_resolutions
    rtol = 1.0e-2

    dx = np.abs(x[1] - x[0])
    resolution = '{:d} microradian at nadir'.format(int(np.round(dx * 1e6)))
    for km, microrad in goesr_resolutions.items():
        if np.allclose(microrad, dx, rtol=rtol):
            resolution = km.replace('.0', '') + ' at nadir'

    spanEW = x.max() - x.min()
    spanNS = y.max() - y.min()
    log.debug("Inferring scene from spans x={0}, y={1}".format(spanEW, spanNS))
    if (np.allclose(spanEW, goesr_full['spanEW'], rtol=rtol) &
            np.allclose(spanNS, goesr_full['spanNS'], rtol=rtol)):
        scene_id = "FULL"
    elif (np.allclose(spanEW, goesr_conus['spanEW'], rtol=rtol) &
            np.allclose(spanNS, goesr_conus['spanNS'], rtol=rtol)):
        scene_id = "CONUS"
    elif (np.allclose(spanEW, goesr_meso['spanEW'], rtol=rtol) &
            np.allclose(spanNS, goesr_meso['spanNS'], rtol=rtol)):
        scene_id = "MESO1"
    elif np.allclose(spanEW, 0.172732, rtol=rtol):
        scene_id = "CONUS"
    else:
        scene_id = "OTHER"
    return scene_id, resolution
def defined_by_parent(self):
    if self.get_parent() is None:
        return False
    for base in getBaseClasses(self.get_parent().obj):
        base_obj = getattr(base, self.name, None)
        if self.obj is base_obj:
            return False
    return True
ORLANDO — Senator John McCain opened up a hard-hitting political attack on Senator Barack Obama's national security credentials and stepped up his rhetoric against the Russians in Georgia in a speech on Monday aimed at showcasing his ability to be commander-in-chief.

In an address to a Veterans of Foreign Wars convention, Mr. McCain criticized what he called Mr. Obama's "shifting positions'' on the troop escalation, or surge, in Iraq, and charged that when his rival voted against funding the troops in a single vote in 2007, he had tried to "legislate'' failure.

"This was back when supporting America's efforts in Iraq entailed serious political risk,'' Mr. McCain said. "It was a clarifying moment. It was a moment when political self-interest and the national interest parted ways.''

Mr. McCain went on: "Instead, Senator Obama commits the greater error of insisting that even in hindsight, he would oppose the surge. Even in retrospect, he would choose the path of retreat and failure for America over the path of success and victory. In short, both candidates in this election pledge to end this war and bring our troops home. The great difference, the great difference, is that I intend to win it first.''

Mr. McCain was referring to a vote against troop funding that Mr. Obama cast in 2007 because the legislation did not include a timetable for withdrawal. Mr. Obama has voted for all other war-financing bills since he entered the Senate in 2005.

Moving on to Georgia, Mr. McCain raised questions about Mr. Obama's ability to handle the crisis.

"Behind all of these claims and positions by Senator Obama lies the ambition to be president,'' Mr. McCain said. "What's less apparent is the judgment to be commander in chief. And in matters of national security, good judgment will be at a premium in the term of the next president — as we were all reminded ten days ago by events in the nation of Georgia.''

Mr. McCain renewed his threat to consider throwing Russia out of the G-8 group of leading industrialized democracies and bar it from membership in the World Trade Organization. As president, he said, he would take a tough line.

"In cooperation with our friends and allies in Europe, we will make it clear to Russia's rulers that acts of violence and intimidation come at a heavy cost,'' Mr. McCain said. "There will be no place among G-8 nations, or in the W.T.O., for a modern Russia that acts at times like the old Soviet Union. The Cold War is over, the Soviet empire is gone, and neither one is missed.''

The Obama campaign was quick to respond to the speech before the veterans group, which Mr. Obama is set to address on Tuesday. Bill Burton, a campaign spokesman, issued this statement: "All his bluster, distortions and negative attacks notwithstanding, it is hard to understand how Senator McCain can at once proclaim his support for the sovereign government of Iraq, and then stubbornly defy their expressed support for a timeline to remove our combat brigades from their country. The difference in this race is that John McCain is intent on spending $10 billion a month on an open-ended war, while Barack Obama thinks we should bring this war to a responsible end and invest in our pressing needs here at home."
<reponame>Learner1729/planningModule /** * MIT License * * Copyright (c) 2018 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE */ /** * @file VisualizeMapSDL.h * @version 1.0 * @author <NAME> * @brief This header file is used to Visualize the Map of an environment. * * VisualizeMapSDL class is used to Visualize the 2D map of an environment. * Given a Occupancy matrix, and dimensions of the map it draws the 2D map * of an environment. It doesn't have any role in implementing planning * algorithm. It is only used for showing visual demo of AStar algorithm. * * @last-modified 10-22-2018 */ #ifndef INCLUDE_VISUALIZEMAPSDL_H_ #define INCLUDE_VISUALIZEMAPSDL_H_ // C++ header file #include <string> // Including externally dependent library file used to create // window for visualizing map of an environment #include <SDL.h> class VisualizeMapSDL { public: /** * @brief VisualizeMapSDL default constructor * * Used when object of the class doesn't need to assign values to member * variables. Internally, calls init member function to initialize the * properties used to create and update window screen. * * @param none * @return none */ VisualizeMapSDL(); /** * @brief VisualizeMapSDL parameterized constructor * * Used in a case when different values are needed to be assigned to member * variables. Internally, calls default constructor which in turn calls init * member function to initialize the properties used to create and update * window screen. * * @param width: used to set the width of the window * @param height: used to set the height of the window * @return none */ VisualizeMapSDL(int width, int height); /** * @brief VisualizeMapSDL destructor * * Used to destroy SDL objects created. 
 *
 * @param none
 * @return none
 */
~VisualizeMapSDL();

/**
 * @brief function to detect window events like mouse click & key press
 * @param none
 * @return none
 */
void detectEvent();

/**
 * @brief function which is used to put things on the screen
 * @param none
 * @return none
 */
void clear() const;

/**
 * @brief function to delay the window update speed
 * @param none
 * @return none
 */
void callDelay() const;

/**
 * @brief function to draw a single pixel on the screen at the given position
 * @param xPos, integer type, denotes x coordinates of the screen
 * @param yPos, integer type, denotes y coordinates of the screen
 * @param color, integer type, value: 1,2,3
 *        1: To select color for start position (GREEN)
 *        2: To select color for end position (RED)
 *        3: To select color to draw path (BLUE)
 *        Default: Obstacle, color for obstacles (BLACK)
 * @return none
 */
void drawPixel(int xPos, int yPos, int color);

/**
 * @brief function to update the screen
 * @param none
 * @return none
 */
void updateWindow() const;

/**
 * @brief Function used to set the rate at which the window should be
 *        updated.
 * @param delay, integer type, stores the delay time in milliseconds
 * @return none
 */
void setDelay(int delay);

/**
 * @brief Function used to get the status of the window
 * @param none
 * @return bool, status of closed_ member variable
 */
inline bool isClosed() const { return closed_; }

 private:
/**
 * @brief Initialize SDL properties used to create and update window screen.
 * @param none
 * @return none
 */
bool init();

 private:
// variable used to give the title to our window
std::string title_{"Environment"};
// variable to define screen width, unit: number of pixels
int screenWidth_{640};
// variable to define screen height, unit: number of pixels
int screenHeight_{480};
// variable used to keep track of whether the window is closed or not
bool closed_{false};
// variable used to define the update rate at which window is to be updated
// unit: milliseconds
int delay_{1000};
// SDL Window object is used to set all the properties related to window,
// it's initialized to nullptr. Its properties will be set using the init
// member function
SDL_Window *window_{nullptr};
// SDL Renderer is basically used to draw things on to the window
SDL_Renderer *renderer_{nullptr};
};

#endif  // INCLUDE_VISUALIZEMAPSDL_H_
/** * Tools needed to run a private IOTA testnet. * * (c) 2017 Michael Schierl. Licensed under MIT License. */ public class Main { public static void main(String[] args) throws Exception { if (args.length == 1 && args[0].equals("SnapshotBuilder")) { //TestnetSnapshotBuilder.main(args); } else if (args.length >= 1 && args[0].equals("Coordinator")) { TestnetCoordinator.main(Arrays.copyOfRange(args, 1, args.length)); } else if (args.length >= 1 && args[0].equals("PeriodicCoordinator")) { PeriodicCoordinator.main(Arrays.copyOfRange(args, 1, args.length)); } else { System.out.println("Do you want Coordinator or SnapshotBuilder?"); } } }
/** * (Re)schedule this timer. Discards any previously scheduled timeout. * * @param delay delay before expiration in milliseconds * @throws IllegalStateException if the lock object is not locked */ public void timeoutAfter(int delay) { assert Thread.holdsLock(this.raft); Preconditions.checkArgument(delay >= 0, "delay < 0"); this.cancel(); assert this.future == null; assert this.pendingTimeout == null; assert this.timeoutDeadline == null; this.timeoutDeadline = new Timestamp().offset(delay); if (this.log.isTraceEnabled()) { this.raft.trace("rescheduling " + this.name + " for " + this.timeoutDeadline + " (" + delay + "ms from now)"); } Preconditions.checkArgument(!this.timeoutDeadline.isRolloverDanger(), "delay too large"); this.pendingTimeout = new PendingTimeout(); try { this.future = this.raft.serviceExecutor.schedule(this.pendingTimeout, delay, TimeUnit.MILLISECONDS); } catch (RejectedExecutionException e) { if (!this.raft.shuttingDown) this.raft.warn("can't restart timer", e); } }
/**
 * Deletes all documents that match the given filter.
 */
@Override
public void deleteManyDocument() {
    MongoDatabase db = null;
    MongoCollection collection = null;
    Bson query = null;
    try {
        db = client.getDatabase(mongo.getDataBase());
        collection = db.getCollection(mongo.getSampleCollection());
        query = lt("age", 20);
        DeleteResult result = collection.deleteMany(query);
        if (result.wasAcknowledged()) {
            log.info("Documents deleted successfully \nNo of Document(s) Deleted : " + result.getDeletedCount());
        }
    } catch (MongoException e) {
        log.error("Exception occurred while deleting many documents : " + e, e);
    }
}
<gh_stars>0 // Copyright 2021 The Project Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package api import ( "bytes" "encoding/base64" "errors" "fmt" "strconv" ) // OriginV0 is the Checkpoint origin string for this log. // TODO: extract into flag const OriginV0 = "ArmoryDrive Log v0" // Checkpoint represents a minimal log checkpoint. type Checkpoint struct { // Origin is the unique identifier for the log issuing this checkpoint. Origin string // Size is the number of entries in the log at this checkpoint. Size uint64 // Hash is the hash which commits to the contents of the entire log. Hash []byte } // Unmarshal parses the common formatted checkpoint data and stores the result // in the Checkpoint. // // The supplied data is expected to begin with the following 3 lines of text, // each followed by a newline: // - <Origin string> // - <decimal representation of log size> // - <base64 representation of root hash> // // There must be no extraneous trailing data. func (c *Checkpoint) Unmarshal(data []byte) error { l := bytes.SplitN(data, []byte("\n"), 4) if len(l) < 4 { return errors.New("invalid checkpoint - too few newlines") } origin := string(l[0]) // TODO: extract this check elsewhere or pass in expected origin. if origin != OriginV0 { return fmt.Errorf("invalid checkpoint - incorrect origin %q", origin) } size, err := strconv.ParseUint(string(l[1]), 10, 64) if err != nil { return fmt.Errorf("invalid checkpoint - size invalid: %w", err) } h, err := base64.StdEncoding.DecodeString(string(l[2])) if err != nil { return fmt.Errorf("invalid checkpoint - invalid hash: %w", err) } if xl := len(l[3]); xl > 0 { return fmt.Errorf("invalid checkpoint - %d bytes of unexpected trailing data", xl) } *c = Checkpoint{ Origin: origin, Size: size, Hash: h, } return nil }
/** * @author Konstantin Krismer */ public class RegExMethodWidgetState extends State { private String regEx; public RegExMethodWidgetState() { } public RegExMethodWidgetState(String regEx) { super(); this.regEx = regEx; } public String getRegEx() { return regEx; } public void setRegEx(String regEx) { this.regEx = regEx; } }
<gh_stars>100-1000 /** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler; public class Constants { public static final String MAX_PARTITIONS = "maxPartitions"; public static final String SQS_CONSUMER = "sqsConsumer"; public static final String SQS_PRODUCER = "sqsProducer"; public static final String INBOUND_QUEUE_NAME = "inboundQueueName"; }
#include<iostream>
#include<cstdio>
#include<cstring>
#include<algorithm>
#include<queue>
#include<string>
using namespace std;

// Greedily scan the digit string from right to left, cutting off a piece as
// soon as its digit sum is divisible by 3, the current digit itself is
// divisible by 3, or the piece already holds 3 digits (by the pigeonhole
// principle on prefix sums mod 3, three digits always suffice). Counts the
// number of pieces cut.
int main()
{
    string str;
    cin >> str;
    long long num_1 = 0;  // number of divisible pieces found so far
    int num_2 = 0;        // digits collected in the current piece
    long long sum = 0;    // digit sum of the current piece
    for (int i = str.size() - 1; i >= 0; i--)
    {
        sum += (str[i] - '0');
        num_2++;
        if (sum % 3 == 0 || (str[i] - '0') % 3 == 0 || num_2 == 3)
        {
            num_1++;
            num_2 = 0;
            sum = 0;
        }
    }
    cout << num_1 << endl;
    return 0;
}
import bson

def delete_borrower(self, objectid):
    if not isinstance(objectid, bson.objectid.ObjectId):
        raise TypeError(f"objectid is not a BSON ObjectId: {objectid}")
    return self._borrowers.find_one_and_delete({"_id": objectid})
/**
 * FileLogger.cpp
 *
 * log++ - Intuitive logging library for C++ written by <NAME>.
 * Co-author: <NAME>
 */

/****************************
 *      Local Includes      *
 ****************************/
#include "FileLogger.hpp"
#include "LogExtensions.hpp"

/***************************
 *     System Includes     *
 ***************************/
#include <cstring>
#include <exception>
#include <fstream>

#ifndef logpp_USE_FSTAT
    #if __cplusplus < 201703L
        #include <experimental/filesystem>
    #else
        #include <filesystem>
    #endif
#else
    #include <sys/stat.h>
#endif

namespace logpp {

    using std::cout;
    using std::endl;
    using std::ifstream;
    using std::invalid_argument;
    using std::ios_base;
    using std::ofstream;
    using std::to_string;

#ifndef logpp_USE_FSTAT
    #if __cplusplus < 201703L
        namespace fs = std::experimental::filesystem;
    #else
        namespace fs = std::filesystem;
    #endif
#endif

    const static uint64_t ONE_MIB = 1048576u;

    const string FileLogger::LOGPP_CTRL_DIR = ".logpp";
    const uint32_t FileLogger::CTRL_FILE_MAGIC = 0xf00dbeef;
    const uint32_t FileLogger::DEFAULT_MAX_LOG_FILES = 4;

    /**
     * @brief Construct a new FileLogger::FileLogger object
     *
     * @param logName The name for this logger.
     * @param maxLogLevel The maximum logging level to log.
     * @param filename The path/to/file for printing inside
     * @param bufferSize The maximum buffer size before flushing.
     * @param maxFileSize The maximum size of a single log file in MiB before rotating.
     * @param flushBufferAfterWrite Indicates whether to flush the buffer after each write to it.
     * @param createFileIfNotExists Indicates whether to create the log file if it does not exist.
     */
    FileLogger::FileLogger(const string& logName, const LogLevel maxLogLevel, const string& filename,
                           const uint32_t bufferSize, const uint32_t maxFileSize,
                           const bool flushBufferAfterWrite, const bool createFileIfNotExists):
    ILogger(logName, maxLogLevel, bufferSize, flushBufferAfterWrite),
    _maxFileCount(DEFAULT_MAX_LOG_FILES), _maxFileSize(maxFileSize), _filename(filename) {
        initLogContinuation();
    }

    /**
     * @brief Destroy the FileLogger::FileLogger object
     */
    FileLogger::~FileLogger() {}

    /**
     * @brief Checks if a file exists.
     *
     * @param filename name of requested file
     *
     * @return true if file exists, false else
     */
    bool FileLogger::fileExists(const string& filename) {
        #ifdef logpp_USE_FSTAT
        struct stat buffer;
        return (stat(filename.c_str(), &buffer) == 0);
        #else
        return fs::exists(filename);
        #endif
    }

    /**
     * @brief Gets the path to the control file for the current logger.
     *
     * @return string The path to the control file.
     */
    string FileLogger::getControlFilePath() const {
        return formatString(
            "%s/%s/%s.lcf",
            logpp::getBaseName(_filename).c_str(),
            FileLogger::LOGPP_CTRL_DIR.c_str(),
            getCurrentLoggerName().c_str()
        );
    }

    /**
     * @brief Returns the size of a file in bytes.
     *
     * @param filename is the name of requested file
     *
     * @return size of file in bytes
     */
    uint32_t FileLogger::fileSize(const string& filename) {
        #ifdef logpp_USE_FSTAT
        struct stat buffer;
        stat(filename.c_str(), &buffer);
        return buffer.st_size;
        #else
        return static_cast<uint32_t>(fs::file_size(filename));
        #endif
    }

    /**
     * @brief Writes a message to the underlying log buffer and flushes the buffer accordingly.
     *
     * @remarks Bad log levels (log levels above four) will cause the buffer to always be flushed in this default implementation!
     *
     * @param level The level of the current log.
     * @param msg The (formatted) message to output.
     */
    void FileLogger::logMessage(const LogLevel level, const string& msg) {
        if (level > getCurrentMaxLogLevel()) return;

        getLogBuffer() << msg;

        // Add the missing line feed if the message ends with neither LF nor CR
        // (the original "||" here was always true and appended a newline unconditionally).
        if (msg.back() != '\n' && msg.back() != '\r') {
            getLogBuffer() << getOsNewLineChar();
        }

        // Now check if we need to flush
        if (isBadLog(level) || (getMaxBufferSize() == 0 || getBufferSize() >= getMaxBufferSize()) || flushBufferAfterWrite()) {
            flushBuffer();
        }
    }

    /**
     * @brief Writes the buffer into the given file. If the file grows beyond _maxFileSize (in MiB),
     * a new file with an incremented end number is created.
     */
    void FileLogger::flushBuffer() {
        bool changedLogNo = false;

        if (fileSize(formatString("%s%d", _filename.c_str(), _numLogs)) >= _maxFileSize * ONE_MIB) {
            _numLogs = (_numLogs > _maxFileCount ? 0 : _numLogs + 1);
            changedLogNo = true;
            storeLatestLogFile();
        }

        ofstream outStream(formatString("%s%d", _filename.c_str(), _numLogs), (changedLogNo ? ios_base::trunc : ios_base::app));
        outStream << getLogBufferAsString();

        clearStringStream(getLogBuffer());
    }

    void FileLogger::initLogContinuation() {
        // Open in binary mode; the control file holds raw struct bytes.
        ifstream inStream(getControlFilePath(), ios_base::binary);

        // uint8_t buffer[fileSize(getControlFilePath())] = { 0 };
        const auto ctrlFileSize = fileSize(getControlFilePath());
        uint8_t* buffer = new uint8_t[ctrlFileSize];
        memset(buffer, 0, ctrlFileSize);

        ControlFileContents contents = { 0 };

        // Read the raw bytes directly; operator>> would stop at whitespace and
        // mangle binary data.
        inStream.read(reinterpret_cast<char*>(buffer), ctrlFileSize);
        std::memcpy(&contents, buffer, sizeof(contents));
        delete[] buffer;

        if (contents.magicNumber == CTRL_FILE_MAGIC) {
            _numLogs = contents.currentWrittenLogFile;
        }
    }

    void FileLogger::storeLatestLogFile() {
        ofstream outStream(getControlFilePath(), ios_base::trunc | ios_base::binary);
        const ControlFileContents ctrlFile { CTRL_FILE_MAGIC, _numLogs };

        const char* fileContents = reinterpret_cast<const char*>(&ctrlFile);
        outStream.write(fileContents, sizeof(ctrlFile));
        outStream.flush();
    }
}
/**
 * Prints the Area and all Evu's in a human readable format
 * to a file.
 * @param outputFile is a File.
 */
public void printAll (File outputFile) throws SimpplleError {
    PrintWriter fout;
    try {
        fout = new PrintWriter(new FileOutputStream(outputFile));
        printAll(fout);
        fout.flush();
        fout.close();
    }
    catch (IOException e) {
        System.out.println("Problems writing output file: " + e.getMessage());
    }
}
// Score is called on each filtered node. It must return success and an integer // indicating the rank of the node. All scoring plugins must return success or // the pod will be rejected. func (s *Scheduler) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) { podGroup, _, err := s.podGroupManager.podGroup(pod) if err != nil { return 0, framework.NewStatus(framework.Error, fmt.Sprintf("cannot get pod group: %s", err.Error())) } if podGroup != nil { node, err := s.podGroupManager.snapshotSharedLister.NodeInfos().Get(nodeName) if err != nil { klog.Errorf("Cannot get nodeInfos from frameworkHandle: %v", err) return 0, framework.NewStatus(framework.Error, err.Error()) } for _, friend := range node.Pods { names := getPodGroupNameSliceFromPod(friend.Pod) if friend.Pod.Namespace == podGroup.Namespace && !names.IsEmpty() && names[0] == podGroup.Name { return 100, framework.NewStatus(framework.Success, "") } } } return 0, framework.NewStatus(framework.Success, "") }
/**
 * This exception is thrown if a DataReader finds an error in a read line,
 * e.g. a wrong column number.
 *
 * @author Christian Schmidt 2005
 */
public class LineErrorException extends Exception {
    public int tokenCount;

    /**
     *
     */
    public LineErrorException() {
        super();
    }

    /**
     * @param arg0
     */
    public LineErrorException(String arg0) {
        super(arg0);
    }

    /**
     * @param arg0
     */
    public LineErrorException(Throwable arg0) {
        super(arg0);
    }

    /**
     * @param arg0
     * @param arg1
     */
    public LineErrorException(String arg0, Throwable arg1) {
        super(arg0, arg1);
    }
}
def maxpeer_searched(self): assert self.maxpeers == -1 or len(self.peers) <= self.maxpeers return self.maxpeers > -1 and len(self.peers) == self.maxpeers
<gh_stars>10-100 package cn.chuanwise.xiaoming.contact.contact; import cn.chuanwise.xiaoming.bot.XiaomingBot; import cn.chuanwise.xiaoming.contact.message.Message; import cn.chuanwise.xiaoming.event.SendMessageEvent; import cn.chuanwise.xiaoming.object.XiaomingObjectImpl; import lombok.Getter; import net.mamoe.mirai.contact.Contact; import net.mamoe.mirai.message.data.MessageChain; import java.util.Optional; @Getter public abstract class XiaomingContactImpl<C extends Contact> extends XiaomingObjectImpl implements XiaomingContact<C> { final C miraiContact; public XiaomingContactImpl(XiaomingBot xiaomingBot, C miraiContact) { super(xiaomingBot); this.miraiContact = miraiContact; } @Override public Optional<Message> sendMessage(MessageChain messages) { final SendMessageEvent event = new SendMessageEvent(this, messages); xiaomingBot.getEventManager().callEvent(event); if (event.isCancelled()) { return Optional.empty(); } else { return event.getMessageBox().toOptional(); } } }
<gh_stars>10-100
package com.hyd.ssdb.conn;

import com.hyd.ssdb.*;
import com.hyd.ssdb.conf.Server;
import com.hyd.ssdb.protocol.Block;
import com.hyd.ssdb.protocol.Request;
import com.hyd.ssdb.protocol.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

/**
 * A wrapper around a Socket. Once sending or reading fails, the Connection
 * sets available to false and is then removed from the connection pool.
 *
 * @author Yiding
 */
public class Connection {

    static final Logger LOG = LoggerFactory.getLogger(Connection.class);

    private final Socket socket;       // the underlying network socket

    private final String pass;         // authentication password sent once the connection succeeds

    private final int buffer;          // size of the buffer used when reading data

    private final Map<String, Object> properties = new HashMap<>();  // additional properties

    private boolean available;         // availability flag

    public Connection(Server server) {
        this(server.getHost(), server.getPort(), server.getPass(),
                server.getSocketConfig().getSoTimeout(), server.getSocketConfig().getSoBufferSize());
    }

    public Connection(String host, int port, String pass, int soTimeout, int soBuffer) {
        try {
            this.socket = new Socket(host, port);
            this.socket.setSoTimeout(soTimeout);
            this.pass = pass;
            this.buffer = soBuffer;
            this.available = true;
            this.properties.put("host", host);
            this.properties.put("port", port);

            if (this.pass != null) {
                auth();
            }
        } catch (IOException e) {
            throw new SsdbSocketFailedException(e);
        }
    }

    private void auth() {
        send(new Request("auth", this.pass).toBytes());
        Response response = receivePacket();
        String header = response.getHead().toString();
        if (!header.equals("ok")) {
            throw new SsdbAuthFailedException();
        }
    }

    public String getHost() {
        return this.getProperty("host");
    }

    public int getPort() {
        return (Integer) this.getProperty("port");
    }

    @SuppressWarnings("unchecked")
    public <T> T getProperty(String propName) {
        return (T) this.properties.get(propName);
    }

    public void setProperty(String propName, Object propValue) {
        this.properties.put(propName, propValue);
    }

    public boolean hasProperty(String propName) {
        return this.properties.containsKey(propName);
    }

    public void removeProperty(String propName) {
        this.properties.remove(propName);
    }

    public void send(byte[] bytes) {
        if (bytes == null || bytes.length == 0) {
            throw new SsdbClientException("Cannot send empty content: " + Arrays.toString(bytes));
        }

        try {
            OutputStream outputStream = this.socket.getOutputStream();
            outputStream.write(bytes);
            outputStream.flush();
            LOG.debug("Send {} bytes.", bytes.length);
        } catch (IOException e) {
            this.available = false;
            throw new SsdbSocketFailedException(e);
        }
    }

    public Response receivePacket() {
        return receivePacket(AbstractClient.DEFAULT_CHARSET);
    }

    public Response receivePacket(Charset charset) {
        ByteArrayOutputStream bos = null;
        Response response = new Response(charset);

        try {
            InputStream inputStream = this.socket.getInputStream();
            bos = new ByteArrayOutputStream(10240);

            StringBuilder numSb = new StringBuilder();
            byte b;
            int dataLength = 0, dataCounter = 0, lengthCounter = 0;
            int blockStatus = 0;     // 0=ready, 1=receiving_length, 2=receiving_data, 3=data_finished
            int responseStatus = 0;  // 0=ready, 1=head_received

            byte[] bs = new byte[this.buffer];
            while (true) {
                int len = inputStream.read(bs);
                if (len == -1) {
                    break;
                } else {
                    lengthCounter += len;
                }

                for (int i = 0; i < len; i++) {
                    b = bs[i];
                    if (b == '\n') {
                        if (blockStatus == 0) {
                            LOG.debug("Received {} bytes.", lengthCounter);
bytes.", lengthCounter); return response; // 方法唯一的正确出口 } else if (blockStatus == 1) { dataLength = Integer.parseInt(numSb.toString()); bos.reset(); numSb.setLength(0); // 如果数据长度为 0,则跳过状态2 if (dataLength == 0) { blockStatus = 3; } else { blockStatus = 2; } } else if (blockStatus == 2) { bos.write(b); dataCounter += 1; if (dataCounter >= dataLength) { blockStatus = 3; dataCounter = 0; } } else { // blockStatus == 3 blockStatus = 0; Block block = new Block(bos.toByteArray()); if (responseStatus == 0) { response.setHead(block); responseStatus = 1; } else { response.addBodyBlock(block); } } } else { bos.write(b); if (blockStatus == 0) { blockStatus = 1; numSb.append((char) b); } else if (blockStatus == 1) { numSb.append((char) b); } else if (blockStatus == 2) { dataCounter += 1; if (dataCounter >= dataLength) { blockStatus = 3; dataCounter = 0; } } else { // status == 3 包已读取完毕,此时必须收到 \n throw new SsdbException("Illegal packet: " + Arrays.toString(bos.toByteArray())); } } } } throw new SsdbException("Invalid packet"); } catch (SocketTimeoutException e) { this.available = false; throw new SsdbSocketFailedException("Socket timed out, already read: " + (bos == null ? "" : bos.toString()), e); } catch (IOException e) { this.available = false; throw new SsdbSocketFailedException(e); } catch (SsdbException e) { this.available = false; throw e; } } public void close() throws IOException { this.socket.close(); } public boolean isAvailable() { return this.available; } @Override public String toString() { return "Connection{host='" + getHost() + "',port=" + getPort() + ",available=" + this.available + "}"; } }
/** * Utils for import spacing. * * @author Ben Leitner */ class ImportUtil { private ImportUtil() { // No instantiation } static boolean needsNewLine(@Nullable String priorImport, String currentImport) { if (priorImport == null) { return true; } String[] priorParts = priorImport.split("\\."); String[] currentParts = currentImport.split("\\."); int fewerParts = Math.min(priorParts.length, currentParts.length); int lastAgreeingIndex = lastAgreeingIndex(priorParts, currentParts); return lastAgreeingIndex < Math.min(2, fewerParts - 2); } private static int lastAgreeingIndex(String[] first, String[] second) { int fewerParts = Math.min(first.length, second.length); for (int i = 0; i < fewerParts; i++) { if (!first[i].equals(second[i])) { return i - 1; } } return fewerParts; } }
// Snapshot returns an identifier for the current revision of the state. func (sdb *StateDB) Snapshot() int { id := sdb.nextRevisionId sdb.nextRevisionId++ sdb.revision = append(sdb.revision, revision{id, sdb.journal.length()}) return id }
Photolysis of AgHal Single Crystals After Modification of the Surface

As is known, changing the ionic balance in solution leads to a modification of the specific surface. This shows up in changes of the specific surface, of the concentration of low-coordination Br ions on the surface, and of the ionic conductivity. Inasmuch as photolysis is a surface process, studying the particularities of silver allocation after modification of the AgHal crystal surface by photographically active substances is of interest. In this paper, the particularities of Ag-particle allocation were investigated in pure and al samples. The Ag particles allocate on the crystal surface as a result of photolysis.
def problem_joint_fitness(x, y): cf = 10 h_1 = 50 x_1 = 0.75 y_1 = 0.75 s_1 = 1.6 f_1 = h_1 * \ (1 - ((16.0/s_1) * pow((x/cf - x_1), 2)) - ((16.0/s_1) * pow((y/cf - y_1), 2))) h_2 = 150 x_2 = 0.25 y_2 = 0.25 s_2 = 1.0/32.0 f_2 = h_2 * \ (1 - ((16.0/s_2) * pow((x/cf - x_2), 2)) - ((16.0/s_2) * pow((y/cf - y_2), 2))) return max(f_1, f_2)
import os import datetime as dt from watchcbb.scrape.SportsRefScrape import SportsRefScrape from watchcbb.sql import SQLEngine sql = SQLEngine('cbb') srscrape = SportsRefScrape() TODAY = dt.date(2020,2,17) LOOKBACK = 2 OVERWRITE = True SEASON = TODAY.year if TODAY.month < 6 else TODAY.year+1 for iprev in range(LOOKBACK, 0, -1): date = TODAY - dt.timedelta(iprev) print(f"Checking games for {date}") df_today = sql.df_from_query(""" SELECT * FROM game_data WHERE "Season"={season} AND "Date"='{date}' """.format(season=SEASON, date=date)) if df_today.shape[0] != 0: if OVERWRITE: print("Found existing games, deleting") sql.drop_rows('game_data', f""" "Date"='{date}' """) else: print("Found existing games, skipping date") continue print("Downloading game data") gids = srscrape.get_gids_on_date(date) df_newgames = srscrape.get_game_data(SEASON, gids=gids, verbose=True) print(df_newgames)
<reponame>relvacode/gpool-docker package ddpool // NodeWorkerStatus is a representation of the current node workers status. type NodeWorkerStatus struct { Total uint Executing uint } // NodeStorageStatus is a representation of the current node storage status. type NodeStorageStatus struct { Capacity uint64 Allocated uint64 } // NodeStatus is a representation of the current node status. type NodeStatus struct { ID string Hostname string Workers NodeWorkerStatus Health HealthStatus Storage NodeStorageStatus } // NewNodeStatus generates a node status from the given Node. func NewNodeStatus(n *Node) NodeStatus { s := NodeStatus{} s.ID = n.ID s.Hostname = n.Hostname s.Workers.Total = n.Workers s.Storage.Allocated, s.Storage.Capacity, s.Workers.Executing = n.Status() s.Health = n.Health() return s }
<filename>examples/applications/gaussian/gaussian_relu_rbm_training.py import torch import torchvision from learnergy.models.gaussian import GaussianReluRBM # Creating training and testing dataset train = torchvision.datasets.MNIST( root='./data', train=True, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor() ])) test = torchvision.datasets.MNIST( root='./data', train=False, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor() ])) # Creating a GaussianReluRBM model = GaussianReluRBM(n_visible=784, n_hidden=256, steps=1, learning_rate=0.001, momentum=0.9, decay=0, temperature=1, use_gpu=False) # Training a GaussianReluRBM mse, pl = model.fit(train, batch_size=128, epochs=5) # Reconstructing test set rec_mse, v = model.reconstruct(test) # Saving model torch.save(model, 'model.pth') # Checking the model's history print(model.history)
/**
 * Initialize the palette through K-means color quantization.
 */
private void initPalette() {
    KMeansPlusPlusClusterer<PixelClusterable> clusterer =
            new KMeansPlusPlusClusterer<>(paletteSize, 200, new LabDistanceMeasure());
    System.out.println("clustering...");
    System.out.println(PixelClusterable.toClusterable(image).size());
    List<CentroidCluster<PixelClusterable>> centroidClusters =
            clusterer.cluster(PixelClusterable.toClusterable(image));
    System.out.println("clustering complete");
    palette = PixelClusterable.toPixel(centroidClusters);
}
<reponame>AdrianZw/csapex_core_plugins
/// HEADER
#include "segmentation.h"

/// PROJECT
#include <csapex/model/node_modifier.h>
#include <csapex/msg/generic_vector_message.hpp>
#include <csapex/msg/io.h>
#include <csapex/param/parameter_factory.h>
#include <csapex/utility/register_apex_plugin.h>
#include <csapex_scan_2d/scan_message.h>
#include <cslibs_laser_processing/common/yaml-io.hpp>
#include <cslibs_laser_processing/data/segment.h>

using namespace csapex;
using namespace csapex::connection_types;
using namespace lib_laser_processing;

ScanSegmentation::ScanSegmentation()
{
}

void ScanSegmentation::process()
{
    /// WORK
    ScanMessage::ConstPtr scan_msg = msg::getMessage<ScanMessage>(input_);
    const Scan& scan = scan_msg->value;

    std::shared_ptr<std::vector<Segment>> segments_msg(new std::vector<Segment>);
    segmentation_->segmentation(scan, *segments_msg);

    for (Segment& segment : *segments_msg) {
        segment.frame_id = scan_msg->frame_id;
        segment.stamp_micro_seconds = scan_msg->stamp_micro_seconds;
    }

    msg::publish<GenericVectorMessage, Segment>(output_segments_, segments_msg);
}

void ScanSegmentation::setup(NodeModifier& node_modifier)
{
    input_ = node_modifier.addInput<ScanMessage>("Scan");
    output_scan_ = node_modifier.addOutput<ScanMessage>("FilteredScan");
    output_segments_ = node_modifier.addOutput<GenericVectorMessage, Segment>("Segments");
}
import json from collections import defaultdict import os from PIL import Image, ImageDraw # exp = '01222_cascade_r101_dcn_16e_grid' exp = '01231_cascade_r101_dcn_16e_grid_gcb' base_dir = '/data/hope/data/data/wine/data/' test = json.load(open(base_dir + 'chongqing1_round1_testA_20191223/testA.json')) id2im_name = {i['id']:i['file_name'] for i in test['images']} name2wh = {i['file_name']:[i['width'], i['height']] for i in test['images']} print("loading bboxes...") res_bboxes = json.load(open('/data/hope/data/data/wine/models/mmdet/{}/results.pkl.bbox.json'.format(exp))) # res_bboxes = json.load(open('/data/hope/data/data/wine/models/mmdet/01221_cascade_r101_dcn_16e_cj/results_cj.pkl.bbox.json')) # res_bboxes = json.load(open('/data/hope/data/data/wine/models/mmdet/01231_cascade_r101_dcn_16e_grid_gcb/results.pkl.bbox.json')) # res_bboxes = json.load(open('/data/hope/data/data/wine/models/mmdet/01223_reppoints_moment_r101_dcn_fpn_2x/results.pkl.bbox.json')) im_name2bboxes = defaultdict(list) for bbox in res_bboxes: im_name = id2im_name[bbox['image_id']] category_id = bbox['category_id'] #### if bbox['score'] < 0.03: continue ### if name2wh[im_name][0] > 4000 and category_id not in [6, 7, 8]: continue if name2wh[im_name][0] < 4000 and category_id in [6, 7, 8]: continue im_name2bboxes[im_name].append([bbox['score'], category_id] + bbox['bbox']) images, annotations = [], [] im_dir = base_dir + 'chongqing1_round1_testA_20191223/images/' save_dir = base_dir + 'chongqing1_round1_testA_20191223/vis_' + exp os.makedirs(save_dir, exist_ok=True) for im_name, bboxes in im_name2bboxes.items(): max_score = max([bbox[0] for bbox in bboxes]) if max_score < 0.2: continue im = Image.open(im_dir + im_name) draw = ImageDraw.Draw(im) for bbox in bboxes: category_id = bbox[1] xmin, ymin = bbox[2], bbox[3] xmax, ymax = xmin + bbox[4], ymin + bbox[5] draw.rectangle([xmin, ymin, xmax, ymax], outline='red') im.save(os.path.join(save_dir, im_name)) # print(img_counter) # print("{} bboxes".format(bbox_counter)) # # predictions = {"images":images, "annotations":annotations} # # save_dir = base_dir + 'submit' # os.makedirs(save_dir, exist_ok=True) # # save_path = os.path.join(save_dir, 'submit_0123_22.json') # save_path = os.path.join(save_dir, 'sub.json') # print("save predictions to {}".format(save_path)) # with open(save_path, 'w') as f: # json.dump(predictions, f) # 2265 38998 01222_cascade_r101_dcn_16e_grid
package main import "fmt" func array() { var a [5]int var b [5]int = [5]int{1, 2, 3, 4, 5} c := [...]string{"a", "b", "c", "d", "e"} d := [4]string{"geek", "gfg", "Geeks1231", "GeeksforGeeks"} a[4] = 100 fmt.Println("element at index 1:", a[1]) fmt.Println("element at index 4:", a[4]) fmt.Println("length of array:", len(a)) fmt.Println("array b:", b) fmt.Println("array c:", c) fmt.Println("array d:", d) fmt.Println("----------------------------------------------------") for i, v := range c { fmt.Println("index:", i, "value:", v) } }
/**
 * Class for processing different client requests (create, fetch and delete) and
 * returning an appropriate response to the <b>Server</b> class, which ultimately
 * sends the response back to the client.
 *
 * This class also implements the Serializable interface for storing the current
 * state and fetching the last saved state of the server.
 *
 * @author Lokesh Bisht
 */
public class FileStorage implements Serializable {

    private ConcurrentHashMap<String, String> filenameToIdMap;
    private ConcurrentHashMap<String, String> idToFilenameMap;

    private File nameIdentifierFile;
    private File idIdentifierFile;

    private final String serverDirPath = System.getProperty("user.dir") + File.separator
            + "server" + File.separator + "data" + File.separator;

    /**
     * FileStorage constructor initializes the two {@code ConcurrentHashMap}s
     * {@code filenameToIdMap} and {@code idToFilenameMap}, and fetches the last saved
     * state of the server.
     */
    public FileStorage() {
        filenameToIdMap = new ConcurrentHashMap<>();
        idToFilenameMap = new ConcurrentHashMap<>();
        fetchSerializeData();
    }

    /**
     * <p>Restores the last saved state of the server before processing any client requests.</p>
     * <br/>
     * <p>If there are no {@code nameIdentifierFile} and {@code idIdentifierFile} files to
     * restore the server state from, the method creates nameIdentifierFile.txt and
     * idIdentifierFile.txt for storing the {@code filenameToIdMap} and {@code idToFilenameMap}
     * ConcurrentHashMaps, respectively.</p>
     */
    private synchronized void fetchSerializeData() {
        String path = serverDirPath + "nameIdentifierFile.txt";
        nameIdentifierFile = new File(path);
        path = serverDirPath + "idIdentifierFile.txt";
        idIdentifierFile = new File(path);

        if (!nameIdentifierFile.exists() && !nameIdentifierFile.isDirectory()) {
            try {
                boolean fileCreated = nameIdentifierFile.createNewFile();
                if (!fileCreated) {
                    System.out.println("Failed to create file to store serialized data.");
                    System.exit(1);
                }
            } catch (IOException e) {
                System.out.println("Failed to create file to store serialized data.");
                e.printStackTrace();
            }
        }

        if (!idIdentifierFile.exists() && !idIdentifierFile.isDirectory()) {
            try {
                boolean fileCreated = idIdentifierFile.createNewFile();
                if (!fileCreated) {
                    System.out.println("Failed to create file to store serialized data.");
                }
            } catch (IOException e) {
                System.out.println("Failed to create file to store serialized data.");
                e.printStackTrace();
            }
        }

        if (nameIdentifierFile.length() != 0 && idIdentifierFile.length() != 0) {
            filenameToIdMap = Save.deserialize(String.valueOf(nameIdentifierFile));
            idToFilenameMap = Save.deserialize(String.valueOf(idIdentifierFile));
        }
    }

    /**
     * Saves the current state of the server in a local file on the server
     * before shutting down the server.
     */
    public synchronized void updateStoredSerializedData() {
        Save.serialize(filenameToIdMap, String.valueOf(nameIdentifierFile));
        Save.serialize(idToFilenameMap, String.valueOf(idIdentifierFile));
    }

    /** Shows the current info of all the files on the server. */
    public synchronized void showServerCurrentFilesInfo() {
        System.out.printf("%nFiles available on the server: %n");
        // Take a local reference for iteration. Note: this does not snapshot the
        // map; iteration over a ConcurrentHashMap is weakly consistent either way.
        ConcurrentHashMap<String, String> temp = filenameToIdMap;
        for (Map.Entry<String, String> entry : temp.entrySet()) {
            System.out.println("File ID = " + entry.getValue() + ", FileName = " + entry.getKey());
        }
        System.out.println();
    }

    /**
     * Saves a file with the name {@code serverFilename} on the server.
     *
     * @param serverFilename the file is saved on the server by this name.
     * @param fileContent a {@code byte[]} array containing the contents
     *                    of the file to be saved on the server.
     * @return {@code fileID} if the file is successfully created on the server,
     * or -1 otherwise.
     */
    public long addFile(String serverFilename, byte[] fileContent) {
        long fileID = -1;
        try {
            String path = serverDirPath + serverFilename;
            File file = new File(path);
            if (!file.exists() && !file.isDirectory()) {
                Files.write(Paths.get(path), fileContent);
                synchronized (this) {
                    fileID = filenameToIdMap.size();
                    // The maps are keyed by String, so look up the String form of the id.
                    if (idToFilenameMap.get(String.valueOf(fileID)) != null) {
                        long start = fileID;
                        long end = 1000_000_000_000_000_000L;
                        while (start <= end) {
                            long mid = (start + end) / 2;
                            if (idToFilenameMap.get(String.valueOf(mid)) == null) {
                                end = mid - 1;
                            } else {
                                start = mid + 1;
                            }
                        }
                        fileID = start;
                    }
                    idToFilenameMap.put(String.valueOf(fileID), serverFilename);
                    filenameToIdMap.put(serverFilename, String.valueOf(fileID));
                }
            } else {
                System.out.println("The file already exists on the server.");
            }
        } catch (IOException | NumberFormatException e) {
            e.printStackTrace();
        }
        return fileID;
    }

    /**
     * Retrieve the file with id {@code fileID} from the server.
     *
     * @param fileID id of the file to be fetched from the server.
     * @return {@code fileContent} a {@code byte[]} array containing the content
     * of the file if the file is present on the server, or
     * {@code null} if this file is not present on the server.
     */
    public byte[] getFile(long fileID) {
        byte[] fileContent = null;
        String filename = idToFilenameMap.get(String.valueOf(fileID));
        if (filename == null) {
            return null;
        }
        String path = serverDirPath + filename;
        try {
            fileContent = Files.readAllBytes(Paths.get(path));
        } catch (IOException e) {
            System.out.println("Can't fetch file " + filename + ". File does not "
                    + "exist at path: " + path);
        }
        return fileContent;
    }

    /**
     * Retrieve the file {@code filename} from the server.
     *
     * @param filename name of the file to be fetched from the server.
     * @return {@code fileContent} a {@code byte[]} array containing the contents
     * of the file if the file is present on the server, or {@code null} if this
     * file is not present on the server.
     */
    public byte[] getFile(String filename) {
        byte[] fileContent = null;
        if (filenameToIdMap.get(filename) == null) {
            return null;
        }
        String path = serverDirPath + filename;
        try {
            fileContent = Files.readAllBytes(Paths.get(path));
        } catch (IOException e) {
            System.out.println("Can't fetch file " + filename + ". File does not "
                    + "exist at path: " + path);
        }
        return fileContent;
    }

    /**
     * Delete the file with id {@code fileID} from the server.
     *
     * @param fileID ID of the file to be deleted from the server.
     * @return {@code true} if the file is deleted from the server, otherwise
     * {@code false} if the file is not present on the server.
     */
    public boolean deleteFile(long fileID) {
        String filename = idToFilenameMap.get(String.valueOf(fileID));
        if (filename == null) {
            return false;
        }
        String path = serverDirPath + filename;
        File file = new File(path);
        if (file.exists() && !file.isDirectory()) {
            boolean isFileDeleted = file.delete();
            if (!isFileDeleted) {
                System.out.println("Failed in deleting file \"" + filename + " \" from the server");
                return false;
            }
            synchronized (this) {
                // The map keys are strings; removing with the raw long would
                // autobox to Long and never match a String key.
                idToFilenameMap.remove(String.valueOf(fileID));
                filenameToIdMap.remove(filename);
            }
            return true;
        }
        return false;
    }

    /**
     * Delete the file {@code filename} from the server.
     *
     * @param filename name of the file to be deleted from the server.
* @return {@code true} if the file is deleted from the server, otherwise * {@code false} if the file is not present on the server. */ public boolean deleteFile(String filename) { String fileID = filenameToIdMap.get(filename); if (fileID == null) { return false; } String path = serverDirPath + filename; File file = new File(path); if (file.exists() && !file.isDirectory()) { boolean isFileDeleted = file.delete(); if (!isFileDeleted) { System.out.println("Failed in deleting file \"" + filename + " \" from the server"); return false; } synchronized (this) { idToFilenameMap.remove(fileID); filenameToIdMap.remove(filename); } return true; } return false; } }
<filename>Episode.go package twist // Episode represents a single episode on twist.moe. type Episode struct { Link string `json:"link"` Number int `json:"episode:number"` AnimeTwistID int `json:"animetwist:id"` }
<filename>cmake_targets/oaisim_build_oai/build/CMakeFiles/R11.2/X2ap-Cause.h<gh_stars>1-10 /* * Generated by asn1c-0.9.24 (http://lionet.info/asn1c) * From ASN.1 module "X2AP-IEs" * found in "/home/liu/openairinterface5g/openair2/X2AP/MESSAGES/ASN1/R11.2/X2AP-IEs.asn" * `asn1c -gen-PER` */ #ifndef _X2ap_Cause_H_ #define _X2ap_Cause_H_ #include <asn_application.h> /* Including external dependencies */ #include "X2ap-CauseRadioNetwork.h" #include "X2ap-CauseTransport.h" #include "X2ap-CauseProtocol.h" #include "X2ap-CauseMisc.h" #include <constr_CHOICE.h> #ifdef __cplusplus extern "C" { #endif /* Dependencies */ typedef enum X2ap_Cause_PR { X2ap_Cause_PR_NOTHING, /* No components present */ X2ap_Cause_PR_radioNetwork, X2ap_Cause_PR_transport, X2ap_Cause_PR_protocol, X2ap_Cause_PR_misc, /* Extensions may appear below */ } X2ap_Cause_PR; /* X2ap-Cause */ typedef struct X2ap_Cause { X2ap_Cause_PR present; union X2ap_Cause_u { X2ap_CauseRadioNetwork_t radioNetwork; X2ap_CauseTransport_t transport; X2ap_CauseProtocol_t protocol; X2ap_CauseMisc_t misc; /* * This type is extensible, * possible extensions are below. */ } choice; /* Context for parsing across buffer boundaries */ asn_struct_ctx_t _asn_ctx; } X2ap_Cause_t; /* Implementation */ extern asn_TYPE_descriptor_t asn_DEF_X2ap_Cause; #ifdef __cplusplus } #endif #endif /* _X2ap_Cause_H_ */ #include <asn_internal.h>
<reponame>stores-and-sales/backend<gh_stars>0 // Copyright (c) 2022 Pestras // // This software is released under the MIT License. // https://opensource.org/licenses/MIT import { Validall } from '@pestras/validall'; export enum ProductsValidators { CREATE = 'CreateProduct', UPDATE = 'UpdateProduct', ADD_IMAGE = 'AddProductImage', DELETE_IMAGE = 'DeleteProductImage', ADD_PROPERTY = 'AddProductProperty', UPDATE_PROPERTY = 'UpdateProductProperty' } new Validall(ProductsValidators.CREATE, { name: { $type: 'string', $required: true, $message: 'invalidProductName' }, description: { $type: 'string', $default: '', $message: 'invalidProductDescription' }, price: { $type: 'number', $required: true, $message: 'invalidProductPrice' }, tags: { $default: [], $each: { $type: 'string', $message: 'invalidProductTag' } } }); new Validall(ProductsValidators.UPDATE, { $ref: ProductsValidators.CREATE }); new Validall(ProductsValidators.ADD_IMAGE, { src: { $type: 'string', $required: true, $message: 'invalidProductImageSrc' } }); new Validall(ProductsValidators.DELETE_IMAGE, { src: { $type: 'string', $required: true, $message: 'invalidProductImageSrc' } }); new Validall(ProductsValidators.ADD_PROPERTY, { name: { $type: 'string', $required: true, $message: 'invalidProductPropertyName' }, value: { $type: 'string', $required: true, $message: 'invalidProductPropertyValue' } }); new Validall(ProductsValidators.UPDATE_PROPERTY, { name: { $type: 'string', $required: true, $message: 'invalidProductPropertyName' }, value: { $type: 'string', $required: true, $message: 'invalidProductPropertyValue' } });
use proconio::{input, fastout}; struct Helper { memo: Vec<(usize, usize)> } impl Helper { fn traverse(&self, index: usize, mut val: usize, mut total: usize ) -> usize { if val == 1 { return total } val -= 1; let (b_p, b_b) = self.memo[index-1]; let last = b_p + b_b; if val < last { return self.traverse(index-1, val, total); } else if val == last { return total + b_b; } val -= last; total += b_b; if val == 1 { return total + 1; } val -=1; total +=1; if val < last { return self.traverse(index-1, val, total); } else if val == last { return total + b_b; } val -= last; total += b_b; if val == 1 { return total } panic!("total: {}, val: {}, index: {}", total, val, index); } } #[fastout] fn main() { input! { n: usize, x: usize, } let mut memo: Vec<(usize, usize)> = vec![]; memo.push((0, 1)); for i in 1..=n { let a = memo[i-1].0 * 2usize + 2usize; let b = memo[i-1].1 * 2usize + 1usize; memo.push((a, b)); } let helper = Helper { memo }; let result = helper.traverse(n, x, 0); println!("{}", result); }
// VisitElementAccessNode generates the il code for an array element access func (v *ILCodeGenerationVisitor) VisitElementAccessNode(node *node.ElementAccessNode) { node.Expression.Accept(v) node.Designator.Accept(v) designatorType := v.symbolTable.GetTypeByExpression(node.Designator) if v.isArrayType(designatorType) { v.assembler.Emit(il.ArrAt) } else if v.isMapType(designatorType) { v.assembler.Emit(il.MapGetVal) } else { panic(unsupportedElementAccessMsg) } }
Nowcasting algorithm for wind fields using ensemble forecasting and aircraft flight data

This study proposes an algorithm that combines ensemble numerical weather-prediction model data and aircraft flight data in a wind nowcasting system for safe and efficient aircraft operation. It uses an ensemble-weighted average method based on sequential importance sampling (SIS), a particle-filter method, to forecast the wind field in real time. SIS is applied to the ensemble forecast data and control-run data of the European Centre for Medium-Range Weather Forecasts (ECMWF), Japan Meteorological Agency (JMA), Korea Meteorological Administration (KMA), National Centers for Environmental Prediction (NCEP) and United Kingdom Met Office (UKMO) for two case studies that use flight data from 72 commercial aircraft flights. The results show that SIS forecasts better than the other four methods, namely direct ensemble average (DEA), elite strategy (ES), selective ensemble average (SEAV) and selective ensemble weighted average (SEWE), with average improvements in forecast performance of about 10-15%, even at 300 min ahead. In addition, the overall forecast performance of SIS against radiosonde wind observations was slightly better than that of DEA. In both cases, forecast performance was significantly improved at points along the flight paths of the aircraft used in this study. Case analyses and the impact of differences in the hyper-parameters of SIS on forecast performance are also presented.
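A minimal sketch of the SIS weighting step described above, in Python. The array shapes, the Gaussian observation-error model, and the effective-sample-size resampling threshold are illustrative assumptions rather than the paper's exact formulation; the ensemble rows would hold the ECMWF/JMA/KMA/NCEP/UKMO member wind forecasts and the observations the aircraft-reported winds.

import numpy as np

def sis_weighted_nowcast(ensemble, obs, weights, sigma=1.0):
    """One sequential-importance-sampling update (illustrative sketch).

    ensemble : (n_members, n_points) wind forecasts of each ensemble member
    obs      : (n_points,) winds observed by aircraft at the same points
    weights  : (n_members,) importance weights carried over from the last step
    sigma    : assumed observation-error standard deviation
    """
    # Gaussian likelihood of each member given the observations (assumed error model).
    err = ensemble - obs
    log_lik = -0.5 * np.sum((err / sigma) ** 2, axis=1)

    # SIS update: multiply prior weights by the likelihood, then normalise.
    log_w = np.log(weights) + log_lik
    log_w -= log_w.max()                      # guard against underflow
    new_w = np.exp(log_w)
    new_w /= new_w.sum()

    # The nowcast is the weighted ensemble average.
    nowcast = new_w @ ensemble

    # Standard particle-filter resampling when the effective sample size collapses
    # (the 0.5*n threshold is a common illustrative choice, not the paper's).
    n = len(new_w)
    if 1.0 / np.sum(new_w ** 2) < 0.5 * n:
        idx = np.random.choice(n, size=n, p=new_w)
        ensemble, new_w = ensemble[idx], np.full(n, 1.0 / n)

    return nowcast, ensemble, new_w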
import { doElsMatchSegs } from 'fullcalendar-tests/src/lib/segs' import { ResourceTimelineViewWrapper } from '../lib/wrappers/ResourceTimelineViewWrapper' import { TimelineViewWrapper } from '../lib/wrappers/TimelineViewWrapper' describe('timeline businessHours', () => { pushOptions({ initialView: 'timelineDay', now: '2016-02-15', scrollTime: '00:00', }) describeOptions('direction', { 'when LTR': 'ltr', 'when RTL': 'rtl', }, () => { it('renders when on a day with business hours', () => { let calendar = initCalendar({ businessHours: { startTime: '10:00', endTime: '16:00', }, slotDuration: { hours: 1 }, }) let viewWrapper = new TimelineViewWrapper(calendar) expect10to4(viewWrapper) }) it('renders all-day on a day completely outside of business hours', () => { let calendar = initCalendar({ now: '2016-02-14', // weekend businessHours: { startTime: '10:00', endTime: '16:00', }, slotDuration: { hours: 1 }, }) let viewWrapper = new TimelineViewWrapper(calendar) expect(isTimelineNonBusinessSegsRendered(viewWrapper, [ { start: '2016-02-14T00:00', end: '2016-02-15T00:00' }, ])).toBe(true) }) it('renders once even with resources', () => { let calendar = initCalendar({ initialView: 'resourceTimelineDay', resources: [ { id: 'a', title: 'a' }, { id: 'b', title: 'b' }, { id: 'c', title: 'c' }, ], businessHours: true, }) let viewWrapper = new ResourceTimelineViewWrapper(calendar) expect9to5(viewWrapper) }) it('render differently with resource override', () => { let calendar = initCalendar({ initialView: 'resourceTimelineDay', resources: [ { id: 'a', title: 'a' }, { id: 'b', title: 'b', businessHours: { startTime: '02:00', endTime: '22:00' } }, { id: 'c', title: 'c' }, ], businessHours: true, }) let viewWrapper = new ResourceTimelineViewWrapper(calendar) expectResourceOverride(viewWrapper) }) it('renders full height with resource override and expandRow', () => { let calendar = initCalendar({ initialView: 'resourceTimelineDay', expandRows: true, resources: [ { id: 'a', title: 'a' }, { id: 'b', title: 'b', businessHours: { startTime: '02:00', endTime: '22:00' } }, { id: 'c', title: 'c' }, ], businessHours: true, }) let timelineGrid = new ResourceTimelineViewWrapper(calendar).timelineGrid let laneEls = timelineGrid.getResourceLaneEls() let totalLaneHeight = 0 // for calculating ave height for (let laneEl of laneEls) { totalLaneHeight += laneEl.getBoundingClientRect().height } let aveLaneHeight = totalLaneHeight / laneEls.length let nonBusinessEls = timelineGrid.getNonBusinessDayEls() for (let nonBusinessEl of nonBusinessEls) { expect(Math.abs( nonBusinessEl.getBoundingClientRect().height - aveLaneHeight, )).toBeLessThan(3) } }) it('renders dynamically with resource override', (done) => { let specialResourceInput = { id: 'b', title: 'b', businessHours: { startTime: '02:00', endTime: '22:00' }, } let calendar = initCalendar({ initialView: 'resourceTimelineDay', resources: [ { id: 'a', title: 'a' }, specialResourceInput, { id: 'c', title: 'c' }, ], businessHours: true, }) let viewWrapper = new ResourceTimelineViewWrapper(calendar) expectResourceOverride(viewWrapper) setTimeout(() => { calendar.getResourceById(specialResourceInput.id).remove() expect9to5(viewWrapper) calendar.addResource(specialResourceInput) expectResourceOverride(viewWrapper) done() }) }) it('renders dynamically with resource override amidst other custom rows', (done) => { let calendar = initCalendar({ initialView: 'resourceTimelineDay', resources: [ { id: 'a', title: 'a', businessHours: { startTime: '03:00', endTime: '21:00' }, }, ], 
businessHours: true, }) let viewWrapper = new ResourceTimelineViewWrapper(calendar) expect(isResourceTimelineNonBusinessSegsRendered(viewWrapper, [ { resourceId: 'a', start: '2016-02-15T00:00', end: '2016-02-15T03:00' }, { resourceId: 'a', start: '2016-02-15T21:00', end: '2016-02-16T00:00' }, ])).toBe(true) setTimeout(() => { calendar.addResource({ id: 'b', title: 'b', businessHours: { startTime: '02:00', endTime: '22:00' } }) expect(isResourceTimelineNonBusinessSegsRendered(viewWrapper, [ { resourceId: 'a', start: '2016-02-15T00:00', end: '2016-02-15T03:00' }, { resourceId: 'a', start: '2016-02-15T21:00', end: '2016-02-16T00:00' }, { resourceId: 'b', start: '2016-02-15T00:00', end: '2016-02-15T02:00' }, { resourceId: 'b', start: '2016-02-15T22:00', end: '2016-02-16T00:00' }, ])).toBe(true) done() }) }) }) // https://github.com/fullcalendar/fullcalendar-scheduler/issues/414 it('can switch views with resource override', () => { let calendar = initCalendar({ initialView: 'resourceTimelineDay', resources: [ { id: 'a', title: 'a' }, { id: 'b', title: 'b', businessHours: { startTime: '02:00', endTime: '22:00' } }, { id: 'c', title: 'c' }, ], businessHours: true, }) let viewWrapper = new ResourceTimelineViewWrapper(calendar) expectResourceOverride(viewWrapper) calendar.changeView('dayGridMonth') calendar.changeView('resourceTimelineDay') expectResourceOverride(viewWrapper) }) describe('when resource initially contracted', () => { pushOptions({ resourcesInitiallyExpanded: false, }) describe('with a business hour override', () => { pushOptions({ initialView: 'resourceTimelineDay', resources: [ { id: 'a', title: 'a', children: [ { id: 'a1', title: 'a1', businessHours: { startTime: '02:00', endTime: '22:00' } }, ] }, ], }) it('renders when expanded', (done) => { let calendar = initCalendar() let viewWrapper = new ResourceTimelineViewWrapper(calendar) viewWrapper.dataGrid.clickFirstExpander() setTimeout(() => { // wait for animation to finish expect(isResourceTimelineNonBusinessSegsRendered(viewWrapper, [ { resourceId: 'a1', start: '2016-02-15T00:00', end: '2016-02-15T02:00' }, { resourceId: 'a1', start: '2016-02-15T22:00', end: '2016-02-16T00:00' }, ])).toBe(true) done() }, 500) }) }) }) function expect9to5(viewWrapper) { expect(isTimelineNonBusinessSegsRendered(viewWrapper, [ { start: '2016-02-15T00:00', end: '2016-02-15T09:00' }, { start: '2016-02-15T17:00', end: '2016-02-16T00:00' }, ])).toBe(true) } function expect10to4(viewWrapper) { expect(isTimelineNonBusinessSegsRendered(viewWrapper, [ { start: '2016-02-15T00:00', end: '2016-02-15T10:00' }, { start: '2016-02-15T16:00', end: '2016-02-16T00:00' }, ])).toBe(true) } function expectResourceOverride(viewWrapper) { expect(isResourceTimelineNonBusinessSegsRendered(viewWrapper, [ { resourceId: 'a', start: '2016-02-15T00:00', end: '2016-02-15T09:00' }, { resourceId: 'a', start: '2016-02-15T17:00', end: '2016-02-16T00:00' }, { resourceId: 'b', start: '2016-02-15T00:00', end: '2016-02-15T02:00' }, { resourceId: 'b', start: '2016-02-15T22:00', end: '2016-02-16T00:00' }, { resourceId: 'c', start: '2016-02-15T00:00', end: '2016-02-15T09:00' }, { resourceId: 'c', start: '2016-02-15T17:00', end: '2016-02-16T00:00' }, ])).toBe(true) } function isTimelineNonBusinessSegsRendered(viewWrapper, segs) { let timelineGridWrapper = viewWrapper.timelineGrid let baseGrid = timelineGridWrapper.base || timelineGridWrapper // :( return doElsMatchSegs( baseGrid.getNonBusinessDayEls(), segs, (seg) => baseGrid.getRect(seg.start, seg.end), ) } function 
isResourceTimelineNonBusinessSegsRendered(viewWrapper, segs) { let resourceTimelineGridWrapper = viewWrapper.timelineGrid return doElsMatchSegs( resourceTimelineGridWrapper.getNonBusinessDayEls(), segs, (seg) => resourceTimelineGridWrapper.getRect(seg.resourceId, seg.start, seg.end), // needs resource ) } })
<reponame>knocklabs/knock-node export interface KnockOptions { host?: string; } export interface HttpException extends Error { readonly status: number; readonly requestID: string; } export interface PostAndPutOptions { query?: { [key: string]: any }; } export interface UnprocessableEntityError { message: string; type: string; field: string; } // Channel types supported in Knock // TODO: it would be great to pull this in from an external location export type ChannelType = "email" | "in_app_feed" | "sms" | "push" | "chat"; export type CommonMetadata = Record<string, any>; export interface ChannelData<T = CommonMetadata> { channel_id: string; data: T; } export interface SetChannelDataProperties {}
COMPARATIVE ANALYSIS OF TRANSLATIONS OF THE SEVENTH BOOK OF PLATO’S “THE REPUBLIC” WITH THE ORIGINAL TEXT. POLYVARIATIVITY OF FORM AND MEANING

Appealing to original texts, comparing linguistic variations in the forms of their offspring (translations), researching the processes by which meanings branch, and reconstructing the first sense of texts, especially those created centuries ago in ancient languages, will always be relevant for any philological, linguistic and philosophical studies, since it enables improving translation and understanding the history of the mentality of native speakers, both earlier and modern. This article is an attempt to analyze and show how different the form and meaning of the same text can be in different languages, and what different translations may have in common. For the first time, fragments of the original Ancient Greek text of Plato’s “The Republic” are compared with their Japanese and Korean translations. Some fragments of Lithuanian, Latin and English translations are also included. For the analysis, I propose the following two fragments of the text of the seventh book: the replica numbered 514a-514b of the dialogue of Socrates with Glaucon, and the replica numbered 517b-517c. In my opinion, these contain the greatest concentration of philosophical terms associated with the myth of the cave, which interests me.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// FutureAppendable and HttpErrorPage are provided by the enclosing project.

/**
 * This class is an Adapter for Appendable objects.
 *
 * @author Nicolas Richeton
 */
public class FutureAppendableAdapter implements FutureAppendable {

    private Appendable out;
    private List<Future<CharSequence>> futureList;

    public FutureAppendableAdapter(Appendable out) {
        this.out = out;
        this.futureList = new ArrayList<>();
    }

    @Override
    public FutureAppendable enqueueAppend(Future<CharSequence> csq) {
        this.futureList.add(csq);
        return this;
    }

    @Override
    public FutureAppendable performAppends(int timeout, TimeUnit unit)
            throws IOException, HttpErrorPage, TimeoutException {
        try {
            for (Future<CharSequence> f : this.futureList) {
                CharSequence csq = f.get(timeout, unit);
                this.out.append(csq);
            }
        } catch (CancellationException | InterruptedException e) {
            throw new IOException(e);
        } catch (ExecutionException e) {
            // HttpErrorPage must be rethrown as-is so callers can render the error page.
            if (e.getCause() instanceof HttpErrorPage) {
                throw (HttpErrorPage) e.getCause();
            }
            throw new IOException(e);
        }
        this.futureList.clear();
        return this;
    }

    @Override
    public boolean hasPending() {
        return this.futureList.size() > 0;
    }

    @Override
    public FutureAppendable performAppends() throws IOException, HttpErrorPage {
        try {
            for (Future<CharSequence> f : this.futureList) {
                CharSequence csq = f.get();
                this.out.append(csq);
            }
        } catch (CancellationException | InterruptedException e) {
            throw new IOException(e);
        } catch (ExecutionException e) {
            // HttpErrorPage must be rethrown as-is so callers can render the error page.
            if (e.getCause() instanceof HttpErrorPage) {
                throw (HttpErrorPage) e.getCause();
            }
            throw new IOException(e);
        }
        this.futureList.clear();
        return this;
    }
}
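A quick usage sketch for the adapter above, using only the JDK; HttpErrorPage belongs to the enclosing project and only matters on the error path, so it does not appear here. Fragments are computed concurrently, but the appends land strictly in enqueue order:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FutureAppendableAdapterDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        StringBuilder out = new StringBuilder();
        FutureAppendableAdapter adapter = new FutureAppendableAdapter(out);

        // Both fragments are computed in parallel...
        adapter.enqueueAppend(pool.submit(() -> (CharSequence) "Hello, "));
        adapter.enqueueAppend(pool.submit(() -> (CharSequence) "world!"));

        // ...but are appended in the order they were enqueued.
        adapter.performAppends(1, TimeUnit.SECONDS);
        System.out.println(out);                  // prints "Hello, world!"
        System.out.println(adapter.hasPending()); // false: the queue was cleared
        pool.shutdown();
    }
}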
package rawg

func (suite *RAWGTestSuite) TestGetTag() {
	tag, err := suite.client.GetTag(1)

	suite.NoError(err)
	suite.Equal("Survival", tag.Name)
}

func (suite *RAWGTestSuite) TestGetTagFailed() {
	suite.client.baseUrl = ""
	tag, err := suite.client.GetTag(1)

	suite.Error(err)
	suite.Nil(tag)
}
/**
 * Iterates through the selection screens so the user can choose an auton
 * (or enter the debugger) and then set all the config options.
 * All screens are loaded at start so there are no memory-management issues.
 * Finishes when all options are chosen.
 */
int chooseAuton() {
    Autons auton_data;
    SelectionScreen scr1;
    OptionsScreen scr2;
    PrepScreen scr3;

    int finalAutonChoice = 0;
    int auton = 1;

    while ( !finalAutonChoice ) {
        scr2.back = false;
        auton = scr1.selectAuton( auton );
        if ( auton == auton_data.driver_control_num ) {
            finalAutonChoice = 1;
        } else if ( auton == auton_data.debug_auton_num ) {
            MotorThread* motor_thread = MotorThread::get_instance();
            Motors::register_motors();
            motor_thread->start_thread();

            pros::Task driver_control_task (driver_control, (void*)NULL, TASK_PRIORITY_DEFAULT,
                TASK_STACK_DEPTH_DEFAULT, "DriverControlTask");

            debug();

            driver_control_task.remove();
            Motors::unregister_motors();
            motor_thread->stop_thread();
        } else {
            finalAutonChoice = auton;
        }

        if ( !scr2.back ) {  // user does not want to go back from screen 2
            scr3.getConfirmation( auton );  // gets confirmation from user
            if ( scr3.confirm ) {
                finalAutonChoice = auton;
            }
        } else {
            break;
        }
    }

    return finalAutonChoice;
}
My 4-year-old autistic son Jordan loves video games. I took him and his twin sister Sophie to Dave and Buster's in Braintree one afternoon, but we had to leave after five minutes because he became overwhelmed. Sadly, he and Sophie miss out on a lot because of his disability. I emailed Michelle Pena, the restaurant's event coordinator, explaining our situation and asking when the least busy time is at their facility. To my surprise, I received an email back that day and was informed that the manager was willing to open early just for our family on June 22. Not only did we have the place to ourselves, the kids were all given $15 play cards and unlimited play for certain games. The staff there was so welcoming and made sure every one of my family members was having a good time. Jordan was even given a few special surprises. I cried watching both of my children being able to have fun, laugh and actually stay at a place where they were having so much fun. I want everyone to know how amazing the Dave and Buster's staff is and what an impact they made on my special little guy. STEPHANIE SAAD Braintree
package sit.int204.practice.controllers;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import sit.int204.practice.models.Product;
import sit.int204.practice.repositories.ProductRepository;

import java.util.List;

@Controller
public class ProductController {
    @Autowired
    private ProductRepository productRepository;

    @RequestMapping("/product")
    public String product(Model model) {
        model.addAttribute("products", productRepository.findAll());
        return "product";
    }

    @RequestMapping("/productWithPage")
    public String productWithPage(
            @RequestParam(defaultValue = "0") Integer pageNo,
            @RequestParam(defaultValue = "5") Integer pageSize,
            @RequestParam(defaultValue = "prodPrice") String sortBy,
            Model model) {
        Pageable pageable = PageRequest.of(pageNo, pageSize, Sort.by(sortBy));
        Page<Product> pageResult = productRepository.findAll(pageable);
        model.addAttribute("products", pageResult.getContent());
        return "product";
    }

    @RequestMapping("/show/{id}")
    public String show(@PathVariable Long id, Model model) {
        model.addAttribute("product", productRepository.findById(id).orElse(null));
        return "show";
    }

    @RequestMapping("/create")
    public String create(Model model) {
        return "create";
    }

    @RequestMapping("/save")
    public String save(Product product, Model model) {
        productRepository.save(product);
        model.addAttribute("product", product);
        return "redirect:/show/" + product.getId();
    }

    @RequestMapping("/delete")
    public String delete(@RequestParam Long id, Model model) {
        productRepository.deleteById(id);
        return "redirect:/product";
    }

    @RequestMapping("/edit/{id}")
    public String edit(@PathVariable Long id, Model model) {
        model.addAttribute("product", productRepository.findById(id).orElse(null));
        return "edit";
    }

    @RequestMapping("/update")
    public String update(@RequestParam Long id,
                         @RequestParam String prodName,
                         @RequestParam String prodDesc,
                         @RequestParam Double prodPrice,
                         @RequestParam String prodImage) {
        Product product = productRepository.findById(id).orElse(null);
        if (product == null) { // guard against a stale or invalid id instead of throwing an NPE
            return "redirect:/product";
        }
        product.setProdName(prodName);
        product.setProdDesc(prodDesc);
        product.setProdImage(prodImage);
        product.setProdPrice(prodPrice);
        productRepository.save(product);
        return "redirect:/show/" + product.getId();
    }

    @RequestMapping("/price")
    public String findByPrice(@RequestParam(defaultValue = "0") Double lower,
                              @RequestParam(defaultValue = "100000") Double upper,
                              Model model) {
        List<Product> products = productRepository.findByProdPriceBetween(lower, upper);
        model.addAttribute("products", products);
        return "product";
    }
}
// Mergesort
#include <stdio.h>

#define MAX_N 100

void merge(int *arr, int start, int mid, int end) {
    int n1[mid - start], n2[end - mid], i, j, k;

    i = 0;
    j = start;
    while (j < mid)
        n1[i++] = arr[j++];

    i = 0;
    k = mid;
    while (k < end)
        n2[i++] = arr[k++];

    i = 0;
    j = 0;
    k = start;
    while (i < mid - start && j < end - mid) {
        if (n1[i] < n2[j])
            arr[k++] = n1[i++];
        else
            arr[k++] = n2[j++];
    }
    while (i < mid - start)
        arr[k++] = n1[i++];
    while (j < end - mid)
        arr[k++] = n2[j++];
}

void mergeSort(int *arr, int start, int end) {
    int mid;
    if (end - start > 1) {
        mid = (end + start) / 2;
        mergeSort(arr, start, mid);
        mergeSort(arr, mid, end);
        merge(arr, start, mid, end);
    }
}

int main() {
    int arr[MAX_N], n, i;
    printf("Enter the no. of elements: ");
    scanf("%d", &n);
    if (n < 1 || n > MAX_N) { /* the original fixed arr[10] overflowed for n > 10 */
        printf("n must be between 1 and %d\n", MAX_N);
        return 1;
    }
    for (i = 0; i < n; i++)
        scanf("%d", &arr[i]);
    mergeSort(arr, 0, n);
    for (i = 0; i < n; i++)
        printf("%d ", arr[i]);
    return 0;
}
/** * Filter that accepts cards that have been targeted by a weapon accepted by the weapon filter this turn. * @param weaponFilter the weapon filter * @return Filter */ public static Filter hasBeenTargetedByWeaponThisTurn(final Filter weaponFilter) { return new Filter() { @Override public boolean accepts(GameState gameState, ModifiersQuerying modifiersQuerying, PhysicalCard physicalCard) { Collection<PhysicalCard> weaponsTargetedByThisTurn = modifiersQuerying.weaponsTargetedByThisTurn(physicalCard); for (PhysicalCard weapon : weaponsTargetedByThisTurn) { if (Filters.and(weaponFilter).accepts(gameState, modifiersQuerying, weapon)) { return true; } } Collection<SwccgBuiltInCardBlueprint> permanentWeaponsTargetedByThisTurn = modifiersQuerying.permanentWeaponsTargetedByThisTurn(physicalCard); for (SwccgBuiltInCardBlueprint permanentWeapon : permanentWeaponsTargetedByThisTurn) { if (Filters.and(weaponFilter).accepts(gameState, modifiersQuerying, permanentWeapon)) { return true; } } return false; } }; }
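A brief usage sketch of composing this factory method; Filters.blaster and the surrounding game objects are hypothetical stand-ins, since only the factory itself is shown above:

// Accept any card that was targeted this turn by a weapon matching the blaster filter.
Filter targetedByBlaster = Filters.hasBeenTargetedByWeaponThisTurn(Filters.blaster); // Filters.blaster is assumed
boolean matches = targetedByBlaster.accepts(gameState, modifiersQuerying, card);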
/** * Gets the scrollLeft value for this element. * @return the scrollLeft value for this element * @see <a href="http://msdn.microsoft.com/en-us/library/ms534617.aspx">MSDN documentation</a> */ public int jsxGet_scrollLeft() { if (scrollLeft_ < 0) { scrollLeft_ = 0; } else if (scrollLeft_ > 0) { if (!jsxGet_currentStyle().isScrollable(true)) { scrollLeft_ = 0; } } return scrollLeft_; }
/**
 * Mouse listener to allow the window to be draggable even though it is
 * undecorated and unresizable.
 */
private void initMouseListener() {
    try {
        this.addMouseListener(new MouseListener() {
            public void mouseReleased(MouseEvent e) {
                mouseDownCompCoords = null;
            }

            public void mousePressed(MouseEvent e) {
                mouseDownCompCoords = e.getPoint();
            }

            public void mouseExited(MouseEvent e) {
            }

            public void mouseEntered(MouseEvent e) {
            }

            public void mouseClicked(MouseEvent e) {
            }
        });

        this.addMouseMotionListener(new MouseMotionListener() {
            public void mouseMoved(MouseEvent e) {
            }

            public void mouseDragged(MouseEvent e) {
                // Move the window by the delta between the press point and the current drag point.
                Point currCoords = e.getLocationOnScreen();
                setLocation(currCoords.x - mouseDownCompCoords.x, currCoords.y - mouseDownCompCoords.y);
            }
        });
    } catch (Exception e) {
        // Intentionally ignored: drag support is a convenience and must not break startup.
    }
}
package com.zwo.pls.modules.mem.mapper; import com.zwo.pls.modules.mem.domain.Department; import com.zwo.pls.modules.mem.domain.DepartmentCriteria; import org.apache.ibatis.annotations.Param; import tk.mybatis.mapper.common.Mapper; import java.util.List; public interface DepartmentMapper extends Mapper<Department> { }
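Even though the interface body is empty, tk.mybatis's generic Mapper supplies the usual CRUD methods. A minimal usage sketch, assuming a standard Spring + tk.mybatis setup and that the entity's key type is Long:

@Autowired
private DepartmentMapper departmentMapper;

public void demo() {
    Department probe = new Department();                      // empty probe: no WHERE clause
    List<Department> all = departmentMapper.select(probe);    // SELECT * FROM department
    Department one = departmentMapper.selectByPrimaryKey(1L); // lookup by id
}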
// coreGroupResourcePriority returns the relative priority of the resource, in the following order: // pods, pvcs, pvs, everything else. func coreGroupResourcePriority(resource string) int { switch strings.ToLower(resource) { case "pods": return pod case "persistentvolumeclaims": return pvc case "persistentvolumes": return pv } return other }
// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_BROWSER_WORKER_HOST_WORKER_MESSAGE_FILTER_H_ #define CONTENT_BROWSER_WORKER_HOST_WORKER_MESSAGE_FILTER_H_ #include "base/callback.h" #include "content/browser/browser_message_filter.h" class ResourceDispatcherHost; namespace content { class ResourceContext; } // namespace content namespace net { class URLRequestContextGetter; } // namespace net struct ViewHostMsg_CreateWorker_Params; class WorkerMessageFilter : public BrowserMessageFilter { public: // |next_routing_id| is owned by this object. It can be used up until // OnChannelClosing. WorkerMessageFilter( int render_process_id, const content::ResourceContext* resource_context, ResourceDispatcherHost* resource_dispatcher_host, CallbackWithReturnValue<int>::Type* next_routing_id); // BrowserMessageFilter implementation. virtual void OnChannelClosing(); virtual bool OnMessageReceived(const IPC::Message& message, bool* message_was_ok); int GetNextRoutingID(); int render_process_id() const { return render_process_id_; } ResourceDispatcherHost* resource_dispatcher_host() const { return resource_dispatcher_host_; } private: ~WorkerMessageFilter(); // Message handlers. void OnCreateWorker(const ViewHostMsg_CreateWorker_Params& params, int* route_id); void OnLookupSharedWorker(const ViewHostMsg_CreateWorker_Params& params, bool* exists, int* route_id, bool* url_error); void OnCancelCreateDedicatedWorker(int route_id); void OnForwardToWorker(const IPC::Message& message); void OnDocumentDetached(unsigned long long document_id); void OnCreateMessagePort(int* route_id, int* message_port_id); int render_process_id_; const content::ResourceContext* const resource_context_; ResourceDispatcherHost* resource_dispatcher_host_; // This is guaranteed to be valid until OnChannelClosing is closed, and it's // not used after. scoped_ptr<CallbackWithReturnValue<int>::Type> next_routing_id_; DISALLOW_IMPLICIT_CONSTRUCTORS(WorkerMessageFilter); }; #endif // CONTENT_BROWSER_WORKER_HOST_WORKER_MESSAGE_FILTER_H_
/**
 * Estimates the memory footprint of a SparseRow with <code>clen</code>
 * columns and <code>sp</code> sparsity. This method accounts for the
 * overhead incurred by extra cells allocated (but not used) for SparseRow.
 * It assumes that non-zeros are uniformly distributed in the matrix --
 * i.e., #estimated nnz in a given SparseRow = clen*sp.
 *
 * @param clen number of columns
 * @param sp expected sparsity (fraction of non-zero cells, in [0,1])
 * @return estimated size in bytes
 */
public static long estimateRowSize(long clen, double sp)
{
    if ( sp == 0 )
        return 0;

    int basicSize = 28;
    int cellSize = 12;
    if ( sp == 1 ) {
        return clen * cellSize;
    }

    long numCells = SparseRow.initialCapacity;
    if ( (long) (sp*clen) > numCells ) {
        numCells = (long) (sp*clen);
    }
    // Capacity grows in powers of two, so round up to the next power of two.
    long allocatedCells = (long)Math.pow(2, Math.ceil(Math.log(numCells)/Math.log(2)) );
    long rowSize = basicSize + allocatedCells * cellSize;
    return rowSize;
}
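A worked example of the estimate (SparseRow.initialCapacity is assumed to be small, e.g. 4, so it does not dominate here): for clen = 1000 and sp = 0.1 the expected cell count is 100, which rounds up to the next power of two, 128, giving 28 + 128 * 12 = 1564 bytes.

// Hypothetical call site; the enclosing class of estimateRowSize is not shown above.
long bytes = estimateRowSize(1000, 0.1);
// numCells = max(SparseRow.initialCapacity, (long) (0.1 * 1000)) = 100
// allocatedCells = 2^ceil(log2(100)) = 128
// bytes == 28 + 128 * 12 == 1564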
package org.opencv.core; /** * Template class for a 4-element vector derived from Vec. * * Being derived from "Vec<_Tp, 4>", "Scalar_" and "Scalar" can be used just as * typical 4-element vectors. In addition, they can be converted to/from * "CvScalar". The type "Scalar" is widely used in OpenCV to pass pixel values. * * @see <a href="http://opencv.itseez.com/modules/core/doc/basic_structures.html#scalar">org.opencv.core.Scalar_</a> */ public class Scalar { public double val[]; public Scalar(double v0, double v1, double v2, double v3) { this.val = new double[] {v0, v1, v2, v3}; } public Scalar(double v0, double v1, double v2) { this.val = new double[] {v0, v1, v2, 0}; } public Scalar(double v0, double v1) { this.val = new double[] {v0, v1, 0, 0}; } public Scalar(double v0) { this.val = new double[] {v0, 0, 0, 0}; } public Scalar(double[] vals) { this.val = new double[4]; set(vals); } public void set(double[] vals) { if(vals!=null) { this.val[0] = vals.length>0 ? vals[0] : 0; this.val[1] = vals.length>1 ? vals[1] : 0; this.val[2] = vals.length>2 ? vals[2] : 0; this.val[3] = vals.length>3 ? vals[3] : 0; } } public static Scalar all(double v) { return new Scalar(v, v, v, v); } public Scalar clone() { return new Scalar(val); } public Scalar mul(Scalar it, double scale) { return new Scalar( val[0] * it.val[0] * scale, val[1] * it.val[1] * scale, val[2] * it.val[2] * scale, val[3] * it.val[3] * scale ); } public Scalar mul(Scalar it) { return mul(it, 1); } public Scalar conj() { return new Scalar(val[0], -val[1], -val[2], -val[3]); } public boolean isReal() { return val[1] == 0 && val[2] == 0 && val[3] == 0; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + java.util.Arrays.hashCode(val); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (!(obj instanceof Scalar)) return false; Scalar it = (Scalar) obj; if (!java.util.Arrays.equals(val, it.val)) return false; return true; } }
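A self-contained usage sketch of the class above:

Scalar bgrRed = new Scalar(0, 0, 255);        // OpenCV stores colors in BGR order
Scalar half = bgrRed.mul(Scalar.all(1), 0.5); // scale every channel by 0.5
System.out.println(half.val[2]);              // 127.5
System.out.println(bgrRed.isReal());          // false: val[2] != 0
System.out.println(Scalar.all(7).equals(new Scalar(7, 7, 7, 7))); // true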
// Copyright 2017 PDFium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com #include "fxjs/cfx_v8.h" #include "core/fxcrt/fx_memory.h" #include "third_party/base/allocator/partition_allocator/partition_alloc.h" CFX_V8::CFX_V8(v8::Isolate* isolate) : m_pIsolate(isolate) {} CFX_V8::~CFX_V8() = default; v8::Local<v8::Value> CFX_V8::GetObjectProperty( v8::Local<v8::Object> pObj, const WideString& wsPropertyName) { if (pObj.IsEmpty()) return v8::Local<v8::Value>(); v8::Local<v8::Value> val; if (!pObj->Get(m_pIsolate->GetCurrentContext(), NewString(wsPropertyName.AsStringView())) .ToLocal(&val)) return v8::Local<v8::Value>(); return val; } std::vector<WideString> CFX_V8::GetObjectPropertyNames( v8::Local<v8::Object> pObj) { if (pObj.IsEmpty()) return std::vector<WideString>(); v8::Local<v8::Array> val; v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); if (!pObj->GetPropertyNames(context).ToLocal(&val)) return std::vector<WideString>(); std::vector<WideString> result; for (uint32_t i = 0; i < val->Length(); ++i) { result.push_back(ToWideString(val->Get(context, i).ToLocalChecked())); } return result; } void CFX_V8::PutObjectProperty(v8::Local<v8::Object> pObj, const WideString& wsPropertyName, v8::Local<v8::Value> pPut) { if (pObj.IsEmpty()) return; pObj->Set(m_pIsolate->GetCurrentContext(), NewString(wsPropertyName.AsStringView()), pPut) .FromJust(); } void CFX_V8::DisposeIsolate() { if (m_pIsolate) m_pIsolate.Release()->Dispose(); } v8::Local<v8::Array> CFX_V8::NewArray() { return v8::Array::New(GetIsolate()); } v8::Local<v8::Object> CFX_V8::NewObject() { return v8::Object::New(GetIsolate()); } unsigned CFX_V8::PutArrayElement(v8::Local<v8::Array> pArray, unsigned index, v8::Local<v8::Value> pValue) { if (pArray.IsEmpty()) return 0; if (pArray->Set(m_pIsolate->GetCurrentContext(), index, pValue).IsNothing()) return 0; return 1; } v8::Local<v8::Value> CFX_V8::GetArrayElement(v8::Local<v8::Array> pArray, unsigned index) { if (pArray.IsEmpty()) return v8::Local<v8::Value>(); v8::Local<v8::Value> val; if (!pArray->Get(m_pIsolate->GetCurrentContext(), index).ToLocal(&val)) return v8::Local<v8::Value>(); return val; } unsigned CFX_V8::GetArrayLength(v8::Local<v8::Array> pArray) { if (pArray.IsEmpty()) return 0; return pArray->Length(); } v8::Local<v8::Number> CFX_V8::NewNumber(int number) { return v8::Int32::New(GetIsolate(), number); } v8::Local<v8::Number> CFX_V8::NewNumber(double number) { return v8::Number::New(GetIsolate(), number); } v8::Local<v8::Number> CFX_V8::NewNumber(float number) { return v8::Number::New(GetIsolate(), (float)number); } v8::Local<v8::Boolean> CFX_V8::NewBoolean(bool b) { return v8::Boolean::New(GetIsolate(), b); } v8::Local<v8::String> CFX_V8::NewString(const ByteStringView& str) { v8::Isolate* pIsolate = m_pIsolate ? GetIsolate() : v8::Isolate::GetCurrent(); return v8::String::NewFromUtf8(pIsolate, str.unterminated_c_str(), v8::NewStringType::kNormal, str.GetLength()) .ToLocalChecked(); } v8::Local<v8::String> CFX_V8::NewString(const WideStringView& str) { // Conversion from pdfium's wchar_t wide-strings to v8's uint16_t // wide-strings isn't handled by v8, so use UTF8 as a common // intermediate format. 
return NewString(FX_UTF8Encode(str).AsStringView()); } v8::Local<v8::Value> CFX_V8::NewNull() { return v8::Null(GetIsolate()); } v8::Local<v8::Value> CFX_V8::NewUndefined() { return v8::Undefined(GetIsolate()); } v8::Local<v8::Date> CFX_V8::NewDate(double d) { return v8::Date::New(m_pIsolate->GetCurrentContext(), d) .ToLocalChecked() .As<v8::Date>(); } int CFX_V8::ToInt32(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty()) return 0; v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); v8::MaybeLocal<v8::Int32> maybe_int32 = pValue->ToInt32(context); if (maybe_int32.IsEmpty()) return 0; return maybe_int32.ToLocalChecked()->Value(); } bool CFX_V8::ToBoolean(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty()) return false; v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); v8::MaybeLocal<v8::Boolean> maybe_boolean = pValue->ToBoolean(context); if (maybe_boolean.IsEmpty()) return false; return maybe_boolean.ToLocalChecked()->Value(); } double CFX_V8::ToDouble(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty()) return 0.0; v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); v8::MaybeLocal<v8::Number> maybe_number = pValue->ToNumber(context); if (maybe_number.IsEmpty()) return 0.0; return maybe_number.ToLocalChecked()->Value(); } WideString CFX_V8::ToWideString(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty()) return WideString(); v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); v8::MaybeLocal<v8::String> maybe_string = pValue->ToString(context); if (maybe_string.IsEmpty()) return WideString(); v8::String::Utf8Value s(GetIsolate(), maybe_string.ToLocalChecked()); return WideString::FromUTF8(ByteStringView(*s, s.length())); } ByteString CFX_V8::ToByteString(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty()) return ByteString(); v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); v8::MaybeLocal<v8::String> maybe_string = pValue->ToString(context); if (maybe_string.IsEmpty()) return ByteString(); v8::String::Utf8Value s(GetIsolate(), maybe_string.ToLocalChecked()); return ByteString(*s); } v8::Local<v8::Object> CFX_V8::ToObject(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty() || !pValue->IsObject()) return v8::Local<v8::Object>(); v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); return pValue->ToObject(context).ToLocalChecked(); } v8::Local<v8::Array> CFX_V8::ToArray(v8::Local<v8::Value> pValue) { if (pValue.IsEmpty() || !pValue->IsArray()) return v8::Local<v8::Array>(); v8::Local<v8::Context> context = m_pIsolate->GetCurrentContext(); return v8::Local<v8::Array>::Cast(pValue->ToObject(context).ToLocalChecked()); } void* CFX_V8ArrayBufferAllocator::Allocate(size_t length) { if (length > kMaxAllowedBytes) return nullptr; void* p = AllocateUninitialized(length); if (p) memset(p, 0, length); return p; } void* CFX_V8ArrayBufferAllocator::AllocateUninitialized(size_t length) { if (length > kMaxAllowedBytes) return nullptr; return pdfium::base::PartitionAllocGeneric( gArrayBufferPartitionAllocator.root(), length, "CFX_V8ArrayBuffer"); } void CFX_V8ArrayBufferAllocator::Free(void* data, size_t length) { pdfium::base::PartitionFreeGeneric(gArrayBufferPartitionAllocator.root(), data); }
/******************************************************************************* * Copyright (c) 2015 Development Gateway, Inc and others. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the MIT License (MIT) * which accompanies this distribution, and is available at * https://opensource.org/licenses/MIT * * Contributors: * Development Gateway - initial API and implementation *******************************************************************************/ package org.devgateway.ocds.web.rest.controller; import com.mongodb.BasicDBObject; import com.mongodb.DBObject; import io.swagger.annotations.ApiOperation; import org.devgateway.ocds.persistence.mongo.Tender; import org.devgateway.ocds.persistence.mongo.constants.MongoConstants; import org.devgateway.ocds.web.rest.controller.request.YearFilterPagingRequest; import org.devgateway.toolkit.persistence.mongo.aggregate.CustomGroupingOperation; import org.devgateway.toolkit.persistence.mongo.aggregate.CustomProjectionOperation; import org.springframework.cache.annotation.CacheConfig; import org.springframework.cache.annotation.Cacheable; import org.springframework.data.mongodb.core.aggregation.Aggregation; import org.springframework.data.mongodb.core.aggregation.AggregationResults; import org.springframework.data.mongodb.core.aggregation.Fields; import org.springframework.web.bind.annotation.ModelAttribute; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.RestController; import javax.validation.Valid; import java.util.Arrays; import java.util.List; import static org.springframework.data.mongodb.core.aggregation.Aggregation.limit; import static org.springframework.data.mongodb.core.aggregation.Aggregation.match; import static org.springframework.data.mongodb.core.aggregation.Aggregation.newAggregation; import static org.springframework.data.mongodb.core.aggregation.Aggregation.skip; import static org.springframework.data.mongodb.core.aggregation.Aggregation.unwind; import static org.springframework.data.mongodb.core.query.Criteria.where; /** * * @author mpostelnicu * */ @RestController @CacheConfig(keyGenerator = "genericPagingRequestKeyGenerator", cacheNames = "genericPagingRequestJson") @Cacheable public class TenderPercentagesController extends GenericOCDSController { public static final class Keys { public static final String TOTAL_TENDERS = "totalTenders"; public static final String TOTAL_CANCELLED = "totalCancelled"; public static final String PERCENT_CANCELLED = "percentCancelled"; public static final String YEAR = "year"; public static final String TOTAL_TENDERS_WITH_TWO_OR_MORE_TENDERERS = "totalTendersWithTwoOrMoreTenderers"; public static final String PERCENT_TENDERS = "percentTenders"; public static final String TOTAL_TENDERS_WITH_ONE_OR_MORE_TENDERERS = "totalTendersWithOneOrMoreTenderers"; public static final String TOTAL_TENDERS_USING_EBID = "totalTendersUsingEbid"; public static final String PERCENTAGE_TENDERS_USING_EBID = "percentageTendersUsingEbid"; public static final String PERCENTAGE_EGP = "percentEgp"; public static final String TOTAL_TENDERS_WITH_LINKED_PROCUREMENT_PLAN = "totalTendersWithLinkedProcurementPlan"; public static final String AVG_TIME_FROM_PLAN_TO_TENDER_PHASE = "avgTimeFromPlanToTenderPhase"; public static final String TOTAL_EGP = "totalEgp"; } @ApiOperation("Returns the percent of tenders that were cancelled, grouped by year." 
+ " The year is taken from tender.tenderPeriod.startDate. The response also contains the" + " total number of tenders and total number of cancelled tenders for each year.") @RequestMapping(value = "/api/percentTendersCancelled", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> percentTendersCancelled(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE_REF); project1.put("tender.status", 1); DBObject group = new BasicDBObject(); addYearlyMonthlyReferenceToGroup(filter, group); group.put(Keys.TOTAL_TENDERS, new BasicDBObject("$sum", 1)); group.put(Keys.TOTAL_CANCELLED, new BasicDBObject("$sum", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$eq", Arrays.asList("$tender.status", "cancelled")), 1, 0)))); DBObject project2 = new BasicDBObject(); project2.put(Keys.TOTAL_TENDERS, 1); project2.put(Keys.TOTAL_CANCELLED, 1); project2.put(Keys.PERCENT_CANCELLED, new BasicDBObject("$multiply", Arrays.asList(new BasicDBObject("$divide", Arrays.asList("$totalCancelled", "$totalTenders")), 100))); Aggregation agg = newAggregation( match(where(MongoConstants.FieldNames.TENDER_PERIOD_START_DATE).exists(true) .andOperator(getYearDefaultFilterCriteria(filter, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE))), new CustomProjectionOperation(project1), new CustomGroupingOperation(group), new CustomProjectionOperation(project2), transformYearlyGrouping(filter).andInclude(Keys.TOTAL_TENDERS, Keys.TOTAL_CANCELLED, Keys.PERCENT_TENDERS, Keys.PERCENT_CANCELLED), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize()) ); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } @ApiOperation("Percentage of tenders with >1 tenderer/bidder): " + "Count of tenders with numberOfTenderers >1 divided by total count of tenders." 
+ "This endpoint uses tender.tenderPeriod.startDate to calculate the tender year.") @RequestMapping(value = "/api/percentTendersWithTwoOrMoreTenderers", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> percentTendersWithTwoOrMoreTenderers(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE_REF); project1.put("tender.numberOfTenderers", 1); DBObject group = new BasicDBObject(); addYearlyMonthlyReferenceToGroup(filter, group); group.put(Keys.TOTAL_TENDERS, new BasicDBObject("$sum", 1)); group.put("totalTendersWithTwoOrMoreTenderers", new BasicDBObject("$sum", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$gt", Arrays.asList("$tender.numberOfTenderers", 1)), 1, 0)))); DBObject project2 = new BasicDBObject(); project2.put(Keys.TOTAL_TENDERS, 1); project2.put(Keys.TOTAL_TENDERS_WITH_TWO_OR_MORE_TENDERERS, 1); project2.put(Keys.PERCENT_TENDERS, new BasicDBObject("$multiply", Arrays.asList( new BasicDBObject("$divide", Arrays.asList("$totalTendersWithTwoOrMoreTenderers", "$totalTenders")), 100))); Aggregation agg = newAggregation( match(where(MongoConstants.FieldNames.TENDER_PERIOD_START_DATE).exists(true) .andOperator(getYearDefaultFilterCriteria(filter, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE))), new CustomProjectionOperation(project1), new CustomGroupingOperation(group), new CustomProjectionOperation(project2), transformYearlyGrouping(filter).andInclude(Keys.TOTAL_TENDERS, Keys.TOTAL_TENDERS_WITH_TWO_OR_MORE_TENDERERS, Keys.PERCENT_TENDERS), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize()) ); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } @ApiOperation("Percent of awarded tenders with >1 tenderer/bidder" + "Count of tenders with numberOfTenderers >1 divided by total count of tenders with numberOfTenderers >0" + "This endpoint uses tender.tenderPeriod.startDate to calculate the tender year.") @RequestMapping(value = "/api/percentTendersAwardedWithTwoOrMoreTenderers", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> percentTendersAwarded(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE_REF); project1.put("tender.numberOfTenderers", 1); DBObject group = new BasicDBObject(); addYearlyMonthlyReferenceToGroup(filter, group); group.put(Keys.TOTAL_TENDERS_WITH_ONE_OR_MORE_TENDERERS, new BasicDBObject("$sum", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$gt", Arrays.asList("$tender.numberOfTenderers", 0)), 1, 0)))); group.put(Keys.TOTAL_TENDERS_WITH_TWO_OR_MORE_TENDERERS, new BasicDBObject("$sum", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$gt", Arrays.asList("$tender.numberOfTenderers", 1)), 1, 0)))); DBObject project2 = new BasicDBObject(); project2.put(Keys.TOTAL_TENDERS_WITH_ONE_OR_MORE_TENDERERS, 1); project2.put(Keys.TOTAL_TENDERS_WITH_TWO_OR_MORE_TENDERERS, 1); project2.put(Keys.PERCENT_TENDERS, new BasicDBObject("$multiply", Arrays.asList(new BasicDBObject("$divide", Arrays.asList("$totalTendersWithTwoOrMoreTenderers", "$totalTendersWithOneOrMoreTenderers")), 100))); Aggregation agg = 
newAggregation( match(where(MongoConstants.FieldNames.TENDER_PERIOD_START_DATE) .exists(true).and("tender.numberOfTenderers").gt(0) .andOperator(getYearDefaultFilterCriteria(filter, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE))), new CustomProjectionOperation(project1), new CustomGroupingOperation(group), new CustomProjectionOperation(project2), transformYearlyGrouping(filter).andInclude(Keys.TOTAL_TENDERS_WITH_ONE_OR_MORE_TENDERERS, Keys.TOTAL_TENDERS_WITH_TWO_OR_MORE_TENDERERS, Keys.PERCENT_TENDERS), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize()) ); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } @ApiOperation("Returns the percent of tenders with active awards, " + "with tender.submissionMethod='electronicSubmission'." + "The endpoint also returns the total tenderds with active awards and the count of tenders with " + "tender.submissionMethod='electronicSubmission") @RequestMapping(value = "/api/percentTendersUsingEBid", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> percentTendersUsingEBid(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE_REF); project1.put(Fields.UNDERSCORE_ID, "$tender._id"); project1.put("electronicSubmission", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$eq", Arrays.asList("$tender.submissionMethod", Tender.SubmissionMethod.electronicSubmission.toString())), 1, 0))); DBObject group1 = new BasicDBObject(); group1.put(Fields.UNDERSCORE_ID, Fields.UNDERSCORE_ID_REF); addYearlyMonthlyGroupingOperationFirst(filter, group1); group1.put("electronicSubmission", new BasicDBObject("$max", "$electronicSubmission")); DBObject group2 = new BasicDBObject(); addYearlyMonthlyReferenceToGroup(filter, group2); group2.put(Keys.TOTAL_TENDERS, new BasicDBObject("$sum", 1)); group2.put(Keys.TOTAL_TENDERS_USING_EBID, new BasicDBObject("$sum", "$electronicSubmission")); DBObject project2 = new BasicDBObject(); project2.put(Keys.TOTAL_TENDERS, 1); project2.put(Keys.TOTAL_TENDERS_USING_EBID, 1); project2.put(Keys.PERCENTAGE_TENDERS_USING_EBID, new BasicDBObject("$multiply", Arrays .asList(new BasicDBObject("$divide", Arrays.asList("$totalTendersUsingEbid", "$totalTenders")), 100))); Aggregation agg = newAggregation( match(where(MongoConstants.FieldNames.TENDER_PERIOD_START_DATE).exists(true) .and("tender.submissionMethod.0").exists(true). and("awards.status").is("active") .andOperator(getYearDefaultFilterCriteria(filter, MongoConstants.FieldNames.TENDER_PERIOD_START_DATE))), unwind("$tender.submissionMethod"), new CustomProjectionOperation(project1), new CustomGroupingOperation(group1), new CustomGroupingOperation(group2), new CustomProjectionOperation(project2), transformYearlyGrouping(filter).andInclude(Keys.TOTAL_TENDERS, Keys.TOTAL_TENDERS_USING_EBID, Keys.PERCENTAGE_TENDERS_USING_EBID), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize()) ); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } @ApiOperation("Returns the percent of tenders that are using eProcurement." 
+ " This is read from tender.publicationMethod='eGP'") @RequestMapping(value = "/api/percentTendersUsingEgp", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> percentTendersUsingEgp(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, "$tender.tenderPeriod.startDate"); project1.put("tender.publicationMethod", 1); DBObject group = new BasicDBObject(); addYearlyMonthlyReferenceToGroup(filter, group); group.put("totalTenders", new BasicDBObject("$sum", 1)); group.put(Keys.TOTAL_EGP, new BasicDBObject("$sum", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$eq", Arrays.asList("$tender.publicationMethod", "eGP")), 1, 0)))); DBObject project2 = new BasicDBObject(); project2.put("totalTenders", 1); project2.put(Keys.TOTAL_EGP, 1); project2.put("year", 1); project2.put("month", 1); project2.put("percentEgp", new BasicDBObject("$multiply", Arrays.asList(new BasicDBObject("$divide", Arrays.asList("$totalEgp", "$totalTenders")), 100))); Aggregation agg = newAggregation( match(where("tender.tenderPeriod.startDate").exists(true) .andOperator(getYearDefaultFilterCriteria(filter, "tender.tenderPeriod.startDate"))), new CustomProjectionOperation(project1), new CustomGroupingOperation(group), transformYearlyGrouping(filter).andInclude("totalTenders", "percentEgp", Keys.TOTAL_EGP), new CustomProjectionOperation(project2), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize())); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } @ApiOperation("Percentage of tenders that are associated in releases that " + "have the planning.budget.amount non empty," + "meaning there really is a planning entity correlated with the tender entity." 
+ "This endpoint uses tender.tenderPeriod.startDate to calculate the tender year.") @RequestMapping(value = "/api/percentTendersWithLinkedProcurementPlan", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> percentTendersWithLinkedProcurementPlan(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, "$tender.tenderPeriod.startDate"); project1.put("tender._id", 1); project1.put("planning.budget.amount", 1); DBObject group = new BasicDBObject(); addYearlyMonthlyReferenceToGroup(filter, group); group.put(Keys.TOTAL_TENDERS, new BasicDBObject("$sum", 1)); group.put(Keys.TOTAL_TENDERS_WITH_LINKED_PROCUREMENT_PLAN, new BasicDBObject("$sum", new BasicDBObject("$cond", Arrays.asList(new BasicDBObject("$gt", Arrays.asList("$planning.budget.amount", null)), 1, 0)))); DBObject project2 = new BasicDBObject(); project2.put(Keys.YEAR, 1); project2.put("month", 1); project2.put(Keys.TOTAL_TENDERS, 1); project2.put(Keys.TOTAL_TENDERS_WITH_LINKED_PROCUREMENT_PLAN, 1); project2.put(Keys.PERCENT_TENDERS, new BasicDBObject("$multiply", Arrays.asList(new BasicDBObject("$divide", Arrays.asList( "$" + Keys.TOTAL_TENDERS_WITH_LINKED_PROCUREMENT_PLAN, "$" + Keys.TOTAL_TENDERS)), 100))); Aggregation agg = newAggregation( match(where("tender.tenderPeriod.startDate").exists(true) .andOperator(getYearDefaultFilterCriteria(filter, "tender.tenderPeriod.startDate"))), new CustomProjectionOperation(project1), new CustomGroupingOperation(group), transformYearlyGrouping(filter).andInclude(Keys.TOTAL_TENDERS, Keys.TOTAL_TENDERS_WITH_LINKED_PROCUREMENT_PLAN, Keys.PERCENT_TENDERS), new CustomProjectionOperation(project2), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize()) ); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } @ApiOperation("For all tenders that have both tender.tenderPeriod.startDate and planning.bidPlanProjectDateApprove" + "calculates the number o days from planning.bidPlanProjectDateApprove to tender.tenderPeriod.startDate" + "and creates the average. 
Groups results by tender year, calculatedfrom tender.tenderPeriod.startDate") @RequestMapping(value = "/api/avgTimeFromPlanToTenderPhase", method = { RequestMethod.POST, RequestMethod.GET }, produces = "application/json") public List<DBObject> avgTimeFromPlanToTenderPhase(@ModelAttribute @Valid final YearFilterPagingRequest filter) { DBObject timeFromPlanToTenderPhase = new BasicDBObject("$divide", Arrays.asList( new BasicDBObject("$subtract", Arrays.asList("$tender.tenderPeriod.startDate", "$planning.bidPlanProjectDateApprove")), MongoConstants.DAY_MS)); DBObject project1 = new BasicDBObject(); addYearlyMonthlyProjection(filter, project1, "$tender.tenderPeriod.startDate"); project1.put("timeFromPlanToTenderPhase", timeFromPlanToTenderPhase); Aggregation agg = newAggregation( match(where("tender.tenderPeriod.startDate").exists(true).and("planning.budget.amount").exists(true) .and("planning.bidPlanProjectDateApprove").exists(true) .andOperator(getYearDefaultFilterCriteria(filter, "tender.tenderPeriod.startDate"))), new CustomProjectionOperation(project1), getYearlyMonthlyGroupingOperation(filter).avg("timeFromPlanToTenderPhase") .as(Keys.AVG_TIME_FROM_PLAN_TO_TENDER_PHASE), transformYearlyGrouping(filter).andInclude(Keys.AVG_TIME_FROM_PLAN_TO_TENDER_PHASE), getSortByYearMonth(filter), skip(filter.getSkip()), limit(filter.getPageSize())); AggregationResults<DBObject> results = mongoTemplate.aggregate(agg, "release", DBObject.class); List<DBObject> list = results.getMappedResults(); return list; } }
def initialize_error_reporting() -> None: import sentry_sdk from sentry_sdk import configure_scope from sentry_sdk.integrations.atexit import AtexitIntegration from sentry_sdk.integrations.dedupe import DedupeIntegration from sentry_sdk.integrations.excepthook import ExcepthookIntegration key = sentry_write_key() if not key: return telemetry_id = get_telemetry_id() sentry_sdk.init( f"https://{key}.ingest.sentry.io/2801673", before_send=before_send, integrations=[ ExcepthookIntegration(), DedupeIntegration(), AtexitIntegration(lambda _, __: None), ], send_default_pii=False, server_name=telemetry_id or "UNKNOWN", ignore_errors=[ KeyboardInterrupt, MemoryError, NotImplementedError, asyncio.CancelledError, RasaException, OSError, ], in_app_include=["rasa"], with_locals=False, release=f"rasa-{rasa.__version__}", default_integrations=False, environment="development" if in_continuous_integration() else "production", ) if not telemetry_id: return with configure_scope() as scope: if hasattr(scope, "set_user"): scope.set_user({"id": telemetry_id}) default_context = _default_context_fields() if hasattr(scope, "set_context"): if "os" in default_context: scope.set_context("Operating System", default_context.pop("os")) scope.set_context("Environment", default_context)
Hoshangabad: No son or daughter of the state should remain deprived of education. This is what the government and the chief minister want. Vidya Bharati is doing commendable work in the field of education. Besides lighting the lamp of knowledge, it is also inculcating moral values in the students. Those passing out from institutions run by Vidya Bharati will make a mark in the world. This was stated by the Minister for Food Processing and Horticulture, who is also the minister in-charge of the district, at the concluding function of a two-day Maths workshop organised at Govindnagar in the Bankhedi block of the district by Vidya Bharati. He also dedicated the new Maths laboratory of the Saraswati Gramodaya School to the students. The laboratory will be used to introduce the students to complex mathematical concepts through models made from waste material. The minister said that he would try to ensure that similar laboratories come up in other schools of the state. Director general of MAPCOST Dr Navin Chandra said that ancient India had an advanced understanding of mathematical concepts. The decimal system and zero are India's invaluable gifts to the world. He said that our ancient sages knew that the earth goes round the sun and the moon revolves around the earth. They had discovered the means for measuring the distance between planets. He said that software designed by Indian engineers commands the highest price in the global market. The DG said mathematics is the mother of all sciences. Dr P. Satyanarain, master trainer in mathematics from Andhra Pradesh, said that mathematics is an interesting subject provided one is able to grasp its basic concepts. Hitanand Sharma, Madhya Bharat in-charge of Vidya Bharati, said that the organisation is running 12,000 schools in the country.
def multipass_render(template, variables, environment): this_template = template for i in range(1, RENDER_PASS_LIMIT + 1): logging.debug(f"Performing render pass {i}...") this_template_rendered = this_template.render(variables) if "{{" not in this_template_rendered: return this_template_rendered this_template = environment.from_string(this_template_rendered) raise RuntimeError(f"Variables not eliminated from template after {RENDER_PASS_LIMIT} passes." f" Please verify that template variables references to not recurse.")
/*
 * This work was created by participants in the DataONE project, and is
 * jointly copyrighted by participating institutions in DataONE. For
 * more information on DataONE, see our web site at http://dataone.org.
 *
 *   Copyright 2019. All rights reserved.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 *
 */
package org.dataone.bookkeeper.jdbi;

import org.dataone.bookkeeper.api.Quota;
import org.jdbi.v3.sqlobject.config.RegisterBeanMapper;
import org.jdbi.v3.sqlobject.customizer.Bind;
import org.jdbi.v3.sqlobject.customizer.BindBean;
import org.jdbi.v3.sqlobject.customizer.BindList;
import org.jdbi.v3.sqlobject.statement.GetGeneratedKeys;
import org.jdbi.v3.sqlobject.statement.SqlQuery;
import org.jdbi.v3.sqlobject.statement.SqlUpdate;

import java.util.List;

/**
 * The quota data access interfaces used to create, read, update, and delete
 * quotas from the database
 */
@RegisterBeanMapper(Quota.class)
public interface QuotaStore {

    /** The query used to find all quotas */
    String SELECT_CLAUSE =
        "SELECT " +
            "q.id, " +
            "q.object, " +
            "q.quotaType, " +
            "q.softLimit, " +
            "q.hardLimit, " +
            "q.totalUsage, " +
            "q.unit, " +
            "q.orderId, " +
            "q.subject, " +
            "q.name " +
        "FROM quotas q ";

    /** The full ordered query */
    String SELECT_ALL = SELECT_CLAUSE;

    /** The query used to find unassigned quotas (i.e. generic product quotas) */
    String SELECT_UNASSIGNED = SELECT_CLAUSE + "WHERE orderId IS NULL ";

    /** The query used to find an individual quota */
    String SELECT_ONE = SELECT_CLAUSE + "WHERE q.id = :id ";

    /** The query used to find quotas by name */
    String SELECT_BY_TYPE = SELECT_CLAUSE + "WHERE q.quotaType = :quotaType";

    /** The query used to find a quota by order identifier */
    String SELECT_ORDER = SELECT_CLAUSE + "WHERE q.orderId = :orderId ";

    /** The query used to find a quota by subject identifier */
    String SELECT_OWNER = SELECT_CLAUSE + "WHERE q.subject = :subject ";

    /** The query used to find quotas by multiple subject identifiers */
    String SELECT_OWNERS = SELECT_CLAUSE + "WHERE q.subject IN (<subjects>) ";

    /** The query used to find quotas by type and multiple subject identifiers */
    String SELECT_BY_NAME_AND_OWNERS =
        SELECT_CLAUSE + "WHERE q.quotaType = :quotaType AND q.subject IN (<subjects>) ";

    /**
     * List all quotas
     * @return quotas the list of quotas
     */
    @SqlQuery(SELECT_ALL)
    List<Quota> listQuotas();

    /**
     * List all unassigned quotas (no orderId)
     * @return quotas the list of unassigned quotas
     */
    @SqlQuery(SELECT_UNASSIGNED)
    List<Quota> listUnassignedQuotas();

    /**
     * Find quotas by quota identifier
     * @param id the quota identifier
     * @return quota the quota for the identifier
     */
    @SqlQuery(SELECT_ONE)
    Quota getQuota(@Bind("id") Integer id);

    /**
     * Find quotas by order identifier.
     *
     * Pass a null orderId to list all product-associated quotas (i.e. not bound to an order).
     * @param orderId the order identifier
     * @return quotas the quotas for the orderId
     */
    @SqlQuery(SELECT_ORDER)
    List<Quota> findQuotasByOrderId(@Bind("orderId") Integer orderId);

    /**
     * Find quotas by quota type
     *
     * @param quotaType quota type
     * @return quotas the list of quotas for the quota type
     */
    @SqlQuery(SELECT_BY_TYPE)
    List<Quota> findQuotasByType(@Bind("quotaType") String quotaType);

    /**
     * Find quotas by subject identifier
     *
     * @param subject the subject identifier (such as an ORCID identifier)
     * @return quotas the list of quotas for the subject
     */
    @SqlQuery(SELECT_OWNER)
    List<Quota> findQuotasBySubject(@Bind("subject") String subject);

    /**
     * Find quotas by a list of subject identifiers
     *
     * @param subjects the subject identifiers list (such as an ORCID identifier)
     * @return quotas the list of quotas for the subject
     */
    @SqlQuery(SELECT_OWNERS)
    List<Quota> findQuotasBySubjects(@BindList("subjects") List<String> subjects);

    /**
     * Find quotas by a quota type and subjects
     *
     * @param quotaType the quota name (e.g. "portal", "storage")
     * @param subjects the subject identifiers (such as an ORCID identifier)
     * @return quotas the list of quotas for the subjects and names
     */
    @SqlQuery(SELECT_BY_NAME_AND_OWNERS)
    List<Quota> findQuotasByNameAndSubjects(@Bind("quotaType") String quotaType,
                                            @BindList("subjects") List<String> subjects);

    /**
     * Insert a quota with a given Quota instance
     * @param quota the quota to insert
     */
    @SqlUpdate("INSERT INTO quotas " +
        "(object, " +
        "quotaType, " +
        "softLimit, " +
        "hardLimit, " +
        "unit, " +
        "orderId, " +
        "subject, " +
        "name) " +
        "VALUES " +
        "(:object, " +
        ":quotaType, " +
        ":softLimit, " +
        ":hardLimit, " +
        ":unit, " +
        ":orderId, " +
        ":subject, " +
        ":name) " +
        "RETURNING id")
    @GetGeneratedKeys
    Integer insert(@BindBean Quota quota);

    /**
     * Update a quota for a given id, but don't update totalUsage since it is handled by triggers
     * @param quota the quota to update
     */
    @SqlUpdate("UPDATE quotas " +
        "SET object = :object, " +
        "quotaType = :quotaType, " +
        "softLimit = :softLimit, " +
        "hardLimit = :hardLimit, " +
        "unit = :unit, " +
        "orderId = :orderId, " +
        "subject = :subject, " +
        "name = :name " +
        "WHERE id = :id ")
    @GetGeneratedKeys
    Quota update(@BindBean Quota quota);

    /**
     * Delete a quota given the quota id
     * @param id the quota to delete
     */
    @SqlUpdate("DELETE FROM quotas WHERE id = :id")
    void delete(@Bind("id") Integer id);
}
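A minimal wiring sketch for this store with Jdbi 3's SqlObject plugin; the JDBC URL and credentials below are placeholders:

import org.jdbi.v3.core.Jdbi;
import org.jdbi.v3.sqlobject.SqlObjectPlugin;

Jdbi jdbi = Jdbi.create("jdbc:postgresql://localhost/bookkeeper", "user", "secret"); // placeholder DSN
jdbi.installPlugin(new SqlObjectPlugin());

QuotaStore quotas = jdbi.onDemand(QuotaStore.class); // each call obtains its own handle
List<Quota> portalQuotas = quotas.findQuotasByType("portal");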
def define_categories(self, files) -> None:
    for file in filter(lambda f: f.is_documentation_page(), files):
        with open(file.abs_src_path, encoding='utf-8') as handle:
            source, meta_data = meta.get_data(handle.read())

            if len(meta_data) <= 0 or 'categories' not in meta_data:
                continue

            if not isinstance(meta_data['categories'], list):
                self.log.error(
                    'The categories object at %s was not a list, but %s',
                    str(file.url),
                    type(meta_data['categories']).__name__
                )
                continue

            for category in meta_data['categories']:
                self.register_page(
                    str(category),
                    str(file.url),
                    get_page_title(source, meta_data)
                )
import {useRef} from 'react';

export function useTimer(): {
    startTimer: () => void;
    stoppTimer: () => number;
} {
    const ref = useRef<number>(-1);

    function start() {
        ref.current = Date.now();
    }

    function stopp() {
        if (ref.current === -1) {
            return -1;
        }
        const ret = Date.now() - ref.current;
        ref.current = -1;
        return ret;
    }

    return {startTimer: start, stoppTimer: stopp};
}
#include "../../math/reinterpret.h"
#include <stdint.h>

/* Convert uint32_t to binary128 (long double) without 128-bit FPU support.
 * The cast to double is exact for every uint32_t. Shifting the double's bit
 * pattern right by 4 drops its 4 lowest mantissa bits (always zero for a
 * 32-bit integer value) and lines the remaining fields up with the high 64
 * bits of a binary128; adding 0x3C00000000000000 rebiases the exponent
 * (16383 - 1023 = 15360 = 0x3C00). The sign bit is zero since a > 0. */
long double __floatunsitf(uint32_t a)
{
    if (!a) return 0;

    uint64_t high = (reinterpret(uint64_t, (double)a) >> 4) + 0x3C00000000000000;

    return reinterpret(long double, (unsigned __int128)high << 64);
}
package com.icommerce.product.restapi;

public interface VersionController {
    String getVersion();
}
/** * Boundary Top Left Pixel Number (ST0903 VTarget Pack Tag 2). * * <p>From ST0903: * * <blockquote> * * Specifies the position of the top left corner of the target bounding box within a frame as a * pixel number. Numbering commences with 1, at the top left pixel, and proceeds from left to right, * top to bottom. The calculation of the pixel number uses the equation: Column + ((Row-1) x Frame * Width)). The top left pixel of the frame equates to (Column, Row) = (1, 1) and pixel number 1. * The Frame Width comes from VMTI LS Tag 8, if present. If it is not present, then the Frame Width * comes from the underlying Motion Imagery. In the absence of underlying Motion Imagery, VMTI LS * Tag 8 needs to be present. * * <p>It is important for bit efficiency to rely on variable length payloads for this value. * * </blockquote> * * This item is also used for ST0903 VTrackItem Item 5. The corresponding frame width and frame * height are given in VTrackItem Item 20 and Item 21. */ public class BoundaryTopLeft extends PixelNumber implements IVmtiMetadataValue, IVTrackItemMetadataValue { /** * Create from value. * * @param num the bounding box top left pixel number */ public BoundaryTopLeft(long num) { super(num); } /** * Create from encoded bytes. * * @param bytes Encoded byte array */ public BoundaryTopLeft(byte[] bytes) { super(bytes); } @Override public final String getDisplayName() { return "Boundary Top Left"; } }
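Because the Column/Row to pixel-number mapping above is one-based, off-by-one errors are easy to make; here is a small sketch of both directions (frameWidth is whatever VMTI LS Tag 8 or the underlying Motion Imagery supplies):

// pixelNumber = Column + ((Row - 1) * FrameWidth), with (Column, Row) = (1, 1) -> pixel 1
static long toPixelNumber(long column, long row, long frameWidth) {
    return column + (row - 1) * frameWidth;
}

static long[] toColumnRow(long pixelNumber, long frameWidth) {
    long column = (pixelNumber - 1) % frameWidth + 1;
    long row = (pixelNumber - 1) / frameWidth + 1;
    return new long[] { column, row };
}

// Example: in a 1920-wide frame, (column 1, row 2) is pixel 1921.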
As rumored this week, chip designer and supplier Avago is buying chip designer Broadcom in a cash-and-stock deal worth $37bn (£24bn). Broadcom is probably best known to Reg readers as the biz behind the BCM system-on-chips found in the Raspberry Pi and various other gizmos.

The combined biz will rename itself Broadcom Ltd as it continues its consolidation depredations in the semiconductor industry. Avago has headquarters in San Jose, California, and Singapore. Broadcom is based in Irvine, California.

Ironically, Avago earlier bought Emulex, which Broadcom tried to buy in a hotly contested hostile takeover in 2009. That deal failed, and with Emulex never living up to its hyped management expectations, it fell into Avago's hands in February this year. Now it is being brought together with Broadcom, but inside Avago.

The combined Avago-Broadcom company is valued at a massive $77bn (£50bn), and says it is the world's leading diversified communications semiconductor company. It will certainly overtake Texas Instruments with its $58.2bn (£38bn) capitalization. The acquisition is being bankrolled by a combination of Avago shares plus $17bn in cash, including $9bn in new debt financing from a consortium of banks.

Avago's basic history and acquisitions:

1961: HP starts up a semiconductor products division.
1999: This division separates out as Agilent Technologies.
Oct 2005: Private equity groups KKR and Silver Lake Partners buy Agilent's semiconductor products division for $2.6bn, and form Avago Technologies.
Oct 2008: Avago buys Infineon Technologies' bulk acoustic wave business for $23m.
Apr 2013: Avago buys CyOptics for $400m.
Dec 2013: Avago buys LSI for $6.6bn.
Aug 2014: Avago claims it's the 9th largest semiconductor company.
Feb 2015: Avago gobbles Emulex for $606m.
May 2015: Avago gobbles Broadcom for $37bn.

Henry Nicholas, a cofounder and ex-CEO of Broadcom, said in a canned quote: "In Avago, we have found a culture and a management team that embody the best of the philosophies on which Broadcom was founded, together with a fast-paced, no-nonsense, process-driven business culture that we need to take our combined company to the next level. I am confident that, under the visionary leadership of Hock Tan, the combined company will realise its potential to be the world's greatest semiconductor company."

To be the world's greatest semiconductor company? Is this just a newly enriched cofounder's bombast, or does he mean it, bearing in mind that Intel exists with a market capitalization of $161.3bn?

Acquired Broadcom's cofounder, chairman and CTO Henry Samueli becomes CTO for the combined company, and will join the Broadcom Ltd board. Nicholas gets a strategic advisory role, reporting to Broadcom boss Hock Tan, who becomes CEO of the whole show.

Separately, Avago reported its fiscal results for the second quarter of 2015: revenue was $1.6bn, a decrease of 1 per cent from $1.64bn in the previous quarter and an increase of 130 per cent from $701m in the same quarter last year. Net income was $344m, down from $351m in the prior quarter, and more than double the $158m a year ago.

Broadcom Ltd's management expects to save $750m in costs in the coming 18 months. The combined company will be, Stifel MD Aaron Rakers says, an end-to-end provider of Ethernet connectivity chips and products, and a vertically integrated supplier of Fibre Channel HBAs and ASICs.
This implies competition for QLogic, the other HBA and storage CNA supplier, especially as Avago supplies QLogic with its Fibre Channel ASICs through a long-term agreement, from its acquired LSI business. QLogic bought some Ethernet technology and blueprints from Broadcom for $147m in February last year. A part of that deal was that its Ethernet ASICs are bought exclusively from Broadcom, now Avago. Q now thinks it has a 26 per cent share of the $800m combined 10GbitE adapter and ASIC market, with Intel having around 50 per cent, Avago-Emulex at 13 per cent, and Mellanox at 3 per cent. It seems to El Reg that QLogic is now strategically threatened and could look for a white knight – such as Intel, perchance? Avago does not make general-purpose processors, but Broadcom does: it has a host of ARM-compatible system-on-chips. We may see Intel consider making acquisition moves to strengthen its position in the low-end, embedded world. There could also be consolidation elsewhere in the semiconductor industry as smaller players combine to strengthen their position versus Avago-Broadcom. The deal should close by the end of March 2016. It is subject to regulatory approvals, of course. ®
#include "roConstantBuffer.h" roConstantBuffer::roConstantBuffer(UINT bufferSize, UINT elementSize) : roGPUBuffer() { roGPUBuffer::CreateConstantBuffer(bufferSize, elementSize); } void roConstantBuffer::UploadData(int index, UINT numElements, const void* data) { roGPUBuffer::CopyToGPU(index, numElements, data); } D3D12_GPU_VIRTUAL_ADDRESS roConstantBuffer::GetView(UINT offset) { return roGPUBuffer::GetCBufferAddressOffset(offset); } void roConstantBuffer::Destroy() { roGPUBuffer::DiscardResource(); } roConstantBuffer::~roConstantBuffer(){} /////////////////////////////////////////////////////////////////////////////// roConstantBufferAsRing::~roConstantBufferAsRing() { } roConstantBufferAsRing::roConstantBufferAsRing(UINT64 bufferSize) : roGPUBuffer() { roGPUBuffer::CreateConstantBufferAsRing(bufferSize); } void roConstantBufferAsRing::UploadData(UINT64 offset, void* data, UINT memsize) { roGPUBuffer::CopyToGPUAsRing(offset, data, memsize); } D3D12_GPU_VIRTUAL_ADDRESS roConstantBufferAsRing::GetView(UINT64 offset) { return roGPUBuffer::GetCBufferAddressOffsetAsRing(offset); } void roConstantBufferAsRing::Destroy() { roGPUBuffer::DiscardResource(); }
/**
 * Copyright (C) 2015-2020 <NAME> (www.helger.com)
 * philip[at]helger[dot]com
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.helger.event.dispatch.async;

import java.util.function.Consumer;

import javax.annotation.Nonnull;

import com.helger.event.IEvent;
import com.helger.event.dispatch.IEventDispatcher;
import com.helger.event.observerqueue.IEventObserverQueue;

/**
 * Dispatch events asynchronously.
 *
 * @author <NAME>
 */
@FunctionalInterface
public interface IAsynchronousEventDispatcher extends IEventDispatcher
{
  /**
   * Dispatch an event to a number of observers in an asynchronous way.
   *
   * @param aEvent
   *        The event to be dispatched. May not be <code>null</code>.
   * @param aObservers
   *        The list of available observers. They need to be queried whether
   *        they are interested in the event. May not be <code>null</code>.
   * @param aOverallResultConsumer
   *        The callback to be called once all results are present. May not be
   *        <code>null</code>. Must be called even if all event handlers are
   *        "void" handlers.
   */
  void dispatch (@Nonnull final IEvent aEvent,
                 @Nonnull final IEventObserverQueue aObservers,
                 @Nonnull final Consumer <Object> aOverallResultConsumer);
}
/** Copyright 2020 RWTH Aachen University. All rights reserved.
 *
 *  Licensed under the RWTH ASR License (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.hltpr.rwth-aachen.de/rwth-asr/rwth-asr-license.html
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
#include <Mm/Types.hh>
#include <Nn/BufferedAlignedFeatureProcessor.hh>
#include <Test/UnitTest.hh>

class BufferedAlignedFeatureProcessor : public Nn::BufferedAlignedFeatureProcessor<f32> {
public:
    typedef Nn::BufferedFeatureExtractor<f32>        PrePrecursor;
    typedef Nn::BufferedAlignedFeatureProcessor<f32> Precursor;
    using Precursor::acousticModelNeedInit_;
    using Precursor::alignmentBuffer_;
    using Precursor::alignmentWeightsBuffer_;
    using Precursor::classLabelWrapper_;
    using Precursor::generateMiniBatch;
    using Precursor::initBuffer;
    using Precursor::processAlignedFeature;
    using PrePrecursor::batchSize_;
    using PrePrecursor::featureBuffer_;
    using PrePrecursor::nBufferedFeatures_;
    using PrePrecursor::segmentIndexBuffer_;

public:
    BufferedAlignedFeatureProcessor(const Core::Configuration& config)
            : Core::Component(config), Precursor(config, false) {}
};

class TestBufferedAlignedFeatureProcessor : public Test::ConfigurableFixture {
public:
    BufferedAlignedFeatureProcessor* processor_;
    u32                              nClasses_;
    Core::Ref<const Speech::Feature> feature1_;
    Core::Ref<const Speech::Feature> feature2_;
    Core::Ref<const Speech::Feature> feature3_;

    void setUp();
    void tearDown();
};

void TestBufferedAlignedFeatureProcessor::setUp() {
    setParameter("*.buffer-size", "3");
    setParameter("*.on-error", "ignore");
    setParameter("*.buffer-type", "batch");
    setParameter("*.channel", "nil");
    setParameter("*.disregard-classes", "5");
    setParameter("*.shuffle", "false");
    nClasses_  = 10;
    processor_ = new BufferedAlignedFeatureProcessor(config);
    processor_->acousticModelNeedInit_ = false;
    processor_->classLabelWrapper_     = new Nn::ClassLabelWrapper(config, nClasses_);

    // input
    Flow::Vector<Mm::FeatureType>* vector1 = new Flow::Vector<Mm::FeatureType>(2);
    vector1->at(0) = 1.0;
    vector1->at(1) = 2.0;
    Flow::DataPtr<Flow::Vector<Mm::FeatureType>> dptr1(vector1);
    Core::Ref<const Speech::Feature> feature1(new Speech::Feature(dptr1));
    feature1_ = feature1;

    Flow::Vector<Mm::FeatureType>* vector2 = new Flow::Vector<Mm::FeatureType>(2);
    vector2->at(0) = 3.0;
    vector2->at(1) = 4.0;
    Flow::DataPtr<Flow::Vector<Mm::FeatureType>> dptr2(vector2);
    Core::Ref<const Speech::Feature> feature2(new Speech::Feature(dptr2));
    feature2_ = feature2;

    Flow::Vector<Mm::FeatureType>* vector3 = new Flow::Vector<Mm::FeatureType>(2);
    vector3->at(0) = 5.0;
    vector3->at(1) = 6.0;
    Flow::DataPtr<Flow::Vector<Mm::FeatureType>> dptr3(vector3);
    Core::Ref<const Speech::Feature> feature3(new Speech::Feature(dptr3));
    feature3_ = feature3;
}

void TestBufferedAlignedFeatureProcessor::tearDown() {
    delete processor_;
}

TEST_F(Test, TestBufferedAlignedFeatureProcessor, buffer) {
    processor_->processAlignedFeature(feature1_, 3);
    EXPECT_EQ(1u, processor_->nBufferedFeatures_);
    EXPECT_EQ(0u, processor_->segmentIndexBuffer_.at(0));

    processor_->processAlignedFeature(feature2_, 1);
    EXPECT_EQ(2u, processor_->nBufferedFeatures_);
    EXPECT_EQ(0u, processor_->segmentIndexBuffer_.at(1));

    processor_->processAlignedFeature(feature3_, 9);
    EXPECT_EQ(3u, processor_->nBufferedFeatures_);
    EXPECT_EQ(0u, processor_->segmentIndexBuffer_.at(2));

    EXPECT_EQ(processor_->classLabelWrapper_->getOutputIndexFromClassIndex(3), processor_->alignmentBuffer_.at(0));
    EXPECT_EQ(processor_->classLabelWrapper_->getOutputIndexFromClassIndex(1), processor_->alignmentBuffer_.at(1));
    EXPECT_EQ(processor_->classLabelWrapper_->getOutputIndexFromClassIndex(9), processor_->alignmentBuffer_.at(2));

    f32 val = 1.0f;
    for (u32 i = 0; i < 3; i++) {
        for (u32 j = 0; j < 2; j++) {
            EXPECT_EQ(val, processor_->featureBuffer_.at(0).at(j, i));
            val = val + 1.0f;
        }
    }
}

// with alignment weights
class TestBufferedAlignedFeatureProcessorWithWeights : public Test::ConfigurableFixture {
public:
    BufferedAlignedFeatureProcessor* trainer_;
    u32                              nClasses_;
    Core::Ref<const Speech::Feature> feature1_;
    Core::Ref<const Speech::Feature> feature2_;
    Core::Ref<const Speech::Feature> feature3_;
    Mm::Weight                       w1_, w2_, w3_;  // weights from alignment

    virtual void setUp();
    void         tearDown();
};

void TestBufferedAlignedFeatureProcessorWithWeights::setUp() {
    setParameter("*.buffer-size", "3");
    setParameter("*.on-error", "ignore");
    setParameter("*.buffer-type", "batch");
    setParameter("*.channel", "nil");
    setParameter("*.disregard-classes", "5");
    setParameter("*.shuffle", "false");
    setParameter("*.weighted-alignment", "true");  // default = false
    nClasses_ = 10;
    trainer_  = new BufferedAlignedFeatureProcessor(config);
    trainer_->acousticModelNeedInit_ = false;
    trainer_->classLabelWrapper_     = new Nn::ClassLabelWrapper(config, nClasses_);

    // input
    Flow::Vector<Mm::FeatureType>* vector1 = new Flow::Vector<Mm::FeatureType>(2);
    vector1->at(0) = 1.0;
    vector1->at(1) = 2.0;
    Flow::DataPtr<Flow::Vector<Mm::FeatureType>> dptr1(vector1);
    Core::Ref<const Speech::Feature> feature1(new Speech::Feature(dptr1));
    feature1_ = feature1;
    w1_       = 1.0;

    Flow::Vector<Mm::FeatureType>* vector2 = new Flow::Vector<Mm::FeatureType>(2);
    vector2->at(0) = 3.0;
    vector2->at(1) = 4.0;
    Flow::DataPtr<Flow::Vector<Mm::FeatureType>> dptr2(vector2);
    Core::Ref<const Speech::Feature> feature2(new Speech::Feature(dptr2));
    feature2_ = feature2;
    w2_       = 0.31415;

    Flow::Vector<Mm::FeatureType>* vector3 = new Flow::Vector<Mm::FeatureType>(2);
    vector3->at(0) = 5.0;
    vector3->at(1) = 6.0;
    Flow::DataPtr<Flow::Vector<Mm::FeatureType>> dptr3(vector3);
    Core::Ref<const Speech::Feature> feature3(new Speech::Feature(dptr3));
    feature3_ = feature3;
    w3_       = 0.5;
}

void TestBufferedAlignedFeatureProcessorWithWeights::tearDown() {
    delete trainer_;
}

TEST_F(Test, TestBufferedAlignedFeatureProcessorWithWeights, buffer) {
    trainer_->processAlignedFeature(feature1_, 3, w1_);
    EXPECT_EQ(1u, trainer_->nBufferedFeatures_);
    EXPECT_EQ(0u, trainer_->segmentIndexBuffer_.at(0));
    EXPECT_EQ(w1_, trainer_->alignmentWeightsBuffer_.at(0));

    trainer_->processAlignedFeature(feature2_, 1, w2_);
    EXPECT_EQ(2u, trainer_->nBufferedFeatures_);
    EXPECT_EQ(0u, trainer_->segmentIndexBuffer_.at(1));
    EXPECT_EQ(w2_, trainer_->alignmentWeightsBuffer_.at(1));

    trainer_->processAlignedFeature(feature3_, 5, w3_);
    EXPECT_EQ(2u, trainer_->nBufferedFeatures_);  // 2 because 5 is in disregard classes and is therefore dropped

    trainer_->processAlignedFeature(feature3_, 9, w3_);
    EXPECT_EQ(0u, trainer_->segmentIndexBuffer_.at(2));
    EXPECT_EQ(w3_, trainer_->alignmentWeightsBuffer_.at(2));

    EXPECT_EQ(trainer_->classLabelWrapper_->getOutputIndexFromClassIndex(3), trainer_->alignmentBuffer_.at(0));
    EXPECT_EQ(trainer_->classLabelWrapper_->getOutputIndexFromClassIndex(1), trainer_->alignmentBuffer_.at(1));
    EXPECT_EQ(trainer_->classLabelWrapper_->getOutputIndexFromClassIndex(9), trainer_->alignmentBuffer_.at(2));

    f32 val = 1.0f;
    for (u32 i = 0; i < 3; i++) {
        for (u32 j = 0; j < 2; j++) {
            EXPECT_EQ(val, trainer_->featureBuffer_.at(0).at(j, i));
            val = val + 1.0f;
        }
    }
}
# For each test case: count the minimum number of positions that must be
# changed so that s contains no palindromic substring of length 2 or 3,
# i.e. no s[i] == s[i-1] and no s[i] == s[i-2].
for _ in range(int(input())):
    s = input()
    k = len(s)
    # check[i] == 1 marks a position already chosen to be changed; a changed
    # character can always be picked so that it causes no new conflict.
    check = [0] * k
    ans = 0
    for i in range(1, k):
        if i == 1:
            if s[i] == s[i - 1] and check[i - 1] == 0:
                check[i] = 1
                ans += 1
        else:
            if (s[i] == s[i - 1] and check[i - 1] == 0) or (s[i] == s[i - 2] and check[i - 2] == 0):
                check[i] = 1
                ans += 1
    print(ans)
/**
 * A simple {@link Fragment} subclass.
 */
public class MoreFragment extends Fragment {

    ListView lv_more;

    public MoreFragment() {
        // Required empty public constructor
    }

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setHasOptionsMenu(true);
        SessionManager.getInstance(getContext()).setScreen("MoreFragment");
    }

    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        // Inflate the layout for this fragment
        View view = inflater.inflate(R.layout.fragment_more, container, false);
        getActivity().setTitle("More");
        lv_more = view.findViewById(R.id.lv_more);
        MoreAdapter moreAdapter = new MoreAdapter(getContext(), FrontEngine.getInstance().getMoreList());
        lv_more.setAdapter(moreAdapter);
        moreAdapter.notifyDataSetChanged();
        lv_more.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                String name = FrontEngine.getInstance().getMoreList().get(position);
                if (name.equals("Über uns")) {
                    Intent aboutUs = new Intent(getActivity(), AboutActivity.class);
                    startActivity(aboutUs);
                    // Return here: this entry is not a "label:scheme:path" link, so the
                    // split below would throw ArrayIndexOutOfBoundsException.
                    return;
                }
                // Remaining entries are expected in the form "label:scheme:path".
                String[] link = name.split(":");
                Intent browserIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(link[1] + ":" + link[2]));
                startActivity(browserIntent);
            }
        });
        return view;
    }

    // Take part in options-menu creation so the fragment can hide the favourite action.
    @Override
    public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
        menu.findItem(R.id.action_favorite).setVisible(false);
        super.onCreateOptionsMenu(menu, inflater);
    }
}
import dataclasses
import datetime
import functools
import typing
from typing import Type, TypeVar

import elasticsearch7
import pydantic

from utils.exceptions import IndexWasNotInitializedError
from utils.index import IndexMeta, set_index_meta
from utils.typings import CreateAction

DocType = TypeVar("DocType", bound="StrechyDocument")


@dataclasses.dataclass(init=True)
class BulkInsertResults:
    errors: list[tuple[Type["StrechyDocument"], dict[str, typing.Any]]]
    success: list[Type["StrechyDocument"]]


class StrechyDocument(
    pydantic.BaseModel,
):
    id: typing.Optional[str] = None
    created: typing.Optional[datetime.datetime] = None

    def __init__(
        self,
        **kwargs,
    ):
        super(StrechyDocument, self).__init__(**kwargs)

    @functools.cached_property
    def _index_meta(
        self,
    ) -> IndexMeta:
        return StrechyDocument._get_index_meta()

    @classmethod
    async def get(
        cls: Type[DocType],
        document_id: str,
    ) -> DocType:
        _index_meta: IndexMeta = cls._get_index_meta()
        _db_return_value: dict[str, typing.Any] = await _index_meta.client.get(
            index=_index_meta.index_name,
            id=document_id,
        )
        _document = _db_return_value["_source"]
        _document["id"] = _db_return_value.pop("_id")
        return cls(
            **_document,
        )

    @classmethod
    async def insert_document(
        cls,
        document: DocType,
        document_id: typing.Optional[str] = None,
    ) -> DocType:
        if type(document) is not cls:
            raise TypeError(f"document must be of type: {cls.__name__}")
        _index_meta = cls._get_index_meta()
        _date_created = datetime.datetime.utcnow()
        document.created = _date_created
        _db_return = await _index_meta.client.index(
            index=_index_meta.index_name,
            body=document.json(
                exclude={"id"} if document.id is None else None
            ),
            id=document_id,
        )
        document.id = _db_return["_id"]
        return document

    @staticmethod
    def _bulk_create_actions(
        documents: list[DocType], index_name: str
    ) -> list[typing.Union[CreateAction, dict[str, typing.Any]]]:
        """
        Create Elasticsearch bulk create actions.

        Important: actions must alternate with their payloads,
        i.e. [action, document, action, document, ...].

        :param documents: StrechyDocument instances
        :param index_name: Elasticsearch index name from meta
        :return: Elasticsearch create actions for the bulk API
        """
        _create_actions: list[
            typing.Union[CreateAction, dict[str, typing.Any]]
        ] = []
        for document in documents:
            _create_action: CreateAction = {
                "create": {
                    "_index": index_name,
                }
            }
            if document.id is not None:
                _create_action["create"].update({"_id": document.id})
            _create_actions.append(_create_action)
            _create_actions.append(document.dict(exclude={"id"}))
        return _create_actions

    @classmethod
    async def bulk_insert_document(
        cls,
        documents: list[DocType],
    ) -> BulkInsertResults:
        """
        Bulk-insert documents. IDs already present on documents are reused;
        otherwise Elasticsearch generates them.

        :param documents: StrechyDocument instances, with or without IDs
        :return: BulkInsertResults: one list of successfully inserted
            documents (with IDs) and one list of (document, error) tuples
        """
        _index_meta = cls._get_index_meta()
        for _document in documents:
            _document.created = datetime.datetime.utcnow()
        _create_actions = cls._bulk_create_actions(
            documents=documents, index_name=_index_meta.index_name
        )
        _db_return = await _index_meta.client.bulk(
            body=_create_actions,
        )
        _bulk_insert_results = BulkInsertResults(success=[], errors=[])
        for _document, _item in zip(documents, _db_return["items"]):
            if _item["create"]["status"] == 201:
                _document.id = _item["create"]["_id"]
                _bulk_insert_results.success.append(_document)
            else:
                _bulk_insert_results.errors.append(
                    (_document, _item["create"]["error"])
                )
        return _bulk_insert_results

    @classmethod
    async def init_index(
        cls: Type[DocType],
        client: elasticsearch7.AsyncElasticsearch,
    ) -> None:
        """
        Attach index metadata to the class; must be called before any of the
        class APIs are used.

        :param client: Elasticsearch client
        :return: None
        """
        _index_meta = await set_index_meta(
            client=client,
            index_name=cls.__name__.lower(),
            model_class=cls,
        )
        setattr(cls, "IndexMeta", _index_meta)
        return None

    @classmethod
    def _get_index_meta(
        cls: Type[DocType],
    ) -> IndexMeta:
        """
        :return: the IndexMeta attached by init_index
        :raises IndexWasNotInitializedError: if init_index was never called
        """
        if (collection_meta := getattr(cls, "IndexMeta", None)) is None:
            raise IndexWasNotInitializedError()
        return collection_meta
#ifndef __SAMPLE_WEB_EXTENSION_
#define __SAMPLE_WEB_EXTENSION_

#include <gio/gio.h>
#include <JavaScriptCore/JSContextRef.h>
#include <jsc/jsc.h>

#ifdef PLATFORM_GTK
#include <webkit2/webkit-web-extension.h>
#elif defined(PLATFORM_WPE)
#include <wpe/webkit-web-extension.h>
#endif

#endif /* __SAMPLE_WEB_EXTENSION_ */
The influence of musculoskeletal forces on the growth of the prenatal cortex in the ilium: a finite element study

Abstract Remodelling and adaptation of bone within the pelvis is believed to be influenced by the mechanical strains generated during locomotion. Variation in the cortical bone thickness observed in the prenatal ilium has been linked to the musculoskeletal loading associated with in utero movements; for example, the development of a thicker gluteal cortex is a possible response to contractions of the gluteal muscles. This study examines whether the strains generated in the prenatal iliac cortex by musculoskeletal loading in utero are capable of initiating bone remodelling, either to maintain homeostasis or to form new bone. Computational modelling techniques were used first to predict the muscle forces and resultant joint reaction force acting on the pelvis during a range of in utero movements. Finite element analyses were subsequently performed to calculate the von Mises strains induced in the prenatal ilium. The results demonstrated that strains generated in the iliac cortex were above the thresholds suggested to regulate bone remodelling, whether to maintain homeostasis or to form new bone. Further simulations are required to investigate the extent to which the heterogeneous cortex forms in response to these strains (i.e., remodelling) or whether developmental bone modelling plays a more pivotal role.
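For orientation only: the abstract does not spell out how its von Mises strains are defined, but a standard definition of the equivalent (von Mises) strain used by finite element packages, written in terms of the deviatoric strain tensor, is

\[
\varepsilon_{vM} \;=\; \sqrt{\tfrac{2}{3}\,\boldsymbol{\varepsilon}^{\mathrm{dev}} : \boldsymbol{\varepsilon}^{\mathrm{dev}}},
\qquad
\boldsymbol{\varepsilon}^{\mathrm{dev}} \;=\; \boldsymbol{\varepsilon} \,-\, \tfrac{1}{3}\operatorname{tr}(\boldsymbol{\varepsilon})\,\mathbf{I},
\]

which reduces the full strain tensor to a single scalar that can be compared against remodelling thresholds. This is the textbook form, not necessarily the exact measure used in the paper.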
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***

package batch

import (
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

// The Batch Compute Environment data source allows access to details of a specific
// compute environment within AWS Batch.
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// 	"github.com/pulumi/pulumi-aws/sdk/v3/go/aws/batch"
// 	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// 	pulumi.Run(func(ctx *pulumi.Context) error {
// 		_, err := batch.LookupComputeEnvironment(ctx, &batch.LookupComputeEnvironmentArgs{
// 			ComputeEnvironmentName: "batch-mongo-production",
// 		}, nil)
// 		if err != nil {
// 			return err
// 		}
// 		return nil
// 	})
// }
// ```
func LookupComputeEnvironment(ctx *pulumi.Context, args *LookupComputeEnvironmentArgs, opts ...pulumi.InvokeOption) (*LookupComputeEnvironmentResult, error) {
	var rv LookupComputeEnvironmentResult
	err := ctx.Invoke("aws:batch/getComputeEnvironment:getComputeEnvironment", args, &rv, opts...)
	if err != nil {
		return nil, err
	}
	return &rv, nil
}

// A collection of arguments for invoking getComputeEnvironment.
type LookupComputeEnvironmentArgs struct {
	// The name of the Batch Compute Environment
	ComputeEnvironmentName string `pulumi:"computeEnvironmentName"`
	// Key-value map of resource tags
	Tags map[string]string `pulumi:"tags"`
}

// A collection of values returned by getComputeEnvironment.
type LookupComputeEnvironmentResult struct {
	// The ARN of the compute environment.
	Arn                    string `pulumi:"arn"`
	ComputeEnvironmentName string `pulumi:"computeEnvironmentName"`
	// The ARN of the underlying Amazon ECS cluster used by the compute environment.
	EcsClusterArn string `pulumi:"ecsClusterArn"`
	// The provider-assigned unique ID for this managed resource.
	Id string `pulumi:"id"`
	// The ARN of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf.
	ServiceRole string `pulumi:"serviceRole"`
	// The state of the compute environment (for example, `ENABLED` or `DISABLED`). If the state is `ENABLED`, then the compute environment accepts jobs from a queue and can scale out automatically based on queues.
	State string `pulumi:"state"`
	// The current status of the compute environment (for example, `CREATING` or `VALID`).
	Status string `pulumi:"status"`
	// A short, human-readable string to provide additional details about the current status of the compute environment.
	StatusReason string `pulumi:"statusReason"`
	// Key-value map of resource tags
	Tags map[string]string `pulumi:"tags"`
	// The type of the compute environment (for example, `MANAGED` or `UNMANAGED`).
	Type string `pulumi:"type"`
}
/**
 *
 * @author Marcel Jakob
 */
@Entity
@Table(name = "UPP_Zahlungsmethode")
public class Zahlungsmethode implements Serializable {

    private static final long serialVersionUID = 1L;

    @Id
    @GeneratedValue(strategy = GenerationType.TABLE, generator = "zahlungsmethode_ids")
    @TableGenerator(name = "zahlungsmethode_ids", initialValue = 0, allocationSize = 50)
    private long id;

//    @OneToOne
//    @NotNull(message = "Zu einer Zahlungsmethode wird ein Kunde benötigt.")
//    private Kunde kunde;

    @Lob
    private String kundeId;

    private String kreditkartenNummer;
    private String kreditkartenDatum;
    private String paypalEmail;
    private String iban;
    private String bic;

    public Zahlungsmethode() {
    }

    public Zahlungsmethode(String kundeId, String kreditkartenNummer, String kreditkartenDatum,
                           String paypalEmail, String iban, String bic) {
        this.kundeId = kundeId;
        this.kreditkartenNummer = kreditkartenNummer;
        this.kreditkartenDatum = kreditkartenDatum;
        this.paypalEmail = paypalEmail;
        this.iban = iban;
        this.bic = bic;
    }

    public long getId() {
        return id;
    }

    public String getKundeId() {
        return kundeId;
    }

    public void setKundeId(String kundeId) {
        this.kundeId = kundeId;
    }

    public String getKreditkartenNummer() {
        return kreditkartenNummer;
    }

    public void setKreditkartenNummer(String kreditkartenNummer) {
        this.kreditkartenNummer = kreditkartenNummer;
    }

    public String getKreditkartenDatum() {
        return kreditkartenDatum;
    }

    public void setKreditkartenDatum(String kreditkartenDatum) {
        this.kreditkartenDatum = kreditkartenDatum;
    }

    public String getPaypalEmail() {
        return paypalEmail;
    }

    public void setPaypalEmail(String paypalEmail) {
        this.paypalEmail = paypalEmail;
    }

    public String getIban() {
        return iban;
    }

    public void setIban(String iban) {
        this.iban = iban;
    }

    public String getBic() {
        return bic;
    }

    public void setBic(String bic) {
        this.bic = bic;
    }
}
/*
Copyright Zhigui.com. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package transport

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"time"

	"github.com/pkg/errors"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
)

var (
	// Max send and receive bytes for grpc clients and servers
	MaxRecvMsgSize = 100 * 1024 * 1024 // 100 MiB
	MaxSendMsgSize = 100 * 1024 * 1024

	DefaultConnectionTimeout = time.Second * 3
	ServerMinInterval        = time.Duration(1) * time.Minute
)

// TLSOptions defines the security parameters (e.g. TLS) for a
// GRPCServer or GRPCClient instance
type TLSOptions struct {
	// PEM-encoded X509 public key to be used for TLS communication
	Certificate []byte
	// PEM-encoded private key to be used for TLS communication
	Key []byte
	// Set of PEM-encoded X509 certificate authorities used by clients to
	// verify server certificates
	ServerRootCAs [][]byte
	// Set of PEM-encoded X509 certificate authorities used by servers to
	// verify client certificates
	ClientRootCAs [][]byte
	// Whether or not to use TLS for communication
	UseTLS bool
	// Whether or not TLS client must present certificates for authentication
	RequireClientCert bool
}

type GrpcClient struct {
	// TLS configuration used by the grpc.ClientConn
	tlsConfig *tls.Config
	// Options for setting up new connections
	dialOpts []grpc.DialOption
	// Duration for which to block while establishing a new connection
	timeout time.Duration
}

// NewGrpcClient creates a new GrpcClient from the given TLS options.
func NewGrpcClient(opts *TLSOptions) (*GrpcClient, error) {
	client := &GrpcClient{}

	// parse secure options
	var err error
	client.tlsConfig, err = parseTLSOptionsForClient(opts)
	if err != nil {
		return nil, err
	}

	// keepalive options
	kaOpts := keepalive.ClientParameters{
		Time:                1 * time.Minute,
		Timeout:             20 * time.Second,
		PermitWithoutStream: true,
	}
	client.dialOpts = append(client.dialOpts, grpc.WithKeepaliveParams(kaOpts))

	// Unless asynchronous connect is set, make connection establishment blocking.
	client.dialOpts = append(client.dialOpts, grpc.WithBlock())
	client.dialOpts = append(client.dialOpts, grpc.FailOnNonTempDialError(true))

	// set send/recv message size to package defaults
	client.dialOpts = append(client.dialOpts, grpc.WithDefaultCallOptions(
		grpc.MaxCallRecvMsgSize(MaxRecvMsgSize),
		grpc.MaxCallSendMsgSize(MaxSendMsgSize),
	))

	client.timeout = DefaultConnectionTimeout

	return client, nil
}

// NewConnection returns a grpc.ClientConn for the target address, using TLS
// when the client was configured with it.
func (client *GrpcClient) NewConnection(address string) (*grpc.ClientConn, error) {
	var dialOpts []grpc.DialOption
	dialOpts = append(dialOpts, client.dialOpts...)

	if client.tlsConfig != nil {
		dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(client.tlsConfig)))
	} else {
		dialOpts = append(dialOpts, grpc.WithInsecure())
	}

	ctx, cancel := context.WithTimeout(context.Background(), client.timeout)
	defer cancel()
	conn, err := grpc.DialContext(ctx, address, dialOpts...)
	if err != nil {
		return nil, errors.Errorf("failed to create new connection, %v", errors.WithStack(err))
	}
	return conn, nil
}

func parseTLSOptionsForClient(opts *TLSOptions) (*tls.Config, error) {
	if opts == nil || !opts.UseTLS {
		return nil, nil
	}

	tlsConfig := &tls.Config{
		MinVersion: tls.VersionTLS12,
	}
	if len(opts.ServerRootCAs) > 0 {
		certPool := x509.NewCertPool()
		for _, certBytes := range opts.ServerRootCAs {
			if ok := certPool.AppendCertsFromPEM(certBytes); !ok {
				return nil, errors.New("error adding server root certificate")
			}
		}
		tlsConfig.RootCAs = certPool
	}

	if opts.RequireClientCert {
		// make sure we have both Key and Certificate
		if opts.Key != nil && opts.Certificate != nil {
			cert, err := tls.X509KeyPair(opts.Certificate, opts.Key)
			if err != nil {
				return nil, errors.Errorf("failed to load client certificate, err: %v", err)
			}
			tlsConfig.Certificates = []tls.Certificate{cert}
		} else {
			return nil, errors.New("both Key and Certificate are required when using mutual TLS")
		}
	}

	return tlsConfig, nil
}
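A minimal usage sketch for the client above. The import path, server address, and certificate file name are illustrative assumptions, not taken from the repository:

package main

import (
	"io/ioutil"
	"log"

	// Assumed import path; adjust to the actual layout of z-ledger.
	transport "github.com/zhigui-projects/z-ledger/transport"
)

func main() {
	// Read the CA certificate that signed the server's TLS certificate
	// (hypothetical file name).
	caPEM, err := ioutil.ReadFile("ca.pem")
	if err != nil {
		log.Fatalf("read CA cert: %v", err)
	}

	// UseTLS=false would instead yield a plaintext (insecure) connection.
	client, err := transport.NewGrpcClient(&transport.TLSOptions{
		UseTLS:        true,
		ServerRootCAs: [][]byte{caPEM},
	})
	if err != nil {
		log.Fatalf("create client: %v", err)
	}

	// Blocks for up to DefaultConnectionTimeout (3s) because of grpc.WithBlock().
	conn, err := client.NewConnection("localhost:7051") // hypothetical address
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// conn can now be handed to any generated gRPC service client.
	_ = conn
}

Note the design choice in parseTLSOptionsForClient: a nil or UseTLS=false option set returns a nil *tls.Config, and NewConnection interprets that nil as "dial insecure", so a single code path serves both modes.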
package org.cloudfoundry.multiapps.controller.core.validators.parameters;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.Collections;
import java.util.Map;
import java.util.stream.Stream;

import org.cloudfoundry.multiapps.common.test.Tester;
import org.cloudfoundry.multiapps.common.test.Tester.Expectation;
import org.cloudfoundry.multiapps.controller.core.model.SupportedParameters;
import org.cloudfoundry.multiapps.mta.model.Module;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

class HostValidatorTest {

    private final Tester tester = Tester.forClass(getClass());

    private final HostValidator validator = new HostValidator();

    public static Stream<Arguments> getParameters() {
        return Stream.of(
// @formatter:off
            // (0)
            Arguments.of("TEST_TEST_TEST", false, new Expectation("test-test-test")),
            // (1)
            Arguments.of("test-test-test", true , new Expectation("test-test-test")),
            // (2)
            Arguments.of("---", false, new Expectation(Expectation.Type.EXCEPTION, "Could not create a valid host from \"---\"")),
            // (3)
            Arguments.of("@12", false, new Expectation("12")),
            // (4)
            Arguments.of("@@@", false, new Expectation(Expectation.Type.EXCEPTION, "Could not create a valid host from \"@@@\"")),
            // (5)
            Arguments.of(false, false, new Expectation(Expectation.Type.EXCEPTION, "Could not create a valid host from \"false\""))
// @formatter:on
        );
    }

    @ParameterizedTest
    @MethodSource("getParameters")
    void testValidate(Object host, boolean isValid, Expectation expectation) {
        assertEquals(isValid, validator.isValid(host, Collections.emptyMap()));
    }

    @Test
    void testCanCorrect() {
        assertTrue(validator.canCorrect());
    }

    @ParameterizedTest
    @MethodSource("getParameters")
    void testAttemptToCorrect(Object host, boolean isValid, Expectation expectation) {
        tester.test(() -> validator.attemptToCorrect(host, Collections.emptyMap()), expectation);
    }

    @Test
    void testGetParameterName() {
        assertEquals("host", validator.getParameterName());
    }

    @Test
    void testGetContainerType() {
        assertTrue(validator.getContainerType()
                            .isAssignableFrom(Module.class));
    }

    static Stream<Arguments> testValidateHostWithNamespace() {
        return Stream.of(Arguments.of("test", Collections.emptyMap(), true, "dev", false),
                         Arguments.of("dev-test", Collections.emptyMap(), true, "dev", true),
                         Arguments.of("prod-test-application", Map.of(SupportedParameters.APPLY_NAMESPACE, true), true, "prod", true),
                         Arguments.of("test-application", Map.of(SupportedParameters.APPLY_NAMESPACE, false), true, "prod", true),
                         Arguments.of(false, Collections.emptyMap(), true, "dev", false),
                         Arguments.of("test", Collections.emptyMap(), false, "dev", true));
    }

    @ParameterizedTest
    @MethodSource
    void testValidateHostWithNamespace(Object host, Map<String, Object> context, boolean applyNamespaceGlobal, String namespace,
                                       boolean expectedValidHost) {
        HostValidator hostValidator = new HostValidator(namespace, applyNamespaceGlobal);
        boolean result = hostValidator.isValid(host, context);
        assertEquals(expectedValidHost, result);
    }

    static Stream<Arguments> testAttemptToCorrectHostWithNamespace() {
        return Stream.of(Arguments.of("test", Collections.emptyMap(), true, "dev", new Expectation("dev-test")),
                         Arguments.of("test_application", Map.of(SupportedParameters.APPLY_NAMESPACE, false), true, "prod",
                                      new Expectation("test-application")),
                         Arguments.of("test-application", Map.of(SupportedParameters.APPLY_NAMESPACE, true), true, "prod",
                                      new Expectation("prod-test-application")),
                         Arguments.of(false, Collections.emptyMap(), true, "dev",
                                      new Expectation(Expectation.Type.EXCEPTION, "Could not create a valid host from \"false\"")),
                         Arguments.of("test", Collections.emptyMap(), false, "dev", new Expectation("test")));
    }

    @ParameterizedTest
    @MethodSource
    void testAttemptToCorrectHostWithNamespace(Object host, Map<String, Object> context, boolean applyNamespaceGlobal, String namespace,
                                               Expectation expectation) {
        HostValidator hostValidator = new HostValidator(namespace, applyNamespaceGlobal);
        tester.test(() -> hostValidator.attemptToCorrect(host, context), expectation);
    }

}
import { ValidateBoardState } from "./data"

export interface UpdateBoardValidateStateMethod<N extends string> {
    (name: N, valid: boolean, post: Post<ValidateBoardState>): void
}

interface Post<E> {
    (event: E): void
}
Communications Minister Stephen Conroy this morning announced a number of wide-ranging modifications to the Government's controversial mandatory internet filtering policy, including a delay of at least a year to the project while the state and Federal governments review the Refused Classification (RC) category of content which the filter would block.

In addition, major ISPs such as Telstra, Optus and Primus will voluntarily block (at the ISP level) a list of sites which specifically serve child abuse and pornography content until the mandatory filter is implemented. The list will be compiled and maintained by the Australian Communications and Media Authority.

Conroy's other additions to the policy this morning include:

- An annual review of content on the 'blacklist' of Refused Classification content by an independent expert, appointed in consultation with industry
- "Clear" avenues for appeal of classification decisions
- A policy that all content which is being considered for inclusion on the blacklist on the basis of a public complaint be classified by the existing Classification Board
- A policy that all parties affected by a content block have the ability to have decisions reviewed by the Classification Review Board
- The use of a standardised block page notification, which will allow ISPs to notify users that the content they have requested has been blocked, and how to seek a review of the block

"The public needs to have confidence that the URLs on the list, and the process by which they get there, is independent, rigorous, free from interference or influence and enables content and site owners access to appropriate review mechanisms," said Conroy in a statement. "This suite of measures will help the public have confidence that only the content specified by the legislation is being blocked."

The additions to the policy will be incorporated into the filter legislation, which is currently being developed.

Conroy acknowledged that "some sections of the community" had expressed concern about whether the range of material currently included in the RC category correctly reflected current community standards. "In order to address these concerns, the Government will recommend to State and Territory Ministers that a review of the RC classification be conducted at the earliest opportunity. The review would examine the current scope of the existing RC classification, and whether it adequately reflects community standards," he added.

Crikey correspondent Bernard Keane first revealed the news on Twitter, appearing to tweet from Conroy's press conference in Melbourne this morning. He noted that the review of RC content was expected to take a year.

The timing of the introduction of the legislation to support the filter, however, may still be later this year. Conroy said this week that he expected the legislation to be out this year, and likely before December. Earlier this year, Greens communications spokesperson Scott Ludlam predicted the filter legislation was unlikely to be introduced until after the Federal election, when the balance of power in the Senate could change. But it remains unclear when that election will be.
package core

import (
	"bytes"
	"fmt"
	"go.uber.org/zap/buffer"
	"gopkg.in/yaml.v3"
	"os"
	"strings"
)

type Variables []VariableStage

func (v *Variables) Inspect() error {
	for _, vs := range *v {
		if err := vs.Inspect(); err != nil {
			return err
		}
	}
	return nil
}

func (v *Variables) GenVariables() ([]*Variable, error) {
	var vars []*Variable
	for _, vs := range *v {
		vsVars, err := vs.GenVariables()
		if err != nil {
			return nil, err
		}
		vars = append(vars, vsVars...)
	}
	return vars, nil
}

func (v *Variables) Set() error {
	for _, vs := range *v {
		if err := vs.Set(); err != nil {
			return err
		}
	}
	return nil
}

type VariableStage map[string]*Variable

type yamlVariableStage map[string]*Variable

func (vs *VariableStage) UnmarshalYAML(value *yaml.Node) error {
	var aux yamlVariableStage
	if err := value.Decode(&aux); err != nil {
		return err
	}

	*vs = make(VariableStage, len(aux))
	for varName, variable := range aux {
		variable.Name = varName
		(*vs)[varName] = variable
	}
	return nil
}

func (vs *VariableStage) Inspect() error {
	for _, v := range *vs {
		if err := v.Inspect(); err != nil {
			return err
		}
	}
	return nil
}

func (vs *VariableStage) GenVariables() ([]*Variable, error) {
	vars := make([]*Variable, 0, len(*vs))
	for _, v := range *vs {
		vars = append(vars, v)
	}
	return vars, nil
}

func (vs *VariableStage) Set() error {
	for _, v := range *vs {
		if err := v.Set(); err != nil {
			return err
		}
	}
	return nil
}

type Variable struct {
	Name    string
	Value   *string
	Command *Command
}

type yamlVariableInline *string

type yamlVariableExtended struct {
	Value   *string
	Command *Command
}

func (v *Variable) UnmarshalYAML(value *yaml.Node) error {
	var auxInline yamlVariableInline
	if err := value.Decode(&auxInline); err == nil {
		v.Value = auxInline
		return nil
	}

	var auxExtended yamlVariableExtended
	if err := value.Decode(&auxExtended); err == nil {
		if auxExtended.Value != nil && auxExtended.Command != nil {
			return fmt.Errorf("variable should be <string>, or { value: <string> }, or { command: <command> }")
		}
		v.Value = auxExtended.Value
		v.Command = auxExtended.Command
		return nil
	}

	return fmt.Errorf("variable should be <string>, or { value: <string> }, or { command: <command> }")
}

func (v *Variable) Inspect() error {
	return v.Command.Inspect()
}

func (v *Variable) Set() error {
	var buff bytes.Buffer
	defer func() {
		logger.Println(buff.String())
	}()

	buff.WriteString(fmt.Sprintf("%s=", v.Name))

	if v.Command != nil {
		if v.Command.String != nil {
			buff.WriteString(fmt.Sprintf("$(%s) -> ", *v.Command.String))
		}
		var out buffer.Buffer
		v.Command.Stdout = &out
		if err := v.Command.Run(); err != nil {
			return err
		}
		varVal := strings.TrimSpace(out.String())
		v.Value = &varVal
	}

	buff.WriteString(fmt.Sprintf("%q", *v.Value))

	if err := os.Setenv(v.Name, *v.Value); err != nil {
		return err
	}
	return nil
}

func SetVariables(vars ...*Variable) error {
	for _, v := range vars {
		if err := v.Set(); err != nil {
			return err
		}
	}
	return nil
}
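A hedged end-to-end sketch of how these types are meant to be driven. The YAML document follows the three shapes accepted by Variable.UnmarshalYAML; the import path is an assumption, and the { command: ... } form is omitted because Command's internals live elsewhere in the package:

package main

import (
	"fmt"
	"log"
	"os"

	"gopkg.in/yaml.v3"

	// Assumed import path; adjust to the repository's actual layout.
	core "github.com/example/project/core"
)

// Two stages: stages are processed in order by Variables.Set, while the
// variables inside one stage (a map) are set in no particular order.
const doc = `
- GREETING: hello        # inline form: <string>
- TARGET:                # extended form: { value: <string> }
    value: world
`

func main() {
	var vars core.Variables
	if err := yaml.Unmarshal([]byte(doc), &vars); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	// Set exports each variable into the process environment and logs a
	// NAME="value" line per variable.
	if err := vars.Set(); err != nil {
		log.Fatalf("set: %v", err)
	}
	fmt.Println(os.Getenv("GREETING"), os.Getenv("TARGET")) // hello world
}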
Employees at four of the nation's largest technology companies have given the majority of their political dollars to Democratic candidates and causes, federal records show.

Democrats have received more than 80 percent of the $2.4 million in donations that workers at Amazon, Google, Facebook and Apple gave directly to candidates and causes affiliated with a party during the 2016 campaign cycle, with the presidential election driving much of the contributions. The proportion that went to Democrats falls to 60 percent when corporate PAC giving is included in the total.

Beyond giving to candidates, Silicon Valley employees have favored left-leaning causes, including the pro-gun-control Americans for Responsible Solutions; Mayday PAC and Wolf PAC, which encourage campaign finance reform; and EMILY's List, an organization that works to elect women who support abortion rights. The workers who donated range from top executives to lower-level software engineers.

"I think any candidate, Republican or Democrat, wants to be associated with the tech community and Silicon Valley. It gives you a street cred in that space to say that you're being supported by that community," said David Thomas, a partner at Mehlman Castagnetti Rosen & Thomas and former chief of staff to Rep. Zoe Lofgren (D-Calif.), who represents Silicon Valley.

The liberal bent of the donations is likely to fuel suspicions on the right about Silicon Valley, which has long faced charges of showing favoritism to Democratic officials and their causes. But some say the trend in giving is more reflective of the political climate in California.

"Not to minimize the role of campaign contributions, but in fundraising in Silicon Valley and Seattle, politicians are listening to what the people in the Bay Area are thinking about," said Lee Drutman, senior fellow in the program on political reform at New America. "They're not buying policy outcomes directly, but they're buying an audience."

While some employees at the four tech companies have given money to political action committees or to tech industry groups, which also dole out cash to candidates, they have mostly made the donations themselves, directly to candidates.

Only 19 percent of the more than $1.3 million donated by Google employees in the 2016 cycle, for example, has gone to industry groups and Google NetPAC, the company's political action committee. Sixty-eight percent of the remaining political cash from Google employees went directly to Democratic candidates and committees, with Republicans receiving 12 percent, according to an analysis by The Hill. The Libertarian National Committee received 2 percent.

Google has been accused of having a cozy relationship with the Obama administration, with company officials averaging about one visit per week to the White House since Obama took office. Google rejects the charges of favoritism and says many of the visits either took place with other tech industry officials or were for innocuous projects like photographing the White House art collection for Google's Art Project.

While Google employees are donating more money to Democrats, the company's official corporate PAC has played it down the middle. Google NetPAC has distributed $1.4 million during the 2016 cycle, with 49 percent going to Democratic candidates and lawmakers and 51 percent going to Republicans. It had more than $1.6 million in the bank at the end of April, the most recent reporting period.

The PACs for Amazon and Facebook also show a near 50-50 split among donations to Republicans and Democrats. There is a slight edge to the GOP, likely because Republicans have majorities in the House and Senate.

Bruce Cain, a political science professor at Stanford University, said big companies typically avoid playing favorites even if their employees skew toward one side of the aisle. "You can distinguish between the more cautious corporate strategy of firms versus the unadulterated view of people who work in those firms," Cain said.

Alone among Silicon Valley giants, Apple remains without a PAC for campaign giving, which reflects how the company's late CEO Steve Jobs was famously contemptuous of Washington. Employees at Apple have donated more than $575,000 in the 2016 cycle, with 78 percent of it going to Democrats. Roughly half of that money went to fund the presidential campaigns of Sen. Bernie Sanders (I-Vt.) and former Secretary of State Hillary Clinton. More than half of Apple's $104,138 in Republican donations went to vulnerable Sen. Rob Portman (R-Ohio). That money came from some of the company's top executives, including CEO Tim Cook.

Workers at Amazon contributed $452,691 to politics this cycle overall and $239,220 to Republicans and Democrats directly, with 38 percent of that cash given to Democratic candidates and causes. Amazon is led by founder Jeff Bezos, who is often described as having libertarian political beliefs. Between its employees and its political action committee, Amazon is by far the top contributor to Rep. Jason Chaffetz (R-Utah) this election cycle, according to the Center for Responsive Politics, having given roughly $50,000 so far.

Facebook employees have given more than $382,000 directly to political candidates and causes, with 78 percent of that going to Democrats. Workers spent an additional $193,323 on contributions to the Facebook PAC.

The social networking company was recently caught in a political firestorm after it was accused of suppressing conservative stories in its trending topics feature. The company said it found no deliberate bias on the trending feature and has tried to mend fences with the GOP by flying out prominent conservatives to meet with CEO Mark Zuckerberg.

Representatives for Google, Facebook, Apple and Amazon either did not respond or declined to comment for this article.

Silicon Valley is an increasingly critical fundraising stop for both parties. Democrats and Republicans drop in to fundraise from tech companies and are sometimes surprised by the array of issues that come up, said Thomas, the lobbyist and former aide.

"They expect to only get questions about patent reform or high-skilled visas — your standard tech issues — but more often than not, they get questions about issues that employees are more passionate about. It could be LGBT issues, income equality or comprehensive immigration reform," he said. "I think it's because these employees are progressive about the issues they care about, and that might not be the same issues that their companies lobby on in D.C."
<reponame>pleak-tools/pleak-sql-analysis {-# LANGUAGE MultiParamTypeClasses, FunctionalDependencies, RebindableSyntax, FlexibleInstances #-} module BanachQ where import Banach (Expr(..), TableExpr(..), (!!), chooseBeta, chooseBetaCustom, gamma, dualnorm, skipith) import qualified Banach as B import ProgramOptions import CreateTablesQ import DatabaseQ import GroupQ import NormsQ hiding ((!!)) import VarstateQ (VarState(..),VState(..)) import ErrorMsg import RangeUtils import qualified Prelude as P import qualified Data.List as L import Prelude hiding (fromInteger,fromRational,(!!),(+),(-),(*),(/),(**),(==),(/=),(<=),(>=),(<),(>),exp,abs,sum,product,minimum,maximum) import Data.List hiding ((!!),sum,product,minimum,maximum) import Data.List.Split import qualified Data.Map as M import qualified Data.Set as S import Data.IORef import Text.Printf import Control.Monad import System.Process import qualified System.IO.Strict as StrictIO sqlsaExecutablePathQuiet = "./sqlsa-quiet" sqlsaExecutablePathVerbose = "./sqlsa-verbose" combinedSensitivityTmpFileName = "_combined_sensitivity.tmp" temporaryTableName = "_temp" readTableToSensitivityMap :: IO (M.Map String Double) readTableToSensitivityMap = do s <- StrictIO.readFile combinedSensitivityTmpFileName return $ M.fromList $ map ((\ [w1,w2] -> (w1, read w2 :: Double)) . words) $ lines s fromInteger :: Integer -> Double fromInteger = P.fromInteger fromRational :: Rational -> Double fromRational = P.fromRational data TaskMap = TM [(String,Bool)] deriving Show getMap :: TaskMap -> M.Map String Bool getMap (TM xs) = M.fromList xs -- the main data needed for banach analysis (without groups and task names) data AnalysisDataWrtTable = ADWT String String B.TableExpr (String,String,String) deriving Show data DataWrtTable = DWT AnalysisDataWrtTable OneGroupData [String] String (String, Norm String, ADouble) deriving Show getData :: [DataWrtTable] -> [(String, String, OneGroupData, B.TableExpr,(String,String,String), [String], String)] getData xs = map (\(DWT (ADWT x1 x2 x3 x4) x5 x6 x7 _) -> (x1, x2, x5, x3, x4, x6, x7)) xs getTableNames :: [DataWrtTable] -> [String] getTableNames xs = map (\(DWT (ADWT x1 _ _ _) _ _ _ _) -> x1) xs getExtra :: DataWrtTable -> (String, Norm String, ADouble) getExtra (DWT _ _ _ _ x8) = x8 data ExprQ = Q Double -- a constant | VarQ String -- a variable | FunQ String ExprQ -- an SQL function | OpQ String ExprQ ExprQ -- an SQL binary operator | ListFunQ String [ExprQ] -- an SQL function with argument list | IfThenElseQ BoolExprQ ExprQ ExprQ | (:+) ExprQ ExprQ | (:-) ExprQ ExprQ | (:*) ExprQ ExprQ | (:/) ExprQ ExprQ | (:**) ExprQ ExprQ -- exponentiation | Select ExprQ String String -- SELECT x FROM y WHERE z | GroupBy ExprQ String String -- x GROUP BY y HAVING z | ExprQ `As` String -- x AS y | ExprQ `Where` String -- x WHERE y | Subquery ExprQ ExprQ -- SELECT x FROM (subquery y) data BoolExprQ = CmpOpQ String ExprQ ExprQ -- an SQL comparison operator | BoolExprQ String -- an SQL expression of boolean type infix 4 == infix 4 /= infix 4 <= infix 4 >= infix 4 < infix 4 > infixl 6 + infixl 6 - infixl 7 * infixl 7 / infixr 8 ** class Arith1Q a where exp,abs :: a -> a sum,product,minimum,maximum :: [a] -> a instance Arith1Q Double where exp = P.exp abs = P.abs sum = L.sum product = L.product minimum = L.minimum maximum = L.maximum instance Arith1Q ExprQ where exp = expQ abs = absQ sum = sumQ product = productQ minimum = minimumQ maximum = maximumQ class ArithQ a b c | a b -> c where (+),(-),(*),(/),(**) :: a -> b -> c instance ArithQ Double 
Double Double where (+) = (P.+) (-) = (P.-) (*) = (P.*) (/) = (P./) (**) = (P.**) instance ArithQ ExprQ ExprQ ExprQ where (+) = (:+) (-) = (:-) (*) = (:*) (/) = (:/) (**) = (:**) instance ArithQ Double ExprQ ExprQ where x + y = Q x :+ y x - y = Q x :- y x * y = Q x :* y x / y = Q x :/ y x ** y = Q x :** y instance ArithQ ExprQ Double ExprQ where x + y = x :+ Q y x - y = x :- Q y x * y = x :* Q y x / y = x :/ Q y x ** y = x :** Q y class CmpQ a b c | a b -> c where (==),(/=),(<=),(>=),(<),(>) :: a -> b -> c instance CmpQ Double Double Bool where (==) = (P.==) (/=) = (P./=) (<=) = (P.<=) (>=) = (P.>=) (<) = (P.<) (>) = (P.>) instance CmpQ ExprQ ExprQ BoolExprQ where (==) = CmpOpQ "=" (/=) = CmpOpQ "!=" (<=) = CmpOpQ "<=" (>=) = CmpOpQ ">=" (<) = CmpOpQ "<" (>) = CmpOpQ ">" instance CmpQ Double ExprQ BoolExprQ where x == y = Q x == y x /= y = Q x /= y x <= y = Q x <= y x >= y = Q x >= y x < y = Q x < y x > y = Q x > y instance CmpQ ExprQ Double BoolExprQ where x == y = x == Q y x /= y = x /= Q y x <= y = x <= Q y x >= y = x >= Q y x < y = x < Q y x > y = x > Q y class IfThenElseQ a b where ifThenElse :: a -> b -> b -> b instance IfThenElseQ Bool a where ifThenElse True x y = x ifThenElse False x y = y instance IfThenElseQ BoolExprQ ExprQ where ifThenElse = IfThenElseQ instance IfThenElseQ BoolExprQ AnalysisResult where ifThenElse b x err = x -- TODO: handle the error message correctly expQ = FunQ "exp" logQ = FunQ "log" absQ = FunQ "abs" minimumT = FunQ "min" maximumT = FunQ "max" sumT = FunQ "sum" productT = expQ . sumT . logQ minimumQ = ListFunQ "least" maximumQ = ListFunQ "greatest" sumQ = foldl1 (:+) productQ = foldl1 (:*) -- forces the type of a number literal to be Double --dbl :: Double -> Double --dbl = id ---------------- a simple constant propagation block applyListFunQ :: String -> [ExprQ] -> ExprQ applyListFunQ "greatest" [x] = x applyListFunQ "greatest" xs = let ys = filter (\x -> case x of {Q _ -> True; _ -> False}) xs in case ys of [] -> ListFunQ "greatest" xs _ -> let zs = filter (\x -> case x of {Q _ -> False; _ -> True}) xs in let z = Q $ maximum (map (\(Q c) -> c) ys) in case zs of [] -> z -- if z = 0 and all others are 'abs', then we may discard z _ -> case z of Q 0 -> let ws = filter (\x -> case x of {FunQ "abs" _ -> True; _ -> False}) zs in case compare (length zs) (length ws) of EQ -> case zs of [w] -> w _ -> ListFunQ "greatest" zs _ -> ListFunQ "greatest" (z:zs) _ -> ListFunQ "greatest" (z:zs) applyListFunQ "least" [x] = x applyListFunQ "least" xs = let ys = filter (\x -> case x of {Q _ -> True; _ -> False}) xs in case ys of [] -> ListFunQ "least" xs _ -> let zs = filter (\x -> case x of {Q _ -> False; _ -> True}) xs in let z = Q $ maximum (map (\(Q c) -> c) ys) in case zs of [] -> z -- if z = 0 and all others are 'abs', then we may discard zs _ -> case z of Q 0 -> let ws = filter (\x -> case x of {FunQ "abs" _ -> True; _ -> False}) zs in case compare (length zs) (length ws) of EQ -> z _ -> ListFunQ "least" (z:zs) _ -> ListFunQ "least" (z:zs) applyFunQ :: String -> ExprQ -> ExprQ applyFunQ f x = case x of Q c -> case f of "exp" -> Q $ exp c "log" -> Q $ log c "abs" -> Q $ abs c _ -> FunQ f x _ -> FunQ f x applyOpQ :: String -> ExprQ -> ExprQ -> ExprQ applyOpQ op (Q cx) (Q cy) = case op of "+" -> Q (cx + cy) "-" -> Q (cx - cy) "*" -> Q (cx * cy) "/" -> Q (cx / cy) "^" -> Q (cx ** cy) applyOpQ op x@(Q cx) y = case op of "+" -> if cx == 0 then y else (:+) x y "-" -> (:-) x y "*" -> if cx == 0 then Q 0 else (if cx == 1 then y else (:*) x y) "/" -> if cx == 0 then Q 0 else 
(:/) x y "^" -> if cx == 0 then Q 0 else (if cx == 1 then Q 1 else (:**) x y) applyOpQ op x y@(Q cy) = case op of "+" -> if cy == 0 then x else (:+) x y "-" -> if cy == 0 then x else (:-) x y "*" -> if cy == 0 then Q 0 else (if cy == 1 then x else (:*) x y) "/" -> if cy == 0 then Q (1/0) else (if cy == 1 then x else (:/) x y) "^" -> if cy == 0 then Q 1 else (if cy == 1 then x else (:**) x y) applyOpQ op x y = case op of "+" -> (:+) x y "-" -> (:-) x y "*" -> (:*) x y "/" -> (:/) x y "^" -> (:**) x y constProp :: ExprQ -> ExprQ constProp expr = case expr of Q c -> Q c VarQ x -> VarQ x FunQ f x -> applyFunQ f $ constProp x OpQ op x y -> OpQ op (constProp x) (constProp y) ListFunQ f xs -> applyListFunQ f $ map constProp xs IfThenElseQ (CmpOpQ op z1 z2) x y -> case z of Just True -> (constProp x) Just False -> (constProp y) Nothing -> IfThenElseQ (CmpOpQ op nz1 nz2) (constProp x) (constProp y) where nz1 = constProp z1 nz2 = constProp z2 z = constPropBool op nz1 nz2 IfThenElseQ b x y -> IfThenElseQ b (constProp x) (constProp y) (x :+ y) -> applyOpQ "+" (constProp x) (constProp y) (x :- y) -> applyOpQ "-" (constProp x) (constProp y) (x :* y) -> applyOpQ "*" (constProp x) (constProp y) (x :/ y) -> applyOpQ "/" (constProp x) (constProp y) (x :** y) -> applyOpQ "^" (constProp x) (constProp y) Select x fr wh -> Select (constProp x) fr wh GroupBy x g h -> GroupBy (constProp x) g h x `Where` y -> (constProp x) `Where` y x `As` a -> (constProp x) `As` a Subquery x y -> Subquery (constProp x) (constProp y) constPropBool :: String -> ExprQ -> ExprQ -> Maybe Bool constPropBool op (Q cx) (Q cy) = case op of "=" -> if cx == cy then Just True else Just False "!=" -> if cx == cy then Just False else Just True "<=" -> if cx <= cy then Just True else Just False ">=" -> if cx >= cy then Just True else Just False "<" -> if cx < cy then Just True else Just False ">" -> if cx > cy then Just True else Just False constPropBool op (Q cx) y = case op of ">=" -> if cx == infinity then Just True else Nothing _ -> Nothing constPropBool op x (Q cy) = case op of "<=" -> if cy == infinity then Just True else Nothing _ -> Nothing constPropBool _ _ _ = Nothing ---------------- instance Show ExprQ where show (Q x) | x >= 0 = if x == infinity then "99999.99" else show x | otherwise = '(' : show x ++ ")" show (VarQ x) = x -- due to overflow errors, added temporarily rounding for exp here -- show (FunQ f x) = case f of -- "exp" -> "round(" ++ f ++ '(' : show x ++ ") :: numeric, 64)" -- _ -> f ++ '(' : show x ++ ")" show (FunQ f x) = f ++ '(' : show x ++ ")" show (OpQ op x y) = '(' : show x ++ ' ' : op ++ ' ' : show y ++ ")" show (ListFunQ f xs) = f ++ '(' : intercalate ", " (map show xs) ++ ")" show (IfThenElseQ b x y) = "case when " ++ show b ++ " then " ++ show x ++ " else " ++ show y ++ " end" show (x :+ y) = '(' : show x ++ " + " ++ show y ++ ")" show (x :- y) = '(' : show x ++ " - " ++ show y ++ ")" show (x :* y) = '(' : show x ++ " * " ++ show y ++ ")" -- due to overflow errors, added temporarily rounding for sigmoids here -- show (x :/ y) = "round((" ++ show x ++ " / " ++ show y ++ ") :: numeric, 64)" show (x :/ y) = '(' : show x ++ " / " ++ show y ++ ")" show (x :** y) = '(' : show x ++ " ^ " ++ show y ++ ")" show (Select (Subquery x y) fr wh) = show (Subquery x (Select y fr wh)) show (Select (GroupBy (x `Where` y) g h) fr wh) = show (Select (GroupBy x g h) fr ('(' : wh ++ ") AND " ++ y)) show (Select (GroupBy x g h) fr wh) = "SELECT " ++ show x ++ (if null fr then "" else " FROM ") ++ fr ++ (if null wh then "" else " 
WHERE ") ++ wh ++ " GROUP BY " ++ g ++ (if null h then "" else " HAVING ") ++ h show (Select (x `Where` y) fr wh) = show (Select x fr ('(' : wh ++ ") AND " ++ y)) show (Select x fr wh) = "SELECT " ++ show x ++ (if null fr then "" else " FROM ") ++ fr ++ (if null wh then "" else " WHERE ") ++ wh show (GroupBy x g h) = show x ++ " GROUP BY " ++ g ++ (if null h then "" else " HAVING ") ++ h show (x `As` a) = show x ++ " AS " ++ a show (Subquery x y) = "SELECT " ++ show x ++ " FROM (" ++ show y ++ ") AS sub" instance Show BoolExprQ where show (CmpOpQ op x y) = '(' : show x ++ ' ' : op ++ ' ' : show y ++ ")" show (BoolExprQ x) = '(' : x ++ ")" data SmoothUpperBound = SUB { subg :: Double -> ExprQ, subBeta :: Double} instance Show SmoothUpperBound where show (SUB g beta0) | beta0 >= 0 = let beta = chooseBeta beta0 in if beta >= beta0 then (printf "%s (beta = %0.3f)" (show (g beta)) beta :: String) else ((error $ printf "ERROR (beta = %0.3f but must be >= %0.3f)" beta beta0) :: String) | otherwise = "unknown" data AnalysisResult = AR { fx :: ExprQ, -- value of the analyzed function (f(x)) subf :: SmoothUpperBound, -- smooth upper bound of the absolute value of the analyzed function itself sdsf :: SmoothUpperBound, -- smooth upper bound of the derivative sensitivity of the analyzed function gub :: Double, -- global (constant) upper bound on the absolute value of the analyzed function itself gsens :: Double, -- (upper bound on) global sensitivity of the analyzed function, may be infinity arVarState :: VarState} -- Range lb ub, if both lower and upper bound are known, otherwise Exact deriving Show unknownSUB = SUB undefined (-1) aR :: AnalysisResult aR = AR {subf = unknownSUB, gub = infinity, gsens = infinity, arVarState = Exact} chooseSUBBeta :: Double -> Maybe Double -> SmoothUpperBound -> Double chooseSUBBeta defaultBeta fixedBeta (SUB g beta0) = let beta = chooseBetaCustom defaultBeta fixedBeta beta0 in if beta >= beta0 then beta else error $ printf "ERROR (beta = %0.3f but must be >= %0.3f)" beta beta0 -- compute ||(x_1,...,x_n)||_p lpnorm :: Double -> [ExprQ] -> ExprQ lpnorm p xs = (sum $ map (** p) $ map abs xs) ** (1 / p) lpnormT :: Double -> ExprQ -> ExprQ lpnormT p xs = (sumT $ (abs xs) ** p) ** (1 / p) -- compute ||(x_1,...,x_n)||_q where || ||_q is the dual norm of || ||_p lqnorm :: Double -> [ExprQ] -> ExprQ lqnorm 1 xs = linfnorm xs lqnorm p xs = lpnorm (dualnorm p) xs lqnormT :: Double -> ExprQ -> ExprQ lqnormT 1 = linfnormT lqnormT p = lpnormT (dualnorm p) -- compute ||(x_1,...,x_n)||_infinity linfnorm :: [ExprQ] -> ExprQ linfnorm = maximumQ . map absQ linfnormT :: ExprQ -> ExprQ linfnormT = maximumT . 
absQ getUbFromAr :: AnalysisResult -> Double getUbFromAr ar = case arVarState ar of Range lb ub -> ub _ -> gub ar getLbFromAr :: AnalysisResult -> Double getLbFromAr ar = case arVarState ar of Range lb ub -> lb _ -> - gub ar getRangeFromAr :: AnalysisResult -> VarState getRangeFromAr ar = Range (getLbFromAr ar) (getUbFromAr ar) findUsedVars :: Expr -> [Int] findUsedVars = fuv where fuv expr = case expr of Prec _ -> [] StringCond _ -> [] Power i _ -> [i] ComposePower e1 _ -> fuv e1 Exp r i -> [i] ComposeExp r e1 -> fuv e1 Sigmoid a c i -> [i] ComposeSigmoid a c e1 -> fuv e1 SigmoidPrecise aa ab c i -> [i] ComposeSigmoidPrecise aa ab c e1 -> fuv e1 Tauoid a c i -> [i] ComposeTauoid a c e1 -> fuv e1 TauoidPrecise aa ab c i -> [i] ComposeTauoidPrecise aa ab c e1 -> fuv e1 L0Predicate i p -> [i] PowerLN i r -> [i] Const c -> [] ScaleNorm a e1 -> fuv e1 ZeroSens e1 -> fuv e1 L p is -> is LInf is -> is Prod es -> concatMap fuv es Prod2 es -> concatMap fuv es Min es -> concatMap fuv es Max es -> concatMap fuv es ComposeL p es -> concatMap fuv es Sump p es -> concatMap fuv es SumInf es -> concatMap fuv es Sum2 es -> concatMap fuv es analyzeExprQ :: [String] -> S.Set Int -> [VarState] -> [Int] -> Bool -> (M.Map String AnalysisResult) -> Expr -> AnalysisResult analyzeExprQ colNames = analyzeExpr (map VarQ colNames) -- computeGsens specifies whether we want to compute gsens or the rest of the analysis results -- They must be computed separately because -- the rest of the analysis requires gsens w.r.t. a different norm -- (the one w.r.t. which we need smoothness instead of the one w.r.t. which we need the sensitivity) analyzeExpr :: [ExprQ] -> S.Set Int -> [VarState] -> [Int] -> Bool -> (M.Map String AnalysisResult) -> Expr -> AnalysisResult analyzeExpr row sensitiveVarSet varStates colTableCounts computeGsens subQueryMap = ae where -- require n times smaller beta in the subexpression scaleBeta n (SUB subg subBeta) = let c = fromIntegral n :: Double in SUB (\ beta -> subg (beta / c)) (subBeta * c) scaleGsens :: Int -> Double -> Double scaleGsens n gsens = if computeGsens then gsens else let c = fromIntegral n :: Double in c * gsens ae expr = case expr of Prec (B.AR fx (B.SUB subg subBeta) (B.SUB sdsg sdsBeta)) -> AR {fx = Q fx, subf = SUB (Q . subg) subBeta, sdsf = SUB (Q . sdsg) sdsBeta, gub = if subBeta > 0 then infinity else subg 0, gsens = if sdsBeta > 0 then infinity else sdsg 0} -- we use this construction only for equality checks, so an UB on f itself is 1 StringCond s -> aR {fx = VarQ s, subf = SUB (const (VarQ s)) 0, sdsf = SUB (const (Q 0)) 0, gub = 1.0, gsens = 0} Power i r -> let x = row !! i vs = varStates !! i sf = colTableCounts !! i sb = scaleBeta sf sg = scaleGsens sf in -- ###################### -- here we assume that 'Id x' functions are converted to 'Power x 1' -- we read fx from the table to keep table cross product correct -- we assign variable placeholders for SUBs, although we compute them later when the final beta is known let xName = show x in if r == 1 && M.member xName subQueryMap then let ar = subQueryMap M.!
xName in let (SUB _ beta1) = subf ar in let (SUB _ beta2) = sdsf ar in ar {fx = VarQ xName, subf = SUB (const $ VarQ ("_sens_" ++ xName ++ "_subf")) beta1, sdsf = SUB (const $ VarQ ("_sens_" ++ xName ++ "_sdsf")) beta2} -- ###################### else if r == 1 then let gub = getGubFromVs vs in aR {fx = x, subf = sb $ SUB (\ beta -> if gub < 1/beta then exp (beta * (abs x - gub)) * gub else ifThenElse (abs x >= 1 / beta) (abs x) (exp (beta * abs x - 1) / beta)) 0, sdsf = sb $ SUB (const (Q 1)) 0, gub = gub, gsens = sg 1, arVarState = vs} else if r >= 1 then let x_ub = getUbFromVs vs x_lb = max 0 (getLbFromVs vs) ub = x_ub ** r in if x > 0 then aR {fx = x ** r, subf = sb $ SUB (\ beta -> if x_ub < r/beta then exp (beta * (x - x_ub)) * ub else ifThenElse (x >= r/beta) (x ** r) (exp (beta*x - r) * (r/beta)**r)) 0, sdsf = sb $ SUB (\ beta -> if x_ub < (r-1)/beta then exp (beta * (x - x_ub)) * r * x_ub**(r-1) else ifThenElse (x >= (r-1)/beta) (r * x**(r-1)) (r * exp (beta*x - (r-1)) * ((r-1)/beta)**(r-1))) 0, gub = ub, gsens = sg $ r * x_ub**(r-1), arVarState = Range (x_lb ** r) ub} else error "analyzeExpr/Power: condition (r >= 1 && x > 0 || r == 1) not satisfied" else error "analyzeExpr/Power: condition (r >= 1 && x > 0 || r == 1) not satisfied" ComposePower e1 r -> let ar @ (AR gx (SUB subf1g beta1) (SUB sdsf1g beta2) _ gsens _) = ae e1 beta3 = (r-1)*beta1 + beta2 b1 = if beta3 > 0 then beta1 / beta3 else 1/r b2 = if beta3 > 0 then beta2 / beta3 else 1/r gx_ub = getUbFromAr ar gx_lb = max 0 (getLbFromAr ar) ub = gx_ub ** r in if r >= 1 then if gx > 0 then AR {fx = gx ** r, subf = SUB (\ beta -> subf1g (beta / r) ** r) (r*beta1), sdsf = SUB (\ beta -> r * (subf1g (b1 * beta))**(r-1) * sdsf1g (b2 * beta)) beta3, gub = ub, gsens = r * gx_ub ** (r-1) * gsens, arVarState = Range (gx_lb ** r) ub} else error "analyzeExpr/ComposePower: condition (r >= 1 && g(x) > 0) not satisfied" else error "analyzeExpr/ComposePower: condition (r >= 1 && g(x) > 0) not satisfied" Exp r i -> let x = row !! i x_ub = getUbFromVs (varStates !! i) x_lb = getLbFromVs (varStates !! i) f_x_ub = if r >= 0 then exp (r * x_ub) else exp (r * x_lb) f_x_lb = if r >= 0 then exp (r * x_lb) else exp (r * x_ub) sf = colTableCounts !! i sb = scaleBeta sf sg = scaleGsens sf in aR {fx = exp (r * x), subf = sb $ SUB (const $ exp (r * x)) (abs r), sdsf = sb $ SUB (const $ abs r * exp (r * x)) (abs r), gub = f_x_ub, gsens = sg $ abs r * f_x_ub, arVarState = Range f_x_lb f_x_ub} ComposeExp r e1 -> let ar @ (AR gx _ (SUB sdsf1g beta2) _ gsens _) = ae e1 b = gsens gx_ub = getUbFromAr ar gx_lb = getLbFromAr ar f_x = exp (r * gx) f_x_ub = if r >= 0 then exp (r * gx_ub) else exp (r * gx_lb) f_x_lb = if r >= 0 then exp (r * gx_lb) else exp (r * gx_ub) in aR {fx = f_x, subf = SUB (const f_x) (abs r * b), sdsf = SUB (\ beta -> abs r * f_x * sdsf1g (beta - abs r * b)) (abs r * b + beta2), gub = f_x_ub, gsens = abs r * f_x_ub * gsens, arVarState = Range f_x_lb f_x_ub} Sigmoid a c i -> let x = row !! i vs = varStates !! i y = exp (a * (x - c)) z = y / (y + 1) -- tested whether 1 / (1 + 1/y) performs better than y / (y + 1) -- y' = exp (a * (c - x)) -- z' = 1 / (1 + y') a' = abs a ub = case vs of Range lb ub -> let x = ub y = exp (a * (x - c)) z = y / (y + 1) in z _ -> infinity sf = colTableCounts !!
i sb = scaleBeta sf sg = scaleGsens sf in aR {fx = z, subf = sb $ SUB (const z) a', sdsf = sb $ SUB (const $ a' * y / (y+1)**2) a', gub = ub, gsens = sg $ a'/4, arVarState = Range 0 ub} ComposeSigmoid a c e1 -> let ar @ (AR gx _ (SUB sdsf1g beta2) _ gsens _) = ae e1 b = gsens y = exp (a * (gx - c)) z = y / (y + 1) -- tested whether 1 / (1 + 1/y) performs better than y / (y + 1) --y' = exp (a * (c - gx)) --z' = 1 / (1 + y') a' = abs a gx_ub = getUbFromAr ar ub = if isInfinite gx_ub then infinity else let gx = gx_ub y = exp (a * (gx - c)) z = y / (y + 1) in z in aR {fx = z, subf = SUB (const z) (a' * b), sdsf = SUB (\ beta -> a' * y / (y+1)**2 * sdsf1g (beta - a' * b)) (a' * b + beta2), gub = ub, gsens = a'/4 * gsens, arVarState = Range 0 ub} -- 'aa' is the actual sigmoid precision, 'ab' is the smoothness that we want SigmoidPrecise aa ab c i -> let x = row !! i vs = varStates !! i y = exp (ab * (x - c)) y' = exp (aa * (x - c)) z = y' / (y'+1) -- tested whether 1 / (1 + 1/y) performs better than y / (y + 1) --y'' = exp (aa * (c - x)) --z'' = 1 / (1+y'') a' = abs ab x_ub = getUbFromVs vs ub = case vs of Range lb ub -> let x = x_ub y' = exp (aa * (x - c)) z = y' / (y'+1) in z _ -> infinity sf = colTableCounts !! i sb = scaleBeta sf sg = scaleGsens sf in aR {fx = z, subf = sb $ SUB (const (Q 1)) 0, sdsf = sb $ SUB (const $ aa * y / (y+1)**2) a', gub = ub, gsens = sg $ abs aa / 4, arVarState = Range 0 ub} ComposeSigmoidPrecise aa ab c e1 -> let ar @ (AR gx _ (SUB sdsf1g beta2) _ gsens _) = ae e1 b = gsens y = exp (ab * (gx - c)) y' = exp (aa * (gx - c)) y'' :: Double -> ExprQ -- choose ab automatically from beta y'' beta = let ab = (beta - beta2) / b in exp (ab * (gx - c)) z = y' / (y' + 1) -- tested whether 1 / (1 + 1/y) performs better than y / (y + 1) --y''' = exp (aa * (c - gx)) --z''' = 1 / (1+y''') a' = abs ab gx_ub = getUbFromAr ar ub = if isInfinite gx_ub then infinity else let gx = gx_ub y' = exp (aa * (gx - c)) z = y' / (y' + 1) in z in aR {fx = z, subf = SUB (const (Q 1)) 0, sdsf = SUB (if ab == 0 then \ beta -> let y = y'' beta in aa * y / (y+1)**2 * sdsf1g (beta - a' * b) else \ beta -> aa * y / (y+1)**2 * sdsf1g (beta - a' * b)) (a' * b + beta2), gub = ub, gsens = abs aa / 4 * gsens, arVarState = Range 0 ub} Tauoid a c i -> let x = row !! i y1 = exp ((-a) * (x - c)) y2 = exp (a * (x - c)) z = 2 / (y1 + y2) a' = abs a sf = colTableCounts !! i sb = scaleBeta sf sg = scaleGsens sf in aR {fx = z, subf = sb $ SUB (const z) a', sdsf = sb $ SUB (const $ a' * z) a', gub = 1, gsens = sg a', arVarState = Range 0 1} ComposeTauoid a c e1 -> let AR gx _ (SUB sdsf1g beta2) _ gsens _ = ae e1 b = gsens y1 = exp ((-a) * (gx - c)) y2 = exp (a * (gx - c)) z = 2 / (y1 + y2) a' = abs a in aR {fx = z, subf = SUB (const z) (a' * b), sdsf = SUB (\ beta -> a' * z * sdsf1g (beta - a' * b)) (a' * b + beta2), gub = 1, gsens = a' * gsens, arVarState = Range 0 1} TauoidPrecise aa ab c i -> let x = row !! i y1 = exp ((-ab) * (x - c)) y2 = exp (ab * (x - c)) y1' = exp ((-aa) * (x - c)) y2' = exp (aa * (x - c)) z = 2 / (y1' + y2') a' = abs ab sf = colTableCounts !!
i sb = scaleBeta sf sg = scaleGsens sf in aR {fx = z, subf = sb $ SUB (const (Q 1)) 0, sdsf = sb $ SUB (const $ aa * 2 / (y1 + y2)) a', gub = 1, gsens = sg $ abs aa, arVarState = Range 0 1} ComposeTauoidPrecise aa ab c e1 -> let AR gx _ (SUB sdsf1g beta2) _ gsens _ = ae e1 b = gsens y1 = exp ((-ab) * (gx - c)) y2 = exp (ab * (gx - c)) y1' = exp ((-aa) * (gx - c)) y2' = exp (aa * (gx - c)) z = 2 / (y1' + y2') a' = abs ab in aR {fx = z, subf = SUB (const (Q 1)) 0, sdsf = SUB (\ beta -> aa * 2 / (y1 + y2) * sdsf1g (beta - a' * b)) (a' * b + beta2), gub = 1, gsens = abs aa * gsens, arVarState = Range 0 1} L0Predicate i p -> let VarQ x = row !! i sf = colTableCounts !! i sb = scaleBeta sf sg = scaleGsens sf in aR {fx = if BoolExprQ (p x) then Q 1 else Q 0, subf = sb $ SUB (\ beta -> if BoolExprQ (p x) then Q 1 else Q (exp (-beta))) 0, sdsf = sb $ SUB (const (Q 1)) 0, gub = 1, gsens = sg 1, arVarState = Range 0 1} PowerLN i r -> let x = row !! i x_ub = getUbFromVs (varStates !! i) x_lb = getLbFromVs (varStates !! i) y_ub = if r >= 0 then x_ub ** r else if x_lb > 0 then x_lb ** r else infinity y_lb = if r >= 0 then (if x_lb > 0 then x_lb ** r else 0) else x_ub ** r sf = colTableCounts !! i sb = scaleBeta sf sg = scaleGsens sf in if x > 0 then aR {fx = x ** r, subf = sb $ SUB (const $ x ** r) (abs r), sdsf = sb $ SUB (const $ abs r * x ** r) (abs r), gub = y_ub, gsens = sg $ if r >= 0 then r * x_ub ** r else if x_lb > 0 then abs r * x_lb ** r else infinity, arVarState = Range y_lb y_ub} else error "analyzeExpr/PowerLN: condition (x > 0) not satisfied" Const c -> aR {fx = Q c, subf = SUB (const (Q $ abs c)) 0, sdsf = SUB (const (Q 0)) 0, gub = abs c, gsens = 0, arVarState = Range c c} ScaleNorm a e1 -> let AR fx1 (SUB subf1g subf1beta) (SUB sdsf1g sdsf1beta) gub gsens vs = ae e1 in aR {fx = fx1, subf = SUB (\ beta -> subf1g (beta*a)) (subf1beta/a), sdsf = SUB (\ beta -> sdsf1g (beta*a) / a) (sdsf1beta/a), gub = gub, gsens = gsens/a, arVarState = vs} ZeroSens e1 -> let AR fx1 subf@(SUB subf1g subf1beta) (SUB sdsf1g sdsf1beta) gub gsens vs = ae e1 isSensitive = any (`S.member` sensitiveVarSet) (findUsedVars e1) in aR {fx = fx1, subf = if isSensitive then subf else SUB (const fx1) 0, sdsf = SUB (const (Q 0)) 0, gub = gub, gsens = if isSensitive && not computeGsens then gsens else 0, arVarState = vs} L p is -> let xs = map (row !!) is y = lpnorm p xs vs = rangeLpNorm p $ map (getRangeFromVs . (varStates !!)) is ub = getGubFromVs vs sf = P.maximum $ map (colTableCounts !!) is sb = scaleBeta sf sg = scaleGsens sf in aR {fx = y, subf = sb $ SUB (\ beta -> if ub < 1/beta then exp (beta * (y - ub)) * ub else if y >= 1/beta then y else exp (beta * y - 1) / beta) 0, sdsf = sb $ SUB (const (Q 1)) 0, gub = ub, gsens = sg 1, arVarState = vs} LInf is -> let xs = map (row !!) is y = linfnorm xs vs = rangeLInfNorm $ map (getRangeFromVs . (varStates !!)) is ub = getGubFromVs vs sf = P.maximum $ map (colTableCounts !!) 
is sb = scaleBeta sf sg = scaleGsens sf in aR {fx = y, subf = sb $ SUB (\ beta -> if ub < 1/beta then exp (beta * (y - ub)) * ub else if y >= 1/beta then y else exp (beta * y - 1) / beta) 0, sdsf = sb $ SUB (const (Q 1)) 0, gub = ub, gsens = sg 1, arVarState = vs} Prod es -> combineArsProd $ map ae es Prod2 es -> combineArsProd2 $ map ae es Min es -> combineArsMin $ map ae es Max es -> combineArsMax $ map ae es ComposeL p es -> combineArsL p $ map ae es Sump p es -> combineArsSump p $ map ae es SumInf es -> combineArsSumInf $ map ae es Sum2 es -> combineArsSum2 $ map ae es combineArsProd :: [AnalysisResult] -> AnalysisResult combineArsProd ars = let fxs = map fx ars subfs = map subf ars sdsfs = map sdsf ars subfBetas = map subBeta subfs sdsfBetas = map subBeta sdsfs subgs = map subg subfs sdsgs = map subg sdsfs gubs = map gub ars gsenss = map gsens ars rs = map getRangeFromAr ars n = length ars c i beta = let s = ((sdsgs !! i) beta) in if s == 0 then Q 0 else s * product (map ($ beta) $ skipith i subgs) gc i = let s = (gsenss !! i) in if s == 0 then 0 else s * product (skipith i gubs) in aR {fx = product fxs, subf = SUB (\ beta -> product (map ($ beta) subgs)) (maximum subfBetas), sdsf = SUB (\ beta -> linfnorm (map (\ i -> c i beta) [round 0..n P.- round 1])) (maximum (subfBetas ++ sdsfBetas)), gub = product gubs, gsens = B.linfnorm (map gc [round 0..n P.- round 1]), arVarState = rangeProduct rs} combineArsProd2 :: [AnalysisResult] -> AnalysisResult combineArsProd2 ars = let fxs = map fx ars subfs = map subf ars sdsfs = map sdsf ars subfBetas = map subBeta subfs sdsfBetas = map subBeta sdsfs subgs = map subg subfs sdsgs = map subg sdsfs gubs = map gub ars gsenss = map gsens ars rs = map getRangeFromAr ars minSubfBeta = sum subfBetas n = length ars n' = fromIntegral n :: Double divideSubfBeta :: [Double] -> Double -> [Double] divideSubfBeta subfBetas beta = let numZeroSubfBetas = fromIntegral (length $ filter (== 0) subfBetas) :: Double minSubfBeta = sum subfBetas in if numZeroSubfBetas == 0 then let excess = (beta - minSubfBeta) / n' in map (+ excess) subfBetas else let excess = (beta - minSubfBeta) / numZeroSubfBetas in map (\ x -> if x == 0 then excess else x) subfBetas --divByn :: Double -> Double --divByn x = x / n' c i beta = product $ zipWith ($) ((sdsgs !! i) : skipith i subgs) (divideSubfBeta ((sdsfBetas !! i) : skipith i subfBetas) beta) --c i beta = ((sdsgs !! i) (divByn beta)) * product (map ($ (divByn beta)) $ skipith i subgs) gc i = let s = (gsenss !! 
i) in if s == 0 then 0 else s * product (skipith i gubs) in aR {fx = product fxs, subf = SUB (\ beta -> product (zipWith ($) subgs (divideSubfBeta subfBetas beta))) minSubfBeta, sdsf = SUB (\ beta -> sum (map (\ i -> c i beta) [round 0..n P.- round 1])) (sum subfBetas + maximum (zipWith (-) sdsfBetas subfBetas)), gub = product gubs, gsens = sum (map gc [round 0..n P.- round 1]), arVarState = rangeProduct rs} combineArsMin :: [AnalysisResult] -> AnalysisResult combineArsMin ars = let fxs = map fx ars subfs = map subf ars sdsfs = map sdsf ars subfBetas = map subBeta subfs sdsfBetas = map subBeta sdsfs subgs = map subg subfs sdsgs = map subg sdsfs gsenss = map gsens ars lbs = map getLbFromAr ars ubs = map getUbFromAr ars vs = Range (minimum lbs) (minimum ubs) in aR {fx = minimum fxs, subf = SUB (\ beta -> minimum (map ($ beta) subgs)) (maximum subfBetas), sdsf = SUB (\ beta -> maximum (map ($ beta) sdsgs)) (maximum sdsfBetas), gub = getGubFromVs vs, gsens = maximum gsenss, arVarState = vs} combineArsMinT :: AnalysisResult -> AnalysisResult combineArsMinT ar = aR {fx = minimumT (fx ar), subf = SUB (\ beta -> minimumT (subg (subf ar) beta)) (subBeta (subf ar)), sdsf = SUB (\ beta -> maximumT (subg (sdsf ar) beta)) (subBeta (sdsf ar))} combineArsMax :: [AnalysisResult] -> AnalysisResult combineArsMax ars = let fxs = map fx ars subfs = map subf ars sdsfs = map sdsf ars subfBetas = map subBeta subfs sdsfBetas = map subBeta sdsfs subgs = map subg subfs sdsgs = map subg sdsfs gubs = map gub ars gsenss = map gsens ars lbs = map getLbFromAr ars ubs = map getUbFromAr ars vs = Range (maximum lbs) (maximum ubs) in aR {fx = maximum fxs, subf = SUB (\ beta -> maximum (map ($ beta) subgs)) (maximum subfBetas), sdsf = SUB (\ beta -> maximum (map ($ beta) sdsgs)) (maximum sdsfBetas), gub = getGubFromVs vs, gsens = maximum gsenss, arVarState = vs} combineArsMaxT :: AnalysisResult -> AnalysisResult combineArsMaxT ar = aR {fx = maximumT (fx ar), subf = SUB (\ beta -> maximumT (subg (subf ar) beta)) (subBeta (subf ar)), sdsf = SUB (\ beta -> maximumT (subg (sdsf ar) beta)) (subBeta (sdsf ar))} combineArsLp :: Double -> [AnalysisResult] -> AnalysisResult combineArsLp p ars = let fxs = map fx ars subfs = map subf ars subfBetas = map subBeta subfs subgs = map subg subfs vs = rangeLpNorm p $ map getRangeFromAr ars in aR {fx = lpnorm p fxs, subf = SUB (\ beta -> lpnorm p (map ($ beta) subgs)) (maximum subfBetas), gub = getUbFromVs vs, arVarState = vs} combineArsL :: Double -> [AnalysisResult] -> AnalysisResult combineArsL p ars = let sdsfs = map sdsf ars sdsfBetas = map subBeta sdsfs sdsgs = map subg sdsfs gsenss = map gsens ars in (combineArsLp p ars) { sdsf = SUB (\ beta -> maximum (map ($ beta) sdsgs)) (maximum sdsfBetas), gsens = maximum gsenss} -- the computed function is l_p-norm but the norm is l_infinity combineArsLpInf :: Double -> [AnalysisResult] -> AnalysisResult combineArsLpInf p ars = let sdsfs = map sdsf ars sdsfBetas = map subBeta sdsfs sdsgs = map subg sdsfs gsenss = map gsens ars in (combineArsLp p ars) { sdsf = SUB (\ beta -> lpnorm 1 (map ($ beta) sdsgs)) (maximum sdsfBetas), gsens = B.lpnorm 1 gsenss} combineArsLpT :: Double -> AnalysisResult -> AnalysisResult combineArsLpT p ar = aR {fx = lpnormT p (fx ar), subf = SUB (\ beta -> lpnormT p (subg (subf ar) beta)) (subBeta (subf ar)), gub = if p == 1 then gub ar else infinity, gsens = if p == 1 then gsens ar else infinity} combineArsLT :: Double -> AnalysisResult -> AnalysisResult combineArsLT p ar = (combineArsLpT p ar) {sdsf = SUB (\ beta -> 
maximumT (subg (sdsf ar) beta)) (subBeta (sdsf ar))} combineArsLpInfT :: Double -> AnalysisResult -> AnalysisResult combineArsLpInfT p ar = (combineArsLpT p ar) {sdsf = SUB (\ beta -> lpnormT 1 (subg (sdsf ar) beta)) (subBeta (sdsf ar))} combineArsSum :: [AnalysisResult] -> AnalysisResult combineArsSum ars = let fxs = map fx ars subfs = map subf ars subfBetas = map subBeta subfs subgs = map subg subfs gubs = map gub ars lbs = map getLbFromAr ars ubs = map getUbFromAr ars vs = Range (sum lbs) (sum ubs) in aR {fx = sum fxs, subf = SUB (\ beta -> sum (map ($ beta) subgs)) (maximum subfBetas), gub = getGubFromVs vs, arVarState = vs} combineArsSump :: Double -> [AnalysisResult] -> AnalysisResult combineArsSump p ars = let sdsfs = map sdsf ars sdsfBetas = map subBeta sdsfs sdsgs = map subg sdsfs gsenss = map gsens ars in (combineArsSum ars) { sdsf = SUB (\ beta -> lqnorm p (map ($ beta) sdsgs)) (maximum sdsfBetas), gsens = B.lqnorm p gsenss} combineArsSumInf :: [AnalysisResult] -> AnalysisResult combineArsSumInf ars = let sdsfs = map sdsf ars sdsfBetas = map subBeta sdsfs sdsgs = map subg sdsfs gsenss = map gsens ars in (combineArsSum ars) { sdsf = SUB (\ beta -> lpnorm 1 (map ($ beta) sdsgs)) (maximum sdsfBetas), gsens = B.lpnorm 1 gsenss} combineArsSum2 :: [AnalysisResult] -> AnalysisResult combineArsSum2 ars = let sdsfs = map sdsf ars sdsfBetas = map subBeta sdsfs sdsgs = map subg sdsfs gsenss = map gsens ars in (combineArsSum ars) { sdsf = SUB (\ beta -> sum (map ($ beta) sdsgs)) (maximum sdsfBetas), gsens = sum gsenss} combineArsSumT :: AnalysisResult -> AnalysisResult combineArsSumT ar = aR {fx = sumT (fx ar), subf = SUB (\ beta -> sumT (subg (subf ar) beta)) (subBeta (subf ar)), gub = gub ar, gsens = gsens ar} combineArsSumpT :: Double -> AnalysisResult -> AnalysisResult combineArsSumpT p ar = (combineArsSumT ar) {sdsf = SUB (\ beta -> lqnormT p (subg (sdsf ar) beta)) (subBeta (sdsf ar))} combineArsSumInfT :: AnalysisResult -> AnalysisResult combineArsSumInfT ar = (combineArsSumT ar) {sdsf = SUB (\ beta -> lpnormT 1 (subg (sdsf ar) beta)) (subBeta (sdsf ar))} -- useFixedBudgetPerRowUse should be true if and only if (maxProvUses args) != Nothing -- useFixedBudgetPerRowUse=True is implemented only for SUM queries analyzeTableExpr :: [String] -> S.Set Int -> [VarState] -> [Int] -> Bool -> Bool -> String -> String -> (M.Map String AnalysisResult) -> TableExpr -> AnalysisResult analyzeTableExpr cols sensitiveVarSet varStates colTableCounts computeGsens useFixedBudgetPerRowUse sensCond srt subQueryMap te = case te of SelectMin (expr : _) -> oneStepCombine combineArsMinT expr SelectMax (expr : _) -> oneStepCombine combineArsMaxT expr SelectL p (expr : _) -> twoStepCombine (combineArsLT p) (combineArsLpInfT p) expr SelectSump p (expr : _) -> twoStepCombine (combineArsSumpT p) (if useFixedBudgetPerRowUse then combineArsSumpT 1 else combineArsSumInfT) expr SelectSumInf (expr : _) -> (if useFixedBudgetPerRowUse then twoStepCombine combineArsSumInfT (combineArsSumpT 1) else oneStepCombine combineArsSumInfT) expr where fixedArg arg expr arg' | arg' P.== arg = expr oneStepCombine combine expr = let AR fx _ (SUB sdsg subBeta) gub gsens _ = combine (analyzeExprQ cols sensitiveVarSet varStates colTableCounts computeGsens subQueryMap expr) in aR {fx = fx, sdsf = SUB (\ beta -> sdsg beta `Where` (sensCond ++ (if equal sensCond "" then "" else " AND ") ++ srt ++ ".sensitive")) subBeta, gub = gub, gsens = gsens} twoStepCombine combine_p combine_inf expr = let AR fx _ (SUB sdsg subBeta) gub gsens _ = 
combine_inf (analyzeExprQ cols sensitiveVarSet varStates colTableCounts computeGsens subQueryMap expr) AR _ _ (SUB _ subBeta2) _ _ _ = combine_p $ AR {sdsf = SUB undefined subBeta} in aR {fx = fx, sdsf = SUB (\ beta -> let subquery = GroupBy ((sdsg beta `As` "sdsg") `Where` (sensCond ++ (if equal sensCond "" then "" else " AND ") ++ srt ++ ".sensitive")) (srt ++ ".ID") "" AR _ _ (SUB sdsg2 _) _ _ _ = combine_p $ AR {sdsf = SUB (fixedArg beta $ VarQ "sdsg") subBeta} mainquery = sdsg2 beta in Subquery mainquery subquery) subBeta2, gub = gub, gsens = gsens} -- SELECT expr FROM fr WHERE wh -- (colNames !! i) is the name of the variable with number i in expr -- fr may contain multiple tables and aliases, e.g. "t as t1, t as t2, t3" -- wh is the WHERE condition as a string, e.g. "t1.c1 = t2.c1 AND t1.c2 >= t2.c2" -- srt is the name of the sensitive rows table analyzeTableExprQ :: ProgramOptions -> String -> String -> String -> String -> [String] -> S.Set Int -> [VarState] -> [Int] -> Bool -> (M.Map String AnalysisResult) -> TableExpr -> AnalysisResult analyzeTableExprQ args fr wh sensCond srt colNames sensitiveVarSet varStates colTableCounts computeGsens subQueryMap te = let useFixedBudgetPerRowUse = case maxProvUses args of Just _ -> True; Nothing -> False AR fx1 (SUB subf1g subf1beta) (SUB sdsf1g sdsf1beta) gub gsens vs = analyzeTableExpr colNames sensitiveVarSet varStates colTableCounts computeGsens useFixedBudgetPerRowUse sensCond srt subQueryMap te in AR (Select fx1 fr wh) (SUB ((\ x -> Select x fr wh) . subf1g) subf1beta) (SUB ((\ x -> Select x fr wh) . sdsf1g) sdsf1beta) gub gsens vs performAnalyses :: ProgramOptions -> Bool -> Double -> Maybe Double -> String -> String -> String -> [String] -> Int -> [String] -> [(String,[(String, String)])] -> TaskMap -> [String] -> [DataWrtTable] -> M.Map String VarState -> [(String, Maybe Double)] -> [Int] -> Maybe String -> IO (M.Map [String] Double, M.Map [String] [(String, [(String, (Double, Double))])], Double) performAnalyses args silent epsilon' fixedBeta dataPath separator initialQuery initQueries numOfOutputs colNames typeMap taskNameList sensitiveVarList tableExprData' attMap tableGs colTableCounts extraWheres = do (qmap, taskAggr, queryResults) <- performAnalyses' args silent epsilon' fixedBeta dataPath separator initialQuery initQueries numOfOutputs colNames typeMap taskNameList sensitiveVarList tableExprData' attMap tableGs colTableCounts extraWheres let queryResult = case compare (M.size queryResults) (length []) of {GT -> head (M.elems queryResults); _ -> error "performAnalyses: query result not obtained"} return (qmap, taskAggr, queryResult) performAnalyses' :: ProgramOptions -> Bool -> Double -> Maybe Double -> String -> String -> String -> [String] -> Int -> [String] -> [(String,[(String, String)])] -> TaskMap -> [String] -> [DataWrtTable] -> M.Map String VarState -> [(String, Maybe Double)] -> [Int] -> Maybe String -> IO (M.Map [String] Double, M.Map [String] [(String, [(String, (Double, Double))])], M.Map [String] Double) performAnalyses' args silent epsilon' fixedBeta dataPath separator initialQuery initQueries numOfOutputs colNames typeMap taskNameList sensitiveVarList tableExprData' attMap tableGs colTableCounts extraWheres = do let debug = not (alternative args) && not silent let vb = not (succinct args) --printf "performAnalyses': debug = %s, vb = %s\n" (show debug) (show vb) let tableGmap = M.fromList tableGs let tableGstr = intercalate "," $ map (\ tg -> case tg of (tbl,Nothing) -> tbl (tbl,Just g) | isInfinite g -> tbl | 
otherwise -> tbl ++ ':' : show g) tableGs -- for time series analysis, we execute initial queries in module TimeSeriesQ instead of here, to avoid executing them again at each time point -- it turned out that for the guessing advantage analysis it is more reasonable to execute these queries in module PreprocessQ as well --case timeSeries args of -- Nothing -> do -- when debug $ putStrLn "=================================" -- when debug $ putStrLn "Computing queries that create empty intermediate query tables and the input tables\n" -- sendQueriesToDbAndCommit args initQueries -- _ -> return () --when debug $ printf "tableGstr = %s\n" tableGstr when debug $ putStrLn "=================================" when debug $ putStrLn "Computing the initial query" when debug $ putStrLn initialQuery -- used for generating CSF benchmarks -- putStrLn (initialQuery ++ ";") --qr <- if (dbSensitivity args) then sendDoubleQueryToDb args initialQuery else (do return 0) -- a group-by query may return several outputs; we store it as a map group -> value -- TODO we need to somehow attach the resulting column names to the map list qmapList <- if (dbSensitivity args) then sendStringListsDoublesQueryToDb args initialQuery else (do return [([],0)]) let qmap = M.fromList qmapList -- scale epsilon according to the number of outputs let epsilon = divide epsilon' (fromIntegral numOfOutputs) when (dbSensitivity args && debug && vb) $ putStrLn (show qmapList) when debug $ putStrLn "=================================" when debug $ putStrLn "Generating SQL queries for computing the analysis results" --let fromPart = intercalate ", " tableNames --let wherePart = "" --forM_ tableExprData $ \ (tableName, te, sqlQuery) -> do -- let [_, fromWhere] = splitOn " FROM " sqlQuery -- let [fromPart, wherePart] = splitOn " WHERE " fromWhere when debug $ putStrLn "=================================" maxGsens <- case fixedBeta of Nothing -> do res <- findMaximumGsens args silent epsilon fixedBeta dataPath separator initialQuery colNames typeMap sensitiveVarList tableExprData' attMap tableGs colTableCounts when debug $ printf "maxGsens = %f\n" res return res Just _ -> return 0 minBeta <- case fixedBeta of Nothing -> findMinimumBeta args silent epsilon fixedBeta dataPath separator initialQuery colNames typeMap sensitiveVarList tableExprData' attMap tableGs colTableCounts Just beta1 -> return beta1 when debug $ printf "epsilon = %0.6f\n" epsilon when debug $ printf "gamma = %0.6f\n" gamma let defaultBeta = epsilon / (2 * (gamma + 1)) when (debug && vb) $ printf "defaultBeta = %0.6f\n" defaultBeta let minBeta' = if isInfinite maxGsens then max defaultBeta minBeta else minBeta let beta = chooseSUBBeta defaultBeta fixedBeta (SUB {subBeta = minBeta'}) when (debug && vb) $ case fixedBeta of Just beta1 -> printf "fixedBeta = %0.6f\n" beta1 Nothing -> do printf "minBeta = %0.6f\n" minBeta printf "minBeta' = %0.6f\n" minBeta' when debug $ printf "beta = %0.6f\n" beta let b = epsilon / (gamma + 1) - beta when debug $ printf "b = %0.6f\n" b -- extract the known bounds for columns (if any) from the attMap, convert everything to ranges whenever possible let varStates = map (anyVarStateToRange . 
(M.findWithDefault Exact `flip` attMap)) colNames when (debug && vb) $ printf "attMap = %s\n" (show attMap) when (debug && vb) $ printf "colNames = %s\n" (show colNames) when (debug && vb) $ printf "varStates = %s\n" (show varStates) when (debug && vb) $ printf "colTableCounts = %s\n" (show colTableCounts) sqlsaCache <- newIORef M.empty :: IO (IORef (M.Map [String] (M.Map String Double, M.Map String Double))) -- ###################### -- here we converted forM to foldM to keep track of subexpression map that collects intermediate AR-s -- res00 <- forM tableExprData $ \ (tableName, taskName, te, (_,fromPart,wherePart)) -> do let tableExprData = getData tableExprData' (_,res00,queryResults,taskNames,groupNames,usedTaskNames) <- foldM (\ (subQueryMap',results',queryResults,taskNames',groupNames',usedTaskNames') (tableName, taskName, group, te, (sensCond,fromPart,wherePart), usedTaskNames, queryStr) -> do when debug $ putStrLn "" when debug $ putStrLn "--------------------------------" when debug $ putStrLn $ "\\echo === Analyzing table " ++ tableName ++ " in task " ++ taskName ++ " ===" when (debug && vb) $ print te let groupvars = getOneGroupColName group let groupkeys = getOneGroupValue group -- the group names returned by psql may be formatted differently -- we ensure that the group keys will be the same as in qmap let groupMatchings = concat $ zipWith (\x y -> " AND " ++ x ++ " = " ++ y) groupvars groupkeys let initQueryGroup = "SELECT * FROM (" ++ queryStr ++ ") AS " ++ temporaryTableName ++ " WHERE true" ++ groupMatchings qrs <- if (dbSensitivity args) then sendStringListsDoublesQueryToDb args initQueryGroup else (do return [([],1/0)]) let (groupStrings,qr) = if equal (length qrs) (round 0) then ([], 1/0) else head qrs let f v x y w u = performAnalysis args silent epsilon (Just beta) qr x y sensCond tableName w taskName group colNames varStates sensitiveVarList te sqlsaCache tableGstr colTableCounts u v extraWheres let pa v = performSubExprAnalysis args silent fromPart wherePart sensCond tableName taskName group subQueryMap' (f v) case M.lookup tableName tableGmap of Nothing -> do (newSubQueryMap,results0) <- pa (getG args) let (qrs,results) = unzip $ map (\ (x1,(x2,x3,x4,x5,x6)) -> (x6,(x1,(x2,x3,x4,x5)))) results0 -- let us remember for which task we computed these analyses let taskNames = replicate (length results) taskName let groupNames = replicate (length results) groupStrings let usedTaskNamess = replicate (length results) usedTaskNames --return (tableName, result) return (newSubQueryMap, results' ++ results, queryResults ++ qrs, taskNames' ++ taskNames, groupNames' ++ groupNames, usedTaskNames' ++ usedTaskNamess) Just Nothing -> -- this table is considered insensitive, so computing its sensitivity is skipped -- return (tableName, (0, 0, printf "Table %s skipped" tableName, (0,0,0,0,0))) return (subQueryMap', results' ++ [(tableName, (0, 0, printf "Table %s skipped" tableName, (0,0,0,0,0)))], queryResults, taskNames' ++ [taskName], groupNames' ++ [groupStrings], usedTaskNames' ++ [usedTaskNames]) Just tableG -> do (newSubQueryMap,results0) <- pa tableG let (qrs,results) = unzip $ map (\ (x1,(x2,x3,x4,x5,x6)) -> (x6,(x1,(x2,x3,x4,x5)))) results0 -- let us remember for which task we computed these analyses let taskNames = replicate (length results) taskName let groupNames = replicate (length results) groupStrings let usedTaskNamess = replicate (length results) usedTaskNames --return (tableName, result) return (newSubQueryMap, results' ++ results, queryResults ++ qrs, taskNames' ++ 
taskNames, groupNames' ++ groupNames, usedTaskNames' ++ usedTaskNamess)) (M.empty,[],[],[],[],[]) tableExprData -- ###################### -- all groups are now coming from the final table let taskMaps' = concat $ zipWith3 (\ ts gr i -> map (\t -> ((t,gr),[i])) ts) usedTaskNames groupNames [round 0 ..] let taskMaps = M.toList $ M.fromListWith (++) taskMaps' -- set the flag to 0 for output table tasks, since we construct "all tables together" result for them let taskNameMap = getMap taskNameList let taskMap' = map (\((t,gr),is) -> let b = (if M.member t taskNameMap then taskNameMap ! t else False) in ((t,gr,b),is) ) $ taskMaps let taskMap = map (\((t,gr,b),is) -> (t,is,gr,b)) $ M.toList $ M.fromListWith (++) taskMap' when (combinedSens args) $ when debug $ putStrLn "\n-----------------\nCombined sensitivities with Banach sensitivity smoothed only w.r.t. the same table-copy for adding/removing rows:" res0 <- forM res00 $ \ (tableName, (b,sds,combinedRes,_)) -> do when (combinedSens args) $ when debug $ putStr combinedRes return (tableName,(b,sds)) let smoothingDatas = map (\ (_,(_,_,_,x)) -> x) res00 --taskAggr0 <- forM taskMap $ \ (taskName, is, b) -> do taskAggr0 <- forM taskMap $ \ (taskName, is, gr, b) -> do when (debug && vb) $ printf "taskName=%s is=%s b=%s\n" (show taskName) (show is) (show b) let res1 = map ((\ (tableName, (b, _, _, (cc1, eembg, c0, c3, cc4))) -> (tableName, (b, cc1, eembg, c0, c3, c3, cc4))) . (res00 !!)) is when (debug && vb) $ do putStrLn "res1:" forM_ res1 print let res2 = B.sumGroupsWith (foldr (\ (b, cc1, eembg, c0, c3, minc3, cc4) (b', cc1', eembg', c0', c3', minc3', cc4') -> (min b b', max cc1 cc1', max eembg eembg', c0 + c0', c3 + c3', min minc3 minc3', max cc4 cc4')) (infinity,0,0,0,0,infinity,0)) res1 when (debug && vb) $ do putStrLn "res2:" forM_ res2 print let res2smoothed = flip map (zip [round 0..] res2) $ \ (i, (tableName, (b,cc1i,eembgi,cc0i,cc3i,minc3i,cc4i))) -> -- adding/removing-rows-smoothed w.r.t. the copies of the same table let smoothed1Sds = if combinedSens args then maximum [cc1i, cc0i, (eembgi * max (cc3i * cc4i) (cc0i + (cc3i - minc3i) * cc4i))] else cc0i -- adding/removing-rows-smoothed w.r.t. all tables smoothed2Sds = if combinedSens args then maximum (smoothed1Sds : [eembgj * (cc0i + cc3i * cc4j) | (_,(_,_,eembgj, _, _, _, cc4j)) <- skipith i res2]) else cc0i in (tableName,(b,smoothed1Sds,smoothed2Sds)) when (debug && vb) $ do putStrLn "res2smoothed:" forM_ res2smoothed print return (taskName, res2smoothed, gr, b) --when debug $ do -- printf "taskMap = %s\n" (show taskMap) -- printf "res00 = %s\n" (show res00) --let -- res0smoothed = flip map (zip [round 0..] res00) $ \ (i, (tableName, (b,sds,combinedRes,(_,_,c0i,c3i,_)))) -> -- let smoothedSds = if combinedSens args then sds `max` maximum [embgj * (c0i + c3i * c4j) | (_,embgj, _, _, c4j) <- skipith i smoothingDatas] else sds -- in (tableName,(b,smoothedSds)) --when (combinedSens args) $ when debug $ do -- putStrLn "\n-----------------\nCombined sensitivities smoothed w.r.t all tables for adding/removing rows:" -- forM_ res0smoothed $ \ (tableName, (b, smoothedSds)) -> -- printf "tableName=%s b=%0.6f ssds=%0.6f\n" tableName b smoothedSds --let taskAggr0 = map (\(taskName,is,b) -> (taskName, B.sumGroupsWith (foldr (\(x1,y1) (x2,y2) -> (max x1 x2, y1 + y2)) (0,0)) $ map (res0 !!)
is, b)) taskMap -- min x1 x2 should be correct as we need to take the minimal b --let aggrTasks res0 = map (\ (taskName,is,b) -> (taskName, B.sumGroupsWith (foldr (\ (x1,y1) (x2,y2) -> (min x1 x2, y1 + y2)) (infinity,0)) $ map (res0 !!) is, b)) taskMap --let taskAggr0 = aggrTasks res0 --let taskAggr0smoothed = aggrTasks res0smoothed -- add an aggregated result to the output, sum up the sensitivities and take the minimal b let taskAggr' = map (\ (taskName,vs,gr,b) -> if b then --let v = foldr (\(x1,y1) (x2,y2) -> (max x1 x2, y1 + y2)) (0,0) (snd $ unzip vs) in -- Use L1-norm to combine tables, so take the maximum of sensitivities instead of sum let vs1 = map (\ (tableName,(b,smoothed1Sds,smoothed2Sds)) -> (tableName,(b,smoothed1Sds))) vs vs2 = map (\ (tableName,(b,smoothed1Sds,smoothed2Sds)) -> (tableName,(b,smoothed2Sds))) vs vSmoothed = foldr (\(x1,y1) (x2,y2) -> (min x1 x2, max y1 y2)) (infinity,0) (snd $ unzip vs2) in (gr, [(taskName, (B.resultForAllTables, vSmoothed):vs1)]) else let vs1 = map (\ (tableName,(b,smoothed1Sds,smoothed2Sds)) -> (tableName,(b,smoothed1Sds))) vs in (gr,[(taskName,vs1)]) ) taskAggr0 let taskAggr = M.fromListWith (++) $ taskAggr' when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("groupNames: " ++ (show groupNames)) when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("queryResults: " ++ (show queryResults)) when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("taskkeys: " ++ (show taskAggr)) when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("taskmap: " ++ (show taskMap)) when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("tasknames: " ++ (show taskNames)) when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("usedTaskNames: " ++ (show usedTaskNames)) -- queryResult may be different from the one in qmap because it uses the modified query (with sigmoid instead of private filters, etc.) instead of the original query -- it is used for time series analysis -- and it was actually needed for the other types of analysis as well, so we now use it everywhere! let modifiedQmap = M.fromList $ zip groupNames queryResults -- TODO use this after initialQMap gets formatted similarly --let modifiedQmap = M.fromList $ zip (zip groupNames taskNames) queryResults when (debug && vb) $ putStrLn ("--- ") when (debug && vb) $ putStrLn ("initialQMap: " ++ show qmap) when (debug && vb) $ putStrLn ("modifiedQMap: " ++ show modifiedQmap) when (debug && vb) $ putStrLn ("--- ") return (qmap, taskAggr, modifiedQmap) -- find the minimum value of beta that is allowed for all tables findMinimumBeta :: ProgramOptions -> Bool -> Double -> Maybe Double -> String -> String -> String -> [String] -> [(String,[(String, String)])] -> [String] -> [DataWrtTable] -> M.Map String VarState -> [(String, Maybe Double)] -> [Int] -> IO Double findMinimumBeta args silent epsilon beta dataPath separator initialQuery colNames typeMap sensitiveVarList tableExprData' attMap tableGs colTableCounts = do let varStates = map (anyVarStateToRange .
(M.findWithDefault Exact `flip` attMap)) colNames -- ###################### -- subqueries are taken into account for computation of beta --minBetas <- forM tableExprData $ \ (tableName, _, te, (_,fromPart,wherePart),_) -> -- findMinimumBeta1 args fromPart wherePart tableName colNames varStates sensitiveVarList M.empty te colTableCounts let tableExprData = getData tableExprData' (_,minBetas) <- foldM (\ (subQueryMap',results') (tableName, taskName, gr, te, (sensCond,fromPart,wherePart),_, _) -> do (newSubQueryMap,result) <- findMinimumBeta1 args silent fromPart wherePart sensCond tableName taskName gr colNames varStates sensitiveVarList subQueryMap' te colTableCounts return (newSubQueryMap, results' ++ [result])) (M.empty,[]) tableExprData -- ###################### return $ maximum minBetas -- find the minimum value of beta that is allowed for a given (copy of a) table findMinimumBeta1 :: ProgramOptions -> Bool -> String -> String -> String -> String -> String -> OneGroupData-> [String] -> [VarState] -> [String] -> M.Map String (OneGroupData,(M.Map String AnalysisResult)) -> TableExpr -> [Int] -> IO (M.Map String (OneGroupData,(M.Map String AnalysisResult)), Double) findMinimumBeta1 args silent fromPart wherePart sensCond tableName taskName group colNames varStates sensitiveVarList subExprMap te colTableCounts = do let debug = not (alternative args) && not silent let vb = not (succinct args) let sensitiveVarSet = S.fromList sensitiveVarList let sensitiveVarIndices = [i | (i,colName) <- zip [round 0..] colNames, colName `S.member` sensitiveVarSet] let sensitiveVarIndicesSet = S.fromList sensitiveVarIndices let f x y _ w = let res = analyzeTableExprQ args x y sensCond (sensRows tableName) colNames sensitiveVarIndicesSet varStates colTableCounts False w te in (do return (res,res)) (outputMap,results) <- performSubExprAnalysis args silent fromPart wherePart sensCond tableName taskName group subExprMap f let (_,ars) = unzip results let minBeta = foldr max 0 $ map (\ar -> subBeta (sdsf ar)) ars when (debug && vb) $ printf "tableName=%s minBeta=%0.6f\n" tableName minBeta return (outputMap, minBeta) -- find the maximum value of gsens over all tables findMaximumGsens :: ProgramOptions -> Bool -> Double -> Maybe Double -> String -> String -> String -> [String] -> [(String,[(String, String)])] -> [String] -> [DataWrtTable] -> M.Map String VarState -> [(String, Maybe Double)] -> [Int] -> IO Double findMaximumGsens args silent epsilon beta dataPath separator initialQuery colNames typeMap sensitiveVarList tableExprData' attMap tableGs colTableCounts = do let varStates = map (anyVarStateToRange . 
(M.findWithDefault Exact `flip` attMap)) colNames -- ###################### -- subqueries are taken into account for computation of beta let tableExprData = getData tableExprData' (_,maxGsenss) <- foldM (\ (subQueryMap',results') (tableName, taskName, gr, te, (sensCond,fromPart,wherePart),_, _) -> do (newSubQueryMap,result) <- findMaximumGsens1 args silent fromPart wherePart sensCond tableName taskName gr colNames varStates sensitiveVarList subQueryMap' te colTableCounts return (newSubQueryMap, results' ++ [result])) (M.empty,[]) tableExprData -- ###################### return $ maximum maxGsenss -- find the maximum value of gsens for a given (copy of a) table findMaximumGsens1 :: ProgramOptions -> Bool -> String -> String -> String -> String -> String -> OneGroupData-> [String] -> [VarState] -> [String] -> M.Map String (OneGroupData,(M.Map String AnalysisResult)) -> TableExpr -> [Int] -> IO (M.Map String (OneGroupData,(M.Map String AnalysisResult)), Double) findMaximumGsens1 args silent fromPart wherePart sensCond tableName taskName group colNames varStates sensitiveVarList subExprMap te colTableCounts = do let debug = not (alternative args) && not silent let sensitiveVarSet = S.fromList sensitiveVarList let sensitiveVarIndices = [i | (i,colName) <- zip [round 0..] colNames, colName `S.member` sensitiveVarSet] let sensitiveVarIndicesSet = S.fromList sensitiveVarIndices let f x y _ w = let res = analyzeTableExprQ args x y sensCond (sensRows tableName) colNames sensitiveVarIndicesSet varStates colTableCounts True w te in (do return (res,res)) (outputMap,results) <- performSubExprAnalysis args silent fromPart wherePart sensCond tableName taskName group subExprMap f let (_,ars) = unzip results let maxGsens = foldr max 0 $ map gsens ars when debug $ printf "tableName=%s maxGsens=%0.6f\n" tableName maxGsens return (outputMap, maxGsens) -- find the maximum value of gub over all tables (actually gub should be the same for all tables) findGub :: ProgramOptions -> Bool -> Double -> Maybe Double -> String -> String -> String -> [String] -> [(String,[(String, String)])] -> [String] -> [DataWrtTable] -> M.Map String VarState -> [(String, Maybe Double)] -> [Int] -> IO Double findGub args silent epsilon beta dataPath separator initialQuery colNames typeMap sensitiveVarList tableExprData' attMap tableGs colTableCounts = do let varStates = map (anyVarStateToRange . 
(M.findWithDefault Exact `flip` attMap)) colNames -- ###################### -- subqueries are taken into account for computation of beta let tableExprData = getData tableExprData' (_,maxGubs) <- foldM (\ (subQueryMap',results') (tableName, taskName, gr, te, (sensCond,fromPart,wherePart),_, _) -> do (newSubQueryMap,result) <- findGub1 args silent fromPart wherePart sensCond tableName taskName gr colNames varStates sensitiveVarList subQueryMap' te colTableCounts return (newSubQueryMap, results' ++ [result])) (M.empty,[]) tableExprData -- ###################### return $ maximum maxGubs -- find the maximum value of gub for a given (copy of a) table findGub1 :: ProgramOptions -> Bool -> String -> String -> String -> String -> String -> OneGroupData-> [String] -> [VarState] -> [String] -> M.Map String (OneGroupData,(M.Map String AnalysisResult)) -> TableExpr -> [Int] -> IO (M.Map String (OneGroupData,(M.Map String AnalysisResult)), Double) findGub1 args silent fromPart wherePart sensCond tableName taskName group colNames varStates sensitiveVarList subExprMap te colTableCounts = do let debug = not (alternative args) && not silent let sensitiveVarSet = S.fromList sensitiveVarList let sensitiveVarIndices = [i | (i,colName) <- zip [round 0..] colNames, colName `S.member` sensitiveVarSet] let sensitiveVarIndicesSet = S.fromList sensitiveVarIndices let f x y _ w = let res = analyzeTableExprQ args x y sensCond (sensRows tableName) colNames sensitiveVarIndicesSet varStates colTableCounts True w te in (do return (res,res)) (outputMap,results) <- performSubExprAnalysis args silent fromPart wherePart sensCond tableName taskName group subExprMap f let (_,ars) = unzip results let maxGub = foldr max 0 $ map gub ars when debug $ printf "tableName=%s maxGub=%0.6f\n" tableName maxGub return (outputMap, maxGub) -- find the SELECT part (without the SELECT keyword and SUM function) of the modified query (actually fx should be the same for all tables) findModifiedQuery :: ProgramOptions -> Bool -> Double -> Maybe Double -> String -> String -> String -> [String] -> [(String,[(String, String)])] -> [String] -> [DataWrtTable] -> M.Map String VarState -> [(String, Maybe Double)] -> [Int] -> IO String findModifiedQuery args silent epsilon beta dataPath separator initialQuery colNames typeMap sensitiveVarList tableExprData' attMap tableGs colTableCounts = do let varStates = map (anyVarStateToRange . 
(M.findWithDefault Exact `flip` attMap)) colNames -- ###################### -- subqueries are taken into account for computation of beta let tableExprData = getData tableExprData' (_,fxs) <- foldM (\ (subQueryMap',results') (tableName, taskName, gr, te, (sensCond,fromPart,wherePart),_, _) -> do (newSubQueryMap,result) <- findModifiedQuery1 args silent fromPart wherePart sensCond tableName taskName gr colNames varStates sensitiveVarList subQueryMap' te colTableCounts return (newSubQueryMap, results' ++ [result])) (M.empty,[]) tableExprData -- ###################### return $ head fxs -- find the modified query for a given (copy of a) table findModifiedQuery1 :: ProgramOptions -> Bool -> String -> String -> String -> String -> String -> OneGroupData -> [String] -> [VarState] -> [String] -> M.Map String (OneGroupData,(M.Map String AnalysisResult)) -> TableExpr -> [Int] -> IO (M.Map String (OneGroupData,(M.Map String AnalysisResult)), String) findModifiedQuery1 args silent fromPart wherePart sensCond tableName taskName group colNames varStates sensitiveVarList subExprMap te colTableCounts = do let debug = not (alternative args) && not silent let sensitiveVarSet = S.fromList sensitiveVarList let sensitiveVarIndices = [i | (i,colName) <- zip [round 0..] colNames, colName `S.member` sensitiveVarSet] let sensitiveVarIndicesSet = S.fromList sensitiveVarIndices let f x y _ w = let res = analyzeTableExprQ args x y sensCond (sensRows tableName) colNames sensitiveVarIndicesSet varStates colTableCounts True w te in (do return (res,res)) (outputMap,results) <- performSubExprAnalysis args silent fromPart wherePart sensCond tableName taskName group subExprMap f let (_,ars) = unzip results let theFx = head $ map ((\ (Select (FunQ fun s) f w) -> if fun P.== "sum" then show s else error ("findModifiedQuery1: function " ++ fun ++ " not supported")) . fx) ars when debug $ printf "tableName=%s fx:\n%s\n" tableName theFx return (outputMap, theFx) performAnalysis :: ProgramOptions -> Bool -> Double -> Maybe Double -> Double -> String -> String -> String -> String -> String -> String -> OneGroupData -> [String] -> [VarState] -> [String] -> TableExpr -> IORef (M.Map [String] (M.Map String Double, M.Map String Double)) -> String -> [Int] -> M.Map String AnalysisResult -> Maybe Double -> Maybe String -> IO (AnalysisResult, (Double,Double,String,(Double,Double,Double,Double,Double),Double)) performAnalysis args silent epsilon fixedBeta initialQr fromPart wherePart sensCond tableName analyzedTable taskName group colNames varStates sensitiveVarList te sqlsaCache tableGstr colTableCounts subExprMap tableG extraWheres = do let debug = not (alternative args) && not silent let vb = not (succinct args) --when debug $ printf "varStates = %s\n" (show varStates) let sensitiveVarSet = S.fromList sensitiveVarList let sensitiveVarIndices = [i | (i,colName) <- zip [round 0..]
colNames, colName `S.member` sensitiveVarSet] let sensitiveVarIndicesSet = S.fromList sensitiveVarIndices --when debug $ printf "sensitiveVarIndices = %s\n" (show sensitiveVarIndices) let ar_for_gsens = analyzeTableExprQ args fromPart wherePart sensCond (sensRows tableName) colNames sensitiveVarIndicesSet varStates colTableCounts True subExprMap te let ar = (analyzeTableExprQ args fromPart wherePart sensCond (sensRows tableName) colNames sensitiveVarIndicesSet varStates colTableCounts False subExprMap te) {gsens = gsens ar_for_gsens} when (debug && vb) $ putStrLn "Analysis result:" when (debug && vb) $ print ar when (debug && vb) $ printf "epsilon = %0.6f\n" epsilon when (debug && vb) $ printf "gamma = %0.6f\n" gamma let defaultBeta = epsilon / (2 * (gamma + 1)) let beta = chooseSUBBeta defaultBeta fixedBeta (sdsf ar) when (debug && vb) $ printf "beta = %0.6f\n" beta let b = epsilon / (gamma + 1) - beta when (debug && vb) $ printf "b = %0.6f\n" b processIntermediateResults args silent beta taskName analyzedTable group ar subExprMap let qr = constProp $ fx ar when debug $ putStrLn "====================" when debug $ putStrLn "-- modified query: " when debug $ putStrLn (show qr ++ ";") --when (dbSensitivity args && debug) $ sendDoubleQueryToDb args (show qr) >>= \ qr -> printf "database returns %0.6f (relative error from sigmoids %0.3f%%)\n" qr (abs (qr / initialQr - 1) * 100) qr_value <- do res <- sendDoubleQueryToDb args (show qr) when debug $ printf "database returns %0.6f\n" res return res {- qr_value <- case timeSeries args of Just _ -> do res <- sendDoubleQueryToDb args (show qr) when debug $ printf "database returns %0.6f\n" res return res Nothing -> return $ error "Query result here currently computed only for time series analysis" -} let sds = constProp $ subg (sdsf ar) beta when debug $ putStrLn "-- beta-smooth derivative sensitivity query:" when debug $ putStrLn (show sds ++ ";") sds_value <- if (dbSensitivity args) then sendDoubleQueryToDb args (show sds) else (do return 0) when (dbSensitivity args && debug) $ printf "database returns %0.6f\n" sds_value -- used for generating CSF benchmarks --putStrLn ("\\echo $$" ++ tableName) --putStrLn ("\\echo " ++ show beta) --putStrLn (show qr ++ ";") --putStrLn (show sds ++ ";") --when (dbSensitivity args) $ sendDoubleQueryToDb args (show qr) >>= \ qr -> printf "\\echo %0.6f %0.3f\n" qr (abs (qr / initialQr - 1) * 100) --when (dbSensitivity args) $ printf "\\echo %0.6f\n" sds_value let tableGJustInf = case tableG of Just g | isInfinite g -> True; _ -> False (combinedSens_value,combinedRes,smoothingData) <- if combinedSens args && not tableGJustInf then do --let sqlsaExecutablePath = if debug then sqlsaExecutablePathQuiet else sqlsaExecutablePathVerbose let sqlsaExecutablePath = case inputFp5 args of {"" -> sqlsaExecutablePathQuiet; _ -> inputFp5 args ++ sqlsaExecutablePathQuiet} --when debug $ printf "%s --combined-sens%s -B %f -S %f %s %s\n" sqlsaExecutablePath (if null sensitiveVarList then "" else " -f " ++ intercalate "," sensitiveVarList) beta (gub ar) (inputFp1 args) (inputFp2 args) let Just defaultG = getG args let sqlsaArgs = ("--combined-sens" : (if null sensitiveVarList then id else ("-f" :) . (intercalate "," sensitiveVarList :)) ((if null tableGstr then id else ("--table-Gs" :) . (tableGstr :)) $ (case extraWheres of Nothing -> id; Just w -> ("-W" :) .
(w :)) ["-G", show defaultG, "-B", show beta, "-S", show (gub ar), inputFp1 args, inputFp2 args])) --print sqlsaExecutablePath --print sqlsaArgs when debug $ putStrLn $ intercalate " " (sqlsaExecutablePath : sqlsaArgs) cache <- readIORef sqlsaCache (localSensMap,localCountSensMap) <- case M.lookup sqlsaArgs cache of Nothing -> do callProcess sqlsaExecutablePath sqlsaArgs localSensMap <- readTableToSensitivityMap callProcess sqlsaExecutablePath (["--count-query", "--localsens"] ++ sqlsaArgs) localCountSensMap <- readTableToSensitivityMap writeIORef sqlsaCache $ M.insert sqlsaArgs (localSensMap,localCountSensMap) cache return (localSensMap,localCountSensMap) Just maps -> return maps when debug $ printf "localSensMap = %s\n" (show localSensMap) when debug $ printf "localCountSensMap = %s\n" (show localCountSensMap) let localSens = localSensMap M.! tableName let localCountSens = localCountSensMap M.! tableName --let Just distanceG = getG args let Just distanceG = tableG when debug $ printf "G = %0.3f\n" distanceG let ls = localSens / distanceG -- local sensitivity scaled to the combined distance let embg = exp (- beta * distanceG) let gsb = localCountSens * gsens ar * embg let combinedSens = maximum [ls, sds_value, gsb] let res = printf "table=%s gub=%0.6f gsens=%0.6f localSens=%0.6f localCountSens=%0.6f ls=%0.6f sds=%0.6f gsb=%0.6f combinedSens=%0.6f\n" tableName (gub ar) (gsens ar) localSens localCountSens ls sds_value gsb combinedSens when debug $ putStr res return (combinedSens,res, (ls, embg, sds_value, gsens ar, localCountSens)) else return (sds_value,"", (0, 0, sds_value, 0, 0)) -- TODO think what to do with ar_gsens, do they actually need a separate map, or can we reuse the ar map return (ar, (b,combinedSens_value,combinedRes,smoothingData,qr_value)) --return (b,combinedSens_value,combinedRes,smoothingData) --return (b,sds_value,combinedRes) processIntermediateResults :: ProgramOptions -> Bool -> Double -> String -> String -> OneGroupData -> AnalysisResult -> M.Map String AnalysisResult -> IO () processIntermediateResults args silent beta taskName analyzedTable group ar subExprMap = do if (not (isIntermediateQueryName taskName)) then (do return ()) else do let debug = not (alternative args) && not silent let outputTableName = queryNameToTableName taskName -- store the intermediate result into a database let groupColumns = getOneGroupColName group let groupNames = getOneGroupValue group let qr = constProp $ fx ar fx_value <- sendDoubleQueryToDb args (show qr) -- we choose beta that is good enough already, since we do not give too small betas as an input, so we can do queryDB immediately let subf_expr = if equal (show $ subf ar) "unknown" then Q 0 else constProp $ subg (subf ar) beta let sdsf_expr = if equal (show $ sdsf ar) "unknown" then Q 0 else constProp $ subg (sdsf ar) beta subf_value <- if (dbSensitivity args && not (equal (show $ subf ar) "unknown")) then sendDoubleQueryToDb args (show subf_expr) else (do return 0) sdsf_value <- if (dbSensitivity args && not (equal (show $ sdsf ar) "unknown")) then sendDoubleQueryToDb args (show sdsf_expr) else (do return 0) -- for fx, we record the output only once even if several tables were analysed, it is not clear whether multiple records could be more useful let recordedTable = "#" let tbl_fx = [show fx_value, "\'" ++ recordedTable ++ "\'"] ++ groupNames -- for sensitivities, we record the result separately for each table let tbl_sens = [show subf_value, show sdsf_value, "\'" ++ analyzedTable ++ "\'"] ++ groupNames when debug $ putStrLn 
("-- intermediate output information for " ++ taskName ++ " w.r.t " ++ analyzedTable ++ ":") when debug $ putStrLn (show qr) when debug $ putStrLn (show tbl_fx) when debug $ putStrLn (show tbl_sens) when debug $ putStrLn ("------------------") let intermediateTableCreateStatement1 = insertUniqueIntoIntermediateAggrTableSql outputTableName [tbl_fx] let intermediateTableCreateStatement2 = insertIntoIntermediateAggrTableSensSql ("_sens_" ++ outputTableName) [tbl_sens] let intermediateTableCreateStatement = intermediateTableCreateStatement1 ++ intermediateTableCreateStatement2 when debug $ putStrLn (show intermediateTableCreateStatement) sendQueriesToDb args intermediateTableCreateStatement performSubExprAnalysis :: ProgramOptions -> Bool -> String -> String -> String -> String -> String -> OneGroupData-> M.Map String (OneGroupData,(M.Map String AnalysisResult)) -> (String -> String -> String -> M.Map String AnalysisResult -> IO (AnalysisResult,b)) -> IO(M.Map String (OneGroupData,(M.Map String AnalysisResult)), [(String,b)]) performSubExprAnalysis args silent fromPart wherePart sensCond tableName taskName group subExprMap f = do let debug = not (alternative args) && not silent let vb = not (succinct args) -- extract the variable names from the taskName and the tableName let varName = queryNameToPreficedVarName taskName let tableVarName = queryNameToPreficedVarName tableName -- the query comes with "tableName = subQueryTable", and we want sensitivities w.r.t. all tables used by subQueryTable instead -- we store the analysis results separately for each input table let goodVarNames = filter (\x -> equal (varNameToTableName x) tableName) (M.keys subExprMap) -- if there are several aggregations for the same table name, all of them use the same grouping -- TODO we do not support several aggregations yet let groupColumns = if equal goodVarNames [] then [] else head $ map (\x -> getOneGroupColName (fst (subExprMap M.! x))) goodVarNames let temp = case goodVarNames of [] -> [([tableName],M.empty)] _ -> map (\goodVarName -> let m = snd $ subExprMap M.! goodVarName in let tableNames = M.keys m in (tableNames, M.fromList (map (\x -> (goodVarName, m M.! x)) tableNames))) goodVarNames let (analyzedTables0,subExprAnalysisResults0) = unzip temp let analyzedTables = concat analyzedTables0 let subExprAnalysisResults = foldr M.union M.empty subExprAnalysisResults0 when (debug && vb) $ putStrLn ("initial table: " ++ show tableName) when (debug && vb) $ putStrLn ("suitable tables: " ++ show goodVarNames) when (debug && vb) $ putStrLn ("analyzed tables: " ++ show analyzedTables) let results00 = map (\analyzedTable -> let extFromPart = if equal analyzedTable tableName then fromPart else fromPart ++ ", _sens_" ++ tableName in let extWherePart = if equal analyzedTable tableName then wherePart else wherePart ++ (if equal wherePart "" then "" else " AND ") ++ "_sens_" ++ tableName ++ ".tableName = \'" ++ analyzedTable ++ "\'" ++ (concat $ map (\groupColumn -> " AND _sens_" ++ tableName ++ "." ++ groupColumn ++ " = " ++ tableName ++ "." 
++ groupColumn) groupColumns) in f extFromPart extWherePart analyzedTable subExprAnalysisResults) analyzedTables results0 <- sequence results00 let (ars,results) = unzip results0 let mapKey = removeGroupFromQName varName let outputMap = M.insertWith chooseSaferARs mapKey (group, M.fromList $ zip analyzedTables ars) subExprMap when (debug && vb) $ putStrLn ("outputMap: " ++ show outputMap) --return (outputMap, zip analyzedTables results) return (outputMap, if (isIntermediateQueryName taskName) then [] else zip analyzedTables results) -- if we have different analysis results for different groups, we take the one that works for all of them chooseSaferARs :: (OneGroupData,M.Map String AnalysisResult) -> (OneGroupData,M.Map String AnalysisResult) -> (OneGroupData,M.Map String AnalysisResult) chooseSaferARs (x1,ars1) (x2,ars2) = -- TODO the column names should be the same, but we need to verify it just in case (x1,M.unionWith chooseSaferAR ars1 ars2) chooseSaferAR :: AnalysisResult -> AnalysisResult -> AnalysisResult chooseSaferAR ar1 ar2 = -- we do not use fx in the map anyway, so it can be arbitrary let new_fx = fx ar1 in -- we do not use the SUB function in the map anyway, so it can be arbitrary -- however, we do need the beta values let (SUB y sub_beta1) = subf ar1 in let (SUB _ sub_beta2) = subf ar2 in let new_subf = (SUB y (max sub_beta1 sub_beta2)) in let (SUB z sds_beta1) = sdsf ar1 in let (SUB _ sds_beta2) = sdsf ar2 in let new_sdsf = (SUB z (max sds_beta1 sds_beta2)) in let new_gub = max (gub ar1) (gub ar2) in let new_gsens = max (gsens ar1) (gsens ar2) in aR {fx = new_fx, subf = new_subf, sdsf = new_sdsf, gub = new_gub, gsens = new_gsens}
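-- --------------------------------------------------------------------------
-- Illustrative sketch (not part of the analyser): the two toy definitions
-- below only demonstrate how the constant-propagation and norm helpers
-- defined above behave. The names _cpExample and _dualExample are
-- hypothetical and can be removed without affecting anything else.
_cpExample :: ExprQ
_cpExample = constProp ((VarQ "x" :* Q 1) :+ Q 0)
-- applyOpQ rewrites (x * 1) to VarQ "x" and then drops the addition of the
-- constant 0, so the whole expression should reduce to VarQ "x".

_dualExample :: ExprQ
_dualExample = lqnorm 1 [VarQ "x", VarQ "y"]
-- the dual of the l_1 norm is l_infinity, so this builds the
-- max-of-absolute-values combination (via linfnorm) rather than a power sum.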
# NOTE: the enclosing module must import `re`, `time`, and `json`; they are
# referenced below but were not imported in this snippet.
def _get_data(self, filename):
    self.data = []
    with open(filename, 'r') as f:
        for line in f:
            # expected log line format:
            # "[<timestamp>] [<notification name>] <optional JSON params>"
            m = re.match(r'\[([a-zA-Z0-9\- :,]+)\]\s*\[([a-zA-Z]+)\]\s*(.*)', line)
            if m:
                temp = m.group(1)
                ts = time.mktime(time.strptime(temp, "%a, %d %b %Y %H:%M:%S UTC"))
                notifName = m.group(2)
                temp = m.group(3).strip()
                if temp:
                    notifParams = json.loads(temp)
                else:
                    notifParams = None
                self.data += [
                    {
                        'ts': ts,
                        'notifName': notifName,
                        'notifParams': notifParams,
                    }
                ]
            else:
                print('WARNING: could not match "{0}"'.format(line))
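# Minimal standalone sketch (not part of the class above): shows what the regex
# in _get_data extracts from one log line. The sample line is an assumption
# based on the pattern; adjust it to the real log format.
import re
import time
import json

sample = '[Mon, 01 Jan 2018 12:00:00 UTC] [notifData] {"source": 3}'
m = re.match(r'\[([a-zA-Z0-9\- :,]+)\]\s*\[([a-zA-Z]+)\]\s*(.*)', sample)
if m:
    ts = time.mktime(time.strptime(m.group(1), "%a, %d %b %Y %H:%M:%S UTC"))
    print(ts, m.group(2), json.loads(m.group(3)))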
def build(self, model, dataset, circuits, resource_alloc=None, verbosity=0):
    # Thin factory wrapper: delegate construction to the configured class,
    # forwarding this builder's stored settings alongside the call arguments.
    return self.cls_to_build.create_from(model=model, dataset=dataset, circuits=circuits,
                                         resource_alloc=resource_alloc, verbosity=verbosity,
                                         regularization=self.regularization, penalties=self.penalties,
                                         name=self.name, description=self.description,
                                         **self.additional_args)
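# Minimal sketch of the builder pattern used by build() above (hypothetical
# names; the real cls_to_build / create_from come from the surrounding library):
class _ToyObjective:
    @classmethod
    def create_from(cls, model, dataset, circuits, **kwargs):
        obj = cls()
        obj.config = dict(model=model, dataset=dataset, circuits=circuits, **kwargs)
        return obj

class _ToyBuilder:
    def __init__(self, cls_to_build, **additional_args):
        self.cls_to_build = cls_to_build
        self.additional_args = additional_args

    def build(self, model, dataset, circuits):
        # forward the stored configuration, mirroring the method above
        return self.cls_to_build.create_from(model=model, dataset=dataset,
                                             circuits=circuits, **self.additional_args)

objective = _ToyBuilder(_ToyObjective, name="toy").build("m", "d", ["c0"])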