text
stringlengths 2
100k
| meta
dict |
---|---|
//
// GrowlApplicationBridge.h
// Growl
//
// Created by Evan Schoenberg on Wed Jun 16 2004.
// Copyright 2004-2006 The Growl Project. All rights reserved.
//
/*!
* @header GrowlApplicationBridge.h
* @abstract Defines the GrowlApplicationBridge class.
* @discussion This header defines the GrowlApplicationBridge class as well as
* the GROWL_PREFPANE_BUNDLE_IDENTIFIER constant.
*/
#ifndef __GrowlApplicationBridge_h__
#define __GrowlApplicationBridge_h__
#import <Foundation/Foundation.h>
#import <AppKit/AppKit.h>
#import <Growl/GrowlDefines.h>
//Forward declarations
@protocol GrowlApplicationBridgeDelegate;
//------------------------------------------------------------------------------
#pragma mark -
/*!
* @class GrowlApplicationBridge
* @abstract A class used to interface with Growl.
* @discussion This class provides a means to interface with Growl.
*
* Currently it provides a way to detect if Growl is installed and launch the
* GrowlHelperApp if it's not already running.
*/
@interface GrowlApplicationBridge : NSObject {
}
/*!
* @method isGrowlInstalled
* @abstract Detects whether Growl is installed.
* @discussion Determines if the Growl prefpane and its helper app are installed.
* @result Always returns YES. This method is deprecated.
*/
+ (BOOL) isGrowlInstalled __attribute__((deprecated));
/*!
* @method isGrowlRunning
* @abstract Detects whether GrowlHelperApp is currently running.
* @discussion Cycles through the process list to find whether GrowlHelperApp is running and returns its findings.
* @result Returns YES if GrowlHelperApp is running, NO otherwise.
*/
+ (BOOL) isGrowlRunning;
/*!
* @method isMistEnabled
* @abstract Gives the caller a fairly good indication of whether or not built-in notifications (Mist) will be used.
* @discussion Since this call makes use of isGrowlRunning, it is entirely possible for this value to change between
* this call and executing a notification dispatch.
* @result Returns YES if Growl isn't reachable, the developer has not opted out of
* Mist, and the user hasn't set the global Mist enable key to false.
*/
+ (BOOL)isMistEnabled;
/*!
* @method setShouldUseBuiltInNotifications
* @abstract Opt-out mechanism for the Mist notification style in the event Growl can't be reached.
* @discussion If Growl is unavailable due to not being installed or as a result of being turned off, then
* this option can enable/disable a built-in fire-and-forget display style.
* @param should Specifies whether the developer wants to opt in (the default) or opt out
* of the built-in Mist style in the event Growl is unreachable.
*/
+ (void)setShouldUseBuiltInNotifications:(BOOL)should;
/*!
* @method shouldUseBuiltInNotifications
* @abstract Returns the current opt-in state of the framework's use of the Mist display style.
* @result Returns NO if the developer opted out of Mist; the default value is YES.
*/
+ (BOOL)shouldUseBuiltInNotifications;
#pragma mark -
/*!
* @method setGrowlDelegate:
* @abstract Set the object which will be responsible for providing and receiving Growl information.
* @discussion This must be called before using GrowlApplicationBridge.
*
* The methods in the GrowlApplicationBridgeDelegate protocol are required
* and return the basic information needed to register with Growl.
*
* The methods in the GrowlApplicationBridgeDelegate_InformalProtocol
* informal protocol are individually optional. They provide a greater
* degree of interaction between the application and Growl, such as informing
* the application when one of its Growl notifications is clicked by the user.
*
* The methods in the GrowlApplicationBridgeDelegate_Installation_InformalProtocol
* informal protocol are individually optional and are only applicable when
* using the Growl-WithInstaller.framework which allows for automated Growl
* installation.
*
* When this method is called, data will be collected from inDelegate, Growl
* will be launched if it is not already running, and the application will be
* registered with Growl.
*
* If using the Growl-WithInstaller framework, if Growl is already installed
* but this copy of the framework has an updated version of Growl, the user
* will be prompted to update automatically.
*
* @param inDelegate The delegate for the GrowlApplicationBridge. It must conform to the GrowlApplicationBridgeDelegate protocol.
*/
+ (void) setGrowlDelegate:(NSObject<GrowlApplicationBridgeDelegate> *)inDelegate;
/*!
* @method growlDelegate
* @abstract Return the object responsible for providing and receiving Growl information.
* @discussion See setGrowlDelegate: for details.
* @result The Growl delegate.
*/
+ (NSObject<GrowlApplicationBridgeDelegate> *) growlDelegate;
#pragma mark -
/*!
* @method notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:
* @abstract Send a Growl notification.
* @discussion This is the preferred means for sending a Growl notification.
* The notification name and at least one of the title and description are
* required (all three are preferred). All other parameters may be
* <code>nil</code> (or 0 or NO as appropriate) to accept default values.
*
* If using the Growl-WithInstaller framework, if Growl is not installed the
* user will be prompted to install Growl. If the user cancels, this method
* will have no effect until the next application session, at which time when
* it is called the user will be prompted again. The user is also given the
* option to not be prompted again. If the user does choose to install Growl,
* the requested notification will be displayed once Growl is installed and
* running.
*
* @param title The title of the notification displayed to the user.
* @param description The full description of the notification displayed to the user.
* @param notifName The internal name of the notification. Should be human-readable, as it will be displayed in the Growl preference pane.
* @param iconData <code>NSData</code> object to show with the notification as its icon. If <code>nil</code>, the application's icon will be used instead.
* @param priority The priority of the notification. The default value is 0; positive values are higher priority and negative values are lower priority. Not all Growl displays support priority.
* @param isSticky If YES, the notification will remain on screen until clicked. Not all Growl displays support sticky notifications.
* @param clickContext A context passed back to the Growl delegate if it implements -(void)growlNotificationWasClicked: and the notification is clicked. Not all display plugins support clicking. The clickContext must be plist-encodable (completely of <code>NSString</code>, <code>NSArray</code>, <code>NSNumber</code>, <code>NSDictionary</code>, and <code>NSData</code> types).
*/
+ (void) notifyWithTitle:(NSString *)title
description:(NSString *)description
notificationName:(NSString *)notifName
iconData:(NSData *)iconData
priority:(signed int)priority
isSticky:(BOOL)isSticky
clickContext:(id)clickContext;
/*!
* @method notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:identifier:
* @abstract Send a Growl notification.
* @discussion This is the preferred means for sending a Growl notification.
* The notification name and at least one of the title and description are
* required (all three are preferred). All other parameters may be
* <code>nil</code> (or 0 or NO as appropriate) to accept default values.
*
* If using the Growl-WithInstaller framework, if Growl is not installed the
* user will be prompted to install Growl. If the user cancels, this method
* will have no effect until the next application session, at which time when
* it is called the user will be prompted again. The user is also given the
* option to not be prompted again. If the user does choose to install Growl,
* the requested notification will be displayed once Growl is installed and
* running.
*
* @param title The title of the notification displayed to the user.
* @param description The full description of the notification displayed to the user.
* @param notifName The internal name of the notification. Should be human-readable, as it will be displayed in the Growl preference pane.
* @param iconData <code>NSData</code> object to show with the notification as its icon. If <code>nil</code>, the application's icon will be used instead.
* @param priority The priority of the notification. The default value is 0; positive values are higher priority and negative values are lower priority. Not all Growl displays support priority.
* @param isSticky If YES, the notification will remain on screen until clicked. Not all Growl displays support sticky notifications.
* @param clickContext A context passed back to the Growl delegate if it implements -(void)growlNotificationWasClicked: and the notification is clicked. Not all display plugins support clicking. The clickContext must be plist-encodable (completely of <code>NSString</code>, <code>NSArray</code>, <code>NSNumber</code>, <code>NSDictionary</code>, and <code>NSData</code> types).
* @param identifier An identifier for this notification. Notifications with equal identifiers are coalesced.
*/
+ (void) notifyWithTitle:(NSString *)title
description:(NSString *)description
notificationName:(NSString *)notifName
iconData:(NSData *)iconData
priority:(signed int)priority
isSticky:(BOOL)isSticky
clickContext:(id)clickContext
identifier:(NSString *)identifier;
/*! @method notifyWithDictionary:
* @abstract Notifies using a userInfo dictionary suitable for passing to
* <code>NSDistributedNotificationCenter</code>.
* @param userInfo The dictionary to notify with.
* @discussion Before Growl 0.6, your application would have posted
* notifications using <code>NSDistributedNotificationCenter</code> by
* creating a userInfo dictionary with the notification data. This had the
* advantage of allowing you to add other data to the dictionary for programs
* besides Growl that might be listening.
*
* This method allows you to use such dictionaries without being restricted
* to using <code>NSDistributedNotificationCenter</code>. The keys for this dictionary
* can be found in GrowlDefines.h.
*/
+ (void) notifyWithDictionary:(NSDictionary *)userInfo;
#pragma mark -
/*! @method registerWithDictionary:
* @abstract Register your application with Growl without setting a delegate.
* @discussion When you call this method with a dictionary,
* GrowlApplicationBridge registers your application using that dictionary.
* If you pass <code>nil</code>, GrowlApplicationBridge will ask the delegate
* (if there is one) for a dictionary, and if that doesn't work, it will look
* in your application's bundle for an auto-discoverable plist.
* (XXX refer to more information on that)
*
* If you pass a dictionary to this method, it must include the
* <code>GROWL_APP_NAME</code> key, unless a delegate is set.
*
* This method is mainly an alternative to the delegate system introduced
* with Growl 0.6. Without a delegate, you cannot receive callbacks such as
* <code>-growlIsReady</code> (since they are sent to the delegate). You can,
* however, set a delegate after registering without one.
*
* This method was introduced in Growl.framework 0.7.
*/
+ (BOOL) registerWithDictionary:(NSDictionary *)regDict;
/*! @method reregisterGrowlNotifications
* @abstract Reregister the notifications for this application.
* @discussion This method does not normally need to be called. If your
* application changes what notifications it is registering with Growl, call
* this method to have the Growl delegate's
* <code>-registrationDictionaryForGrowl</code> method called again and the
* Growl registration information updated.
*
* This method is now implemented using <code>-registerWithDictionary:</code>.
*/
+ (void) reregisterGrowlNotifications;
#pragma mark -
/*! @method setWillRegisterWhenGrowlIsReady:
* @abstract Tells GrowlApplicationBridge to register with Growl when Growl
* launches (or not).
* @discussion When Growl has started listening for notifications, it posts a
* <code>GROWL_IS_READY</code> notification on the Distributed Notification
* Center. GrowlApplicationBridge listens for this notification, using it to
* perform various tasks (such as calling your delegate's
* <code>-growlIsReady</code> method, if it has one). If this method is
* called with <code>YES</code>, one of those tasks will be to reregister
* with Growl (in the manner of <code>-reregisterGrowlNotifications</code>).
*
* This attribute is automatically set back to <code>NO</code> (the default)
* after every <code>GROWL_IS_READY</code> notification.
* @param flag <code>YES</code> if you want GrowlApplicationBridge to register with
* Growl when next it is ready; <code>NO</code> if not.
*/
+ (void) setWillRegisterWhenGrowlIsReady:(BOOL)flag;
/*! @method willRegisterWhenGrowlIsReady
* @abstract Reports whether GrowlApplicationBridge will register with Growl
* when Growl next launches.
* @result <code>YES</code> if GrowlApplicationBridge will register with Growl
* when next it posts GROWL_IS_READY; <code>NO</code> if not.
*/
+ (BOOL) willRegisterWhenGrowlIsReady;
#pragma mark -
/*! @method registrationDictionaryFromDelegate
* @abstract Asks the delegate for a registration dictionary.
* @discussion If no delegate is set, or if the delegate's
* <code>-registrationDictionaryForGrowl</code> method returns
* <code>nil</code>, this method returns <code>nil</code>.
*
* This method does not attempt to clean up the dictionary in any way - for
* example, if it is missing the <code>GROWL_APP_NAME</code> key, the result
* will be missing it too. Use <code>+[GrowlApplicationBridge
* registrationDictionaryByFillingInDictionary:]</code> or
* <code>+[GrowlApplicationBridge
* registrationDictionaryByFillingInDictionary:restrictToKeys:]</code> to try
* to fill in missing keys.
*
* This method was introduced in Growl.framework 0.7.
* @result A registration dictionary.
*/
+ (NSDictionary *) registrationDictionaryFromDelegate;
/*! @method registrationDictionaryFromBundle:
* @abstract Looks in a bundle for a registration dictionary.
* @discussion This method looks in a bundle for an auto-discoverable
* registration dictionary file using <code>-[NSBundle
* pathForResource:ofType:]</code>. If it finds one, it loads the file using
* <code>+[NSDictionary dictionaryWithContentsOfFile:]</code> and returns the
* result.
*
* If you pass <code>nil</code> as the bundle, the main bundle is examined.
*
* This method does not attempt to clean up the dictionary in any way - for
* example, if it is missing the <code>GROWL_APP_NAME</code> key, the result
* will be missing it too. Use <code>+[GrowlApplicationBridge
* registrationDictionaryByFillingInDictionary:]</code> or
* <code>+[GrowlApplicationBridge
* registrationDictionaryByFillingInDictionary:restrictToKeys:]</code> to try
* to fill in missing keys.
*
* This method was introduced in Growl.framework 0.7.
* @result A registration dictionary.
*/
+ (NSDictionary *) registrationDictionaryFromBundle:(NSBundle *)bundle;
/*! @method bestRegistrationDictionary
* @abstract Obtains a registration dictionary, filled out to the best of
* GrowlApplicationBridge's knowledge.
* @discussion This method creates a registration dictionary as best
* GrowlApplicationBridge knows how.
*
* First, GrowlApplicationBridge contacts the Growl delegate (if there is
* one) and gets the registration dictionary from that. If no such dictionary
* was obtained, GrowlApplicationBridge looks in your application's main
* bundle for an auto-discoverable registration dictionary file. If that
* doesn't exist either, this method returns <code>nil</code>.
*
* Second, GrowlApplicationBridge calls
* <code>+registrationDictionaryByFillingInDictionary:</code> with whatever
* dictionary was obtained. The result of that method is the result of this
* method.
*
* GrowlApplicationBridge uses this method when you call
* <code>+setGrowlDelegate:</code>, or when you call
* <code>+registerWithDictionary:</code> with <code>nil</code>.
*
* This method was introduced in Growl.framework 0.7.
* @result A registration dictionary.
*/
+ (NSDictionary *) bestRegistrationDictionary;
#pragma mark -
/*! @method registrationDictionaryByFillingInDictionary:
* @abstract Tries to fill in missing keys in a registration dictionary.
* @discussion This method examines the passed-in dictionary for missing keys,
* and tries to work out correct values for them. As of 0.7, it uses:
*
* Key Value
* --- -----
* <code>GROWL_APP_NAME</code> <code>CFBundleExecutableName</code>
* <code>GROWL_APP_ICON_DATA</code> The data of the icon of the application.
* <code>GROWL_APP_LOCATION</code> The location of the application.
* <code>GROWL_NOTIFICATIONS_DEFAULT</code> <code>GROWL_NOTIFICATIONS_ALL</code>
*
* Keys are only filled in if missing; if a key is present in the dictionary,
* its value will not be changed.
*
* This method was introduced in Growl.framework 0.7.
* @param regDict The dictionary to fill in.
* @result The dictionary with the keys filled in. This is an autoreleased
* copy of <code>regDict</code>.
*/
+ (NSDictionary *) registrationDictionaryByFillingInDictionary:(NSDictionary *)regDict;
/*! @method registrationDictionaryByFillingInDictionary:restrictToKeys:
* @abstract Tries to fill in missing keys in a registration dictionary.
* @discussion This method examines the passed-in dictionary for missing keys,
* and tries to work out correct values for them. As of 0.7, it uses:
*
* Key Value
* --- -----
* <code>GROWL_APP_NAME</code> <code>CFBundleExecutableName</code>
* <code>GROWL_APP_ICON_DATA</code> The data of the icon of the application.
* <code>GROWL_APP_LOCATION</code> The location of the application.
* <code>GROWL_NOTIFICATIONS_DEFAULT</code> <code>GROWL_NOTIFICATIONS_ALL</code>
*
* Only those keys that are listed in <code>keys</code> will be filled in.
* Other missing keys are ignored. Also, keys are only filled in if missing;
* if a key is present in the dictionary, its value will not be changed.
*
* This method was introduced in Growl.framework 0.7.
* @param regDict The dictionary to fill in.
* @param keys The keys to fill in. If <code>nil</code>, any missing keys are filled in.
* @result The dictionary with the keys filled in. This is an autoreleased
* copy of <code>regDict</code>.
*/
+ (NSDictionary *) registrationDictionaryByFillingInDictionary:(NSDictionary *)regDict restrictToKeys:(NSSet *)keys;
/*! @brief Tries to fill in missing keys in a notification dictionary.
* @param regDict The notification dictionary to fill in.
* @return The dictionary with the keys filled in. This will be a separate instance from \a regDict.
* @discussion This function examines \a regDict for missing keys, and
* tries to get them from the last known registration dictionary. As of 1.1,
* the keys that it will look for are:
*
* \li <code>GROWL_APP_NAME</code>
* \li <code>GROWL_APP_ICON_DATA</code>
*
* @since Growl.framework 1.1
*/
+ (NSDictionary *) notificationDictionaryByFillingInDictionary:(NSDictionary *)regDict;
/*! @brief Returns information about this copy of the Growl framework
* (presumably the framework bundle's info dictionary — verify against the implementation).
*/
+ (NSDictionary *) frameworkInfoDictionary;
@end
//------------------------------------------------------------------------------
#pragma mark -
/*!
* @protocol GrowlApplicationBridgeDelegate
* @abstract Required protocol for the Growl delegate.
* @discussion The methods in this protocol are required and are called
* automatically as needed by GrowlApplicationBridge. See
* <code>+[GrowlApplicationBridge setGrowlDelegate:]</code>.
* See also <code>GrowlApplicationBridgeDelegate_InformalProtocol</code>.
*/
@protocol GrowlApplicationBridgeDelegate
// This protocol intentionally declares no methods:
// -registrationDictionaryForGrowl moved to the informal protocol as of Growl 0.7,
// so conformance alone is sufficient; all delegate callbacks are optional.
@end
//------------------------------------------------------------------------------
#pragma mark -
/*!
* @category NSObject(GrowlApplicationBridgeDelegate_InformalProtocol)
* @abstract Methods which may be optionally implemented by the GrowlDelegate.
* @discussion The methods in this informal protocol will only be called if implemented by the delegate.
*/
@interface NSObject (GrowlApplicationBridgeDelegate_InformalProtocol)
/*!
* @method registrationDictionaryForGrowl
* @abstract Return the dictionary used to register this application with Growl.
* @discussion The returned dictionary gives Growl the complete list of
* notifications this application will ever send, and it also specifies which
* notifications should be enabled by default. Each is specified by an array
* of <code>NSString</code> objects.
*
* For most applications, these two arrays can be the same (if all sent
* notifications should be displayed by default).
*
* The <code>NSString</code> objects of these arrays will correspond to the
* <code>notificationName:</code> parameter passed in
* <code>+[GrowlApplicationBridge
* notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:]</code> calls.
*
* The dictionary should have the required key object pairs:
* key: GROWL_NOTIFICATIONS_ALL object: <code>NSArray</code> of <code>NSString</code> objects
* key: GROWL_NOTIFICATIONS_DEFAULT object: <code>NSArray</code> of <code>NSString</code> objects
*
* The dictionary may have the following key object pairs:
* key: GROWL_NOTIFICATIONS_HUMAN_READABLE_NAMES object: <code>NSDictionary</code> of key: notification name object: human-readable notification name
*
* You do not need to implement this method if you have an auto-discoverable
* plist file in your app bundle. (XXX refer to more information on that)
*
* @result The <code>NSDictionary</code> to use for registration.
*/
- (NSDictionary *) registrationDictionaryForGrowl;
/*!
* @method applicationNameForGrowl
* @abstract Return the name of this application which will be used for Growl bookkeeping.
* @discussion This name is used both internally and in the Growl preferences.
*
* This should remain stable between different versions and incarnations of
* your application.
* For example, "SurfWriter" is a good app name, whereas "SurfWriter 2.0" and
* "SurfWriter Lite" are not.
*
* You do not need to implement this method if you are providing the
* application name elsewhere, meaning in an auto-discoverable plist file in
* your app bundle (XXX refer to more information on that) or in the result
* of -registrationDictionaryForGrowl.
*
* @result The name of the application using Growl.
*/
- (NSString *) applicationNameForGrowl;
/*!
* @method applicationIconForGrowl
* @abstract Return the <code>NSImage</code> to treat as the application icon.
* @discussion The delegate may optionally return an <code>NSImage</code>
* object to use as the application icon. If this method is not implemented,
* <code>-applicationIconDataForGrowl</code> is tried. If that method is not
* implemented, the application's own icon is used. Neither method is
* generally needed.
* @result The <code>NSImage</code> to treat as the application icon.
*/
- (NSImage *) applicationIconForGrowl;
/*!
* @method applicationIconDataForGrowl
* @abstract Return the <code>NSData</code> to treat as the application icon.
* @discussion The delegate may optionally return an <code>NSData</code>
* object to use as the application icon; if this is not implemented, the
* application's own icon is used. This is not generally needed.
* @result The <code>NSData</code> to treat as the application icon.
* @deprecated In version 1.1, in favor of <code>-applicationIconForGrowl</code>.
*/
- (NSData *) applicationIconDataForGrowl;
/*!
* @method growlIsReady
* @abstract Informs the delegate that Growl has launched.
* @discussion Informs the delegate that Growl (specifically, the
* GrowlHelperApp) was launched successfully. The application can take actions
* with the knowledge that Growl is installed and functional.
*/
- (void) growlIsReady;
/*!
* @method growlNotificationWasClicked:
* @abstract Informs the delegate that a Growl notification was clicked.
* @discussion Informs the delegate that a Growl notification was clicked. It
* is only sent for notifications sent with a non-<code>nil</code>
* clickContext, so if you want to receive a message when a notification is
* clicked, clickContext must not be <code>nil</code> when calling
* <code>+[GrowlApplicationBridge notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:]</code>.
* @param clickContext The clickContext passed when displaying the notification originally via +[GrowlApplicationBridge notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:].
*/
- (void) growlNotificationWasClicked:(id)clickContext;
/*!
* @method growlNotificationTimedOut:
* @abstract Informs the delegate that a Growl notification timed out.
* @discussion Informs the delegate that a Growl notification timed out. It
* is only sent for notifications sent with a non-<code>nil</code>
* clickContext, so if you want to receive a message when a notification
* times out, clickContext must not be <code>nil</code> when calling
* <code>+[GrowlApplicationBridge notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:]</code>.
* @param clickContext The clickContext passed when displaying the notification originally via +[GrowlApplicationBridge notifyWithTitle:description:notificationName:iconData:priority:isSticky:clickContext:].
*/
- (void) growlNotificationTimedOut:(id)clickContext;
/*!
* @method hasNetworkClientEntitlement
* @abstract Used only in sandboxed situations since we don't know whether the app has the com.apple.security.network.client entitlement.
* @discussion The Growl delegate calls this to find out if the application has the com.apple.security.network.client entitlement,
* since we can't find this out without hitting the sandbox. We only call it if we detect that the application is sandboxed.
* @result YES if the application has the com.apple.security.network.client entitlement, NO otherwise.
*/
- (BOOL) hasNetworkClientEntitlement;
@end
#pragma mark -
#endif /* __GrowlApplicationBridge_h__ */
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (C) 2006 W3C (R) (MIT ERCIM Keio), All Rights Reserved.
W3C liability, trademark and document use rules apply.
http://www.w3.org/Consortium/Legal/ipr-notice
http://www.w3.org/Consortium/Legal/copyright-documents
Generated from: $Id: examples.xml,v 1.57 2008/02/20 16:41:48 pdowney Exp $
-->
<env:Envelope xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:p="http://www.w3.org/2002/ws/databinding/patterns/6/09/"
xmlns:ex="http://www.w3.org/2002/ws/databinding/examples/6/09/"
xmlns:env="http://www.w3.org/2003/05/soap-envelope">
<env:Header/>
<env:Body>
<ex:echoGlobalElementSimpleType>
<ex:globalElementSimpleType xmlns:wsdl11="http://schemas.xmlsoap.org/wsdl/"
xmlns:soap11enc="http://schemas.xmlsoap.org/soap/encoding/">value2</ex:globalElementSimpleType>
</ex:echoGlobalElementSimpleType>
</env:Body>
</env:Envelope> | {
"pile_set_name": "Github"
} |
/*
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
*/
#pragma once
#include <limits>
#include "util/rb_map.h"
namespace lean {
/** \brief A "functional" priority queue, i.e., copy is O(1).
If two keys have the same priority, then we break ties by
giving higher priority to the last one added.
The insert/erase operations are O(log n),
where n is the size of the queue.
The content of the queue can be dumped into a buffer or traversed */
template<typename K, typename CMP>
class priority_queue {
typedef pair<unsigned, unsigned> pos;
struct pos_cmp {
int operator()(pos const & p1, pos const & p2) const {
if (p1.first == p2.first)
return p1.second == p2.second ? 0 : (p1.second > p2.second ? -1 : 1);
else
return p1.first > p2.first ? -1 : 1;
}
};
unsigned m_next{0};
rb_map<K, pos, CMP> m_key_to_pos;
rb_map<pos, K, pos_cmp> m_pos_to_key;
void normalize() {
buffer<K> ks;
to_buffer(ks);
clear();
for (K const & k : ks)
insert(k);
}
public:
priority_queue(CMP const & cmp = CMP()):m_key_to_pos(cmp) {}
template<typename F>
void for_each(F && f) const {
m_pos_to_key.for_each([&](pos const &, K const & k) {
f(k);
});
}
void to_buffer(buffer<K> & r) const {
for_each([&](K const & k) { r.push_back(k); });
}
bool contains(K const & k) const {
return m_key_to_pos.contains(k);
}
optional<unsigned> get_prio(K const & k) const {
if (auto p = m_key_to_pos.find(k))
return optional<unsigned>(p->first);
else
return optional<unsigned>();
}
// useful if \c CMP only compares part of \c K
K const * get_key(K const & k) const {
if (auto p = m_key_to_pos.find(k))
return m_pos_to_key.find(*p);
else
return nullptr;
}
void clear() {
m_key_to_pos.clear();
m_pos_to_key.clear();
m_next = 0;
}
void insert(K const & k, unsigned prio = 0) {
if (m_next == std::numeric_limits<unsigned>::max())
normalize();
if (auto pos = m_key_to_pos.find(k)) {
m_pos_to_key.erase(*pos);
}
m_key_to_pos.insert(k, pos(prio, m_next));
m_pos_to_key.insert(pos(prio, m_next), k);
m_next++;
}
void erase(K const & k) {
if (auto pos = m_key_to_pos.find(k)) {
m_pos_to_key.erase(*pos);
m_key_to_pos.erase(k);
}
}
};
}
| {
"pile_set_name": "Github"
} |
/*
* eos - A 3D Morphable Model fitting library written in modern C++11/14.
*
* File: include/eos/render/detail/utils.hpp
*
* Copyright 2014-2017 Patrik Huber
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RENDER_DETAIL_UTILS_HPP_
#define RENDER_DETAIL_UTILS_HPP_
#include "eos/core/Rect.hpp"
#include "eos/render/detail/Vertex.hpp"
#include "glm/vec2.hpp"
#include "glm/vec4.hpp"
#include "glm/geometric.hpp"
#include <algorithm>
#include <cmath>
/**
* Implementations of internal functions, not part of the
* API we expose and not meant to be used by a user.
*/
namespace eos {
namespace render {
namespace detail {
/**
 * Calculates the enclosing bounding box of 3 vertices (a triangle). If the
 * triangle is partly outside the screen, it will be clipped appropriately.
 *
 * Todo: If it is fully outside the screen, check what happens, but it works.
 *
 * @param[in] v0 First vertex.
 * @param[in] v1 Second vertex.
 * @param[in] v2 Third vertex.
 * @param[in] viewport_width Screen width.
 * @param[in] viewport_height Screen height.
 * @return A bounding box rectangle.
 */
template <typename T, glm::precision P = glm::defaultp>
core::Rect<int> calculate_clipped_bounding_box(const glm::tvec2<T, P>& v0, const glm::tvec2<T, P>& v1,
                                               const glm::tvec2<T, P>& v2, int viewport_width,
                                               int viewport_height)
{
    using std::ceil;
    using std::floor;
    using std::max;
    using std::min;
    // Round each coordinate outwards (floor for the minima, ceil for the
    // maxima), then clamp the extrema to [0, width-1] x [0, height-1].
    const T floor_x0 = floor(v0[0]), floor_x1 = floor(v1[0]), floor_x2 = floor(v2[0]);
    const T ceil_x0 = ceil(v0[0]), ceil_x1 = ceil(v1[0]), ceil_x2 = ceil(v2[0]);
    const T floor_y0 = floor(v0[1]), floor_y1 = floor(v1[1]), floor_y2 = floor(v2[1]);
    const T ceil_y0 = ceil(v0[1]), ceil_y1 = ceil(v1[1]), ceil_y2 = ceil(v2[1]);
    const int min_x = max(min(floor_x0, min(floor_x1, floor_x2)), T(0));
    const int max_x = min(max(ceil_x0, max(ceil_x1, ceil_x2)), static_cast<T>(viewport_width - 1));
    const int min_y = max(min(floor_y0, min(floor_y1, floor_y2)), T(0));
    const int max_y = min(max(ceil_y0, max(ceil_y1, ceil_y2)), static_cast<T>(viewport_height - 1));
    // The Rect is built as {x, y, width, height}.
    return core::Rect<int>{min_x, min_y, max_x - min_x, max_y - min_y};
}
/**
* Computes whether the triangle formed out of the given three vertices is
* counter-clockwise in screen space. Assumes the origin of the screen is on
* the top-left, and the y-axis goes down (as in OpenCV images).
*
* @param[in] v0 First vertex.
* @param[in] v1 Second vertex.
* @param[in] v2 Third vertex.
* @return Whether the vertices are CCW in screen space.
*/
template <typename T, glm::precision P = glm::defaultp>
bool are_vertices_ccw_in_screen_space(const glm::tvec2<T, P>& v0, const glm::tvec2<T, P>& v1,
                                      const glm::tvec2<T, P>& v2)
{
    // Edge vectors from v0, using GLM's named members (resolves the previous
    // "replace with x/y (GLM)" todo; .x/.y are equivalent to operator[](0/1)).
    const auto dx01 = v1.x - v0.x;
    const auto dy01 = v1.y - v0.y;
    const auto dx02 = v2.x - v0.x;
    const auto dy02 = v2.y - v0.y;
    // 2D cross product of the two edges. With the origin at the top-left and the
    // y-axis pointing down (OpenCV convention), a negative value means CCW.
    // Original: (dx01*dy02 - dy01*dx02 > 0.0f), flipped for the y-down system.
    return (dx01 * dy02 - dy01 * dx02 < T(0));
}
/**
 * Evaluates the implicit line equation a*x + b*y + c for the line through
 * v1 and v2 at the point (x, y). All arithmetic is carried out in double
 * precision regardless of T, matching the original behaviour.
 */
template <typename T, glm::precision P = glm::defaultp>
double implicit_line(float x, float y, const glm::tvec4<T, P>& v1, const glm::tvec4<T, P>& v2)
{
    const double a = (double)v1[1] - (double)v2[1];
    const double b = (double)v2[0] - (double)v1[0];
    return a * (double)x + b * (double)y + (double)v1[0] * (double)v2[1] -
           (double)v2[0] * (double)v1[1];
}
/**
 * Clips a polygon (given as a vertex list) against a plane in homogeneous
 * (4D) clip space, e.g. the near plane. Produces 0..n+1 output vertices;
 * intersection vertices get linearly interpolated position, color and texcoords.
 */
inline std::vector<Vertex<float>> clip_polygon_to_plane_in_4d(const std::vector<Vertex<float>>& vertices,
                                                              const glm::tvec4<float>& plane_normal)
{
    std::vector<Vertex<float>> clippedVertices;
    // We can have 2 cases:
    // * 1 vertex visible: we make 1 new triangle out of the visible vertex plus the 2 intersection points
    // with the near-plane
    // * 2 vertices visible: we have a quad, so we have to make 2 new triangles out of it.
    // See here for more info?
    // http://math.stackexchange.com/questions/400268/equation-for-a-line-through-a-plane-in-homogeneous-coordinates
    for (unsigned int i = 0; i < vertices.size(); i++)
    {
        const int a = i; // the current vertex
        const int b = (i + 1) % vertices.size(); // the following vertex (wraps around 0)
        // Signed distances of both edge endpoints to the plane:
        const float fa = glm::dot(vertices[a].position, plane_normal); // Note: Shouldn't they be unit length?
        const float fb = glm::dot(vertices[b].position,
                                  plane_normal); // < 0 means on visible side, > 0 means on invisible side?
        if ((fa < 0 && fb > 0) || (fa > 0 && fb < 0)) // one vertex is on the visible side of the plane, one
                                                      // on the invisible? so we need to split?
        {
            const auto direction = vertices[b].position - vertices[a].position;
            const float t = -(glm::dot(plane_normal, vertices[a].position)) /
                            (glm::dot(plane_normal, direction)); // the parametric value on the line, where the line
                                                                 // to draw intersects the plane?
            // generate a new vertex at the line-plane intersection point
            const auto position = vertices[a].position + t * direction;
            const auto color = vertices[a].color + t * (vertices[b].color - vertices[a].color);
            const auto texCoord =
                vertices[a].texcoords +
                t * (vertices[b].texcoords -
                     vertices[a].texcoords); // We could omit that if we don't render with texture.
            if (fa < 0) // we keep the original vertex plus the new one
            {
                clippedVertices.push_back(vertices[a]);
                clippedVertices.push_back(Vertex<float>{position, color, texCoord});
            } else if (fb < 0) // we use only the new vertex
            {
                clippedVertices.push_back(Vertex<float>{position, color, texCoord});
            }
        } else if (fa < 0 && fb < 0) // both are visible (on the "good" side of the plane), no splitting
                                     // required, use the current vertex
        {
            clippedVertices.push_back(vertices[a]);
        }
        // else, both vertices are not visible, nothing to add and draw
        // NOTE(review): edges exactly on the plane (fa == 0 or fb == 0) fall through to the
        // "both invisible" branch unless both are < 0 — presumably intended; confirm.
    }
    return clippedVertices;
};
/**
* @brief Todo.
*
* Takes in clip coords? and outputs NDC.
* divides by w and outputs [x_ndc, y_ndc, z_ndc, 1/w_clip].
* The w-component is set to 1/w_clip (which is what OpenGL passes to the FragmentShader).
*
* @param[in] vertex X.
* @ return X.
*/
template <typename T, glm::precision P = glm::defaultp>
glm::tvec4<T, P> divide_by_w(const glm::tvec4<T, P>& vertex)
{
    // Perspective divide: map clip coordinates to NDC. For an orthographic
    // projection, w is 1 and the divide is the identity on x/y/z.
    glm::tvec4<T, P> result(vertex / vertex.w);
    // Overwrite the w slot with 1/w_clip — this is what OpenGL hands to the
    // fragment shader, and what perspective-correct interpolation needs later.
    result.w = 1.0 / vertex.w;
    return result;
}
} /* namespace detail */
} /* namespace render */
} /* namespace eos */
#endif /* RENDER_DETAIL_UTILS_HPP_ */
| {
"pile_set_name": "Github"
} |
[EMC]
DEBUG=0
LOG_LEVEL=0
[RS274NGC]
SUBROUTINE_PATH = .
[PYTHON]
PATH_PREPEND=.
TOPLEVEL=subs.py
| {
"pile_set_name": "Github"
} |
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// TIFF decode.
#ifndef WEBP_EXAMPLES_TIFFDEC_H_
#define WEBP_EXAMPLES_TIFFDEC_H_
#ifdef __cplusplus
extern "C" {
#endif
struct Metadata;
struct WebPPicture;
// Reads a TIFF from 'filename', returning the decoded output in 'pic'.
// If 'keep_alpha' is true and the TIFF has an alpha channel, the output is RGBA
// otherwise it will be RGB.
// Returns true on success.
int ReadTIFF(const char* const filename,
struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_EXAMPLES_TIFFDEC_H_
| {
"pile_set_name": "Github"
} |
import argparse
import ast
import pprint
import mxnet as mx
from mxnet.module import Module
from symdata.loader import AnchorGenerator, AnchorSampler, AnchorLoader
from symnet.logger import logger
from symnet.model import load_param, infer_data_shape, check_shape, initialize_frcnn, get_fixed_params
from symnet.metric import RPNAccMetric, RPNLogLossMetric, RPNL1LossMetric, RCNNAccMetric, RCNNLogLossMetric, RCNNL1LossMetric
def train_net(sym, roidb, args):
    """Train a Faster R-CNN network symbol end-to-end with MXNet Module.

    Builds the anchor-based training loader, loads/initializes parameters,
    assembles RPN + RCNN metrics, sets up the SGD optimizer with a
    multi-factor LR schedule, and runs ``Module.fit``.

    :param sym: network symbol containing RPN and RCNN heads (from get_network).
    :param roidb: list of roidb image records (from get_dataset).
    :param args: parsed argparse namespace carrying all hyper-parameters.
    """
    # print config
    logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))
    # setup multi-gpu: effective batch size scales with the number of devices
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    batch_size = args.rcnn_batch_size * len(ctx)
    # load training data; feat_sym is used only to infer feature-map shapes
    feat_sym = sym.get_internals()['rpn_cls_score_output']
    ag = AnchorGenerator(feat_stride=args.rpn_feat_stride,
                         anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios)
    asp = AnchorSampler(allowed_border=args.rpn_allowed_border, batch_rois=args.rpn_batch_rois,
                        fg_fraction=args.rpn_fg_fraction, fg_overlap=args.rpn_fg_overlap,
                        bg_overlap=args.rpn_bg_overlap)
    train_data = AnchorLoader(roidb, batch_size, args.img_short_side, args.img_long_side,
                              args.img_pixel_means, args.img_pixel_stds, feat_sym, ag, asp, shuffle=True)
    # produce shape max possible (worst case: a square image at the long side)
    _, out_shape, _ = feat_sym.infer_shape(data=(1, 3, args.img_long_side, args.img_long_side))
    feat_height, feat_width = out_shape[0][-2:]
    rpn_num_anchors = len(args.rpn_anchor_scales) * len(args.rpn_anchor_ratios)
    data_names = ['data', 'im_info', 'gt_boxes']
    label_names = ['label', 'bbox_target', 'bbox_weight']
    data_shapes = [('data', (batch_size, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (batch_size, 3)),
                   ('gt_boxes', (batch_size, 100, 5))]
    label_shapes = [('label', (batch_size, 1, rpn_num_anchors * feat_height, feat_width)),
                    ('bbox_target', (batch_size, 4 * rpn_num_anchors, feat_height, feat_width)),
                    ('bbox_weight', (batch_size, 4 * rpn_num_anchors, feat_height, feat_width))]
    # print shapes
    data_shape_dict, out_shape_dict = infer_data_shape(sym, data_shapes + label_shapes)
    logger.info('max input shape\n%s' % pprint.pformat(data_shape_dict))
    logger.info('max output shape\n%s' % pprint.pformat(out_shape_dict))
    # load and initialize params; fresh runs also initialize the new detection layers
    if args.resume:
        arg_params, aux_params = load_param(args.resume)
    else:
        arg_params, aux_params = load_param(args.pretrained)
        arg_params, aux_params = initialize_frcnn(sym, data_shapes, arg_params, aux_params)
    # check parameter shapes
    check_shape(sym, data_shapes + label_shapes, arg_params, aux_params)
    # check fixed params (layers frozen during training)
    fixed_param_names = get_fixed_params(sym, args.net_fixed_params)
    logger.info('locking params\n%s' % pprint.pformat(fixed_param_names))
    # metric
    rpn_eval_metric = RPNAccMetric()
    rpn_cls_metric = RPNLogLossMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    eval_metric = RCNNAccMetric()
    cls_metric = RCNNLogLossMetric()
    bbox_metric = RCNNL1LossMetric()
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)
    # callback
    batch_end_callback = mx.callback.Speedometer(batch_size, frequent=args.log_interval, auto_reset=False)
    epoch_end_callback = mx.callback.do_checkpoint(args.save_prefix)
    # learning schedule: decay lr by lr_factor at each epoch in lr_decay_epoch,
    # adjusted for epochs already consumed when resuming (start_epoch)
    base_lr = args.lr
    lr_factor = 0.1
    lr_epoch = [int(epoch) for epoch in args.lr_decay_epoch.split(',')]
    lr_epoch_diff = [epoch - args.start_epoch for epoch in lr_epoch if epoch > args.start_epoch]
    lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
    logger.info('lr %f lr_epoch_diff %s lr_iters %s' % (lr, lr_epoch_diff, lr_iters))
    lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iters, lr_factor)
    # optimizer
    optimizer_params = {'momentum': 0.9,
                        'wd': 0.0005,
                        'learning_rate': lr,
                        'lr_scheduler': lr_scheduler,
                        'rescale_grad': (1.0 / batch_size),
                        'clip_gradient': 5}
    # train
    mod = Module(sym, data_names=data_names, label_names=label_names,
                 logger=logger, context=ctx, work_load_list=None,
                 fixed_param_names=fixed_param_names)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
            batch_end_callback=batch_end_callback, kvstore='device',
            optimizer='sgd', optimizer_params=optimizer_params,
            arg_params=arg_params, aux_params=aux_params, begin_epoch=args.start_epoch, num_epoch=args.epochs)
def parse_args():
    """Build the CLI parser and return the fully post-processed namespace.

    Structured options (pixel means/stds, anchor scales/ratios, fixed param
    names, pooled size, bbox stds) are passed as string literals on the
    command line and decoded with ``ast.literal_eval`` before returning.
    """
    parser = argparse.ArgumentParser(description='Train Faster R-CNN network',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--network', type=str, default='vgg16', help='base network')
    parser.add_argument('--pretrained', type=str, default='', help='path to pretrained model')
    parser.add_argument('--dataset', type=str, default='voc', help='training dataset')
    parser.add_argument('--imageset', type=str, default='', help='imageset splits')
    parser.add_argument('--gpus', type=str, default='0', help='gpu devices eg. 0,1')
    parser.add_argument('--epochs', type=int, default=10, help='training epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='base learning rate')
    parser.add_argument('--lr-decay-epoch', type=str, default='7', help='epoch to decay lr')
    parser.add_argument('--resume', type=str, default='', help='path to last saved model')
    parser.add_argument('--start-epoch', type=int, default=0, help='start epoch for resuming')
    parser.add_argument('--log-interval', type=int, default=100, help='logging mini batch interval')
    parser.add_argument('--save-prefix', type=str, default='', help='saving params prefix')
    # faster rcnn params
    parser.add_argument('--img-short-side', type=int, default=600)
    parser.add_argument('--img-long-side', type=int, default=1000)
    parser.add_argument('--img-pixel-means', type=str, default='(0.0, 0.0, 0.0)')
    parser.add_argument('--img-pixel-stds', type=str, default='(1.0, 1.0, 1.0)')
    parser.add_argument('--net-fixed-params', type=str, default='["conv0", "stage1", "gamma", "beta"]')
    parser.add_argument('--rpn-feat-stride', type=int, default=16)
    parser.add_argument('--rpn-anchor-scales', type=str, default='(8, 16, 32)')
    parser.add_argument('--rpn-anchor-ratios', type=str, default='(0.5, 1, 2)')
    parser.add_argument('--rpn-pre-nms-topk', type=int, default=12000)
    parser.add_argument('--rpn-post-nms-topk', type=int, default=2000)
    parser.add_argument('--rpn-nms-thresh', type=float, default=0.7)
    parser.add_argument('--rpn-min-size', type=int, default=16)
    parser.add_argument('--rpn-batch-rois', type=int, default=256)
    parser.add_argument('--rpn-allowed-border', type=int, default=0)
    parser.add_argument('--rpn-fg-fraction', type=float, default=0.5)
    parser.add_argument('--rpn-fg-overlap', type=float, default=0.7)
    parser.add_argument('--rpn-bg-overlap', type=float, default=0.3)
    parser.add_argument('--rcnn-num-classes', type=int, default=21)
    parser.add_argument('--rcnn-feat-stride', type=int, default=16)
    parser.add_argument('--rcnn-pooled-size', type=str, default='(14, 14)')
    parser.add_argument('--rcnn-batch-size', type=int, default=1)
    parser.add_argument('--rcnn-batch-rois', type=int, default=128)
    parser.add_argument('--rcnn-fg-fraction', type=float, default=0.25)
    parser.add_argument('--rcnn-fg-overlap', type=float, default=0.5)
    parser.add_argument('--rcnn-bbox-stds', type=str, default='(0.1, 0.1, 0.2, 0.2)')
    args = parser.parse_args()
    # Decode string-typed structured options into real tuples/lists.
    args.img_pixel_means = ast.literal_eval(args.img_pixel_means)
    args.img_pixel_stds = ast.literal_eval(args.img_pixel_stds)
    args.net_fixed_params = ast.literal_eval(args.net_fixed_params)
    args.rpn_anchor_scales = ast.literal_eval(args.rpn_anchor_scales)
    args.rpn_anchor_ratios = ast.literal_eval(args.rpn_anchor_ratios)
    args.rcnn_pooled_size = ast.literal_eval(args.rcnn_pooled_size)
    args.rcnn_bbox_stds = ast.literal_eval(args.rcnn_bbox_stds)
    return args
def get_voc(args):
    """Load PASCAL VOC roidbs (with flipped copies) for the configured image sets.

    Also sets ``args.rcnn_num_classes`` from the VOC class list. Multiple
    image sets may be combined with '+' (e.g. '2007_trainval+2012_trainval').
    """
    from symimdb.pascal_voc import PascalVOC
    if not args.imageset:
        args.imageset = '2007_trainval'
    args.rcnn_num_classes = len(PascalVOC.classes)

    combined_roidb = []
    for subset in args.imageset.split('+'):
        dataset = PascalVOC(subset, 'data', 'data/VOCdevkit')
        dataset.filter_roidb()
        dataset.append_flipped_images()
        combined_roidb.extend(dataset.roidb)
    return combined_roidb
def get_coco(args):
    """Load COCO roidbs (with flipped copies) for the configured image sets.

    Also sets ``args.rcnn_num_classes`` from the COCO class list. Multiple
    image sets may be combined with '+'.
    """
    from symimdb.coco import coco
    if not args.imageset:
        args.imageset = 'train2017'
    args.rcnn_num_classes = len(coco.classes)

    combined_roidb = []
    for subset in args.imageset.split('+'):
        dataset = coco(subset, 'data', 'data/coco')
        dataset.filter_roidb()
        dataset.append_flipped_images()
        combined_roidb.extend(dataset.roidb)
    return combined_roidb
def get_vgg16_train(args):
    """Apply VGG16-specific defaults to ``args`` and build the training symbol."""
    from symnet.symbol_vgg import get_vgg_train

    # Fall back to the stock checkpoint/prefix when not given on the command line.
    args.pretrained = args.pretrained or 'model/vgg16-0000.params'
    args.save_prefix = args.save_prefix or 'model/vgg16'

    # VGG16-specific preprocessing and architecture constants.
    args.img_pixel_means = (123.68, 116.779, 103.939)
    args.img_pixel_stds = (1.0, 1.0, 1.0)
    args.net_fixed_params = ['conv1', 'conv2']
    args.rpn_feat_stride = 16
    args.rcnn_feat_stride = 16
    args.rcnn_pooled_size = (7, 7)

    symbol_kwargs = dict(
        anchor_scales=args.rpn_anchor_scales,
        anchor_ratios=args.rpn_anchor_ratios,
        rpn_feature_stride=args.rpn_feat_stride,
        rpn_pre_topk=args.rpn_pre_nms_topk,
        rpn_post_topk=args.rpn_post_nms_topk,
        rpn_nms_thresh=args.rpn_nms_thresh,
        rpn_min_size=args.rpn_min_size,
        rpn_batch_rois=args.rpn_batch_rois,
        num_classes=args.rcnn_num_classes,
        rcnn_feature_stride=args.rcnn_feat_stride,
        rcnn_pooled_size=args.rcnn_pooled_size,
        rcnn_batch_size=args.rcnn_batch_size,
        rcnn_batch_rois=args.rcnn_batch_rois,
        rcnn_fg_fraction=args.rcnn_fg_fraction,
        rcnn_fg_overlap=args.rcnn_fg_overlap,
        rcnn_bbox_stds=args.rcnn_bbox_stds,
    )
    return get_vgg_train(**symbol_kwargs)
def get_resnet50_train(args):
    """Apply ResNet-50 defaults to ``args`` and build the training symbol."""
    from symnet.symbol_resnet import get_resnet_train

    # Fall back to the stock checkpoint/prefix when not given on the command line.
    args.pretrained = args.pretrained or 'model/resnet-50-0000.params'
    args.save_prefix = args.save_prefix or 'model/resnet50'

    # ResNet-specific preprocessing and architecture constants.
    args.img_pixel_means = (0.0, 0.0, 0.0)
    args.img_pixel_stds = (1.0, 1.0, 1.0)
    args.net_fixed_params = ['conv0', 'stage1', 'gamma', 'beta']
    args.rpn_feat_stride = 16
    args.rcnn_feat_stride = 16
    args.rcnn_pooled_size = (14, 14)

    symbol_kwargs = dict(
        anchor_scales=args.rpn_anchor_scales,
        anchor_ratios=args.rpn_anchor_ratios,
        rpn_feature_stride=args.rpn_feat_stride,
        rpn_pre_topk=args.rpn_pre_nms_topk,
        rpn_post_topk=args.rpn_post_nms_topk,
        rpn_nms_thresh=args.rpn_nms_thresh,
        rpn_min_size=args.rpn_min_size,
        rpn_batch_rois=args.rpn_batch_rois,
        num_classes=args.rcnn_num_classes,
        rcnn_feature_stride=args.rcnn_feat_stride,
        rcnn_pooled_size=args.rcnn_pooled_size,
        rcnn_batch_size=args.rcnn_batch_size,
        rcnn_batch_rois=args.rcnn_batch_rois,
        rcnn_fg_fraction=args.rcnn_fg_fraction,
        rcnn_fg_overlap=args.rcnn_fg_overlap,
        rcnn_bbox_stds=args.rcnn_bbox_stds,
    )
    # ResNet-50 stage layout and channel widths.
    return get_resnet_train(units=(3, 4, 6, 3), filter_list=(256, 512, 1024, 2048),
                            **symbol_kwargs)
def get_resnet101_train(args):
    """Apply ResNet-101 defaults to ``args`` and build the training symbol."""
    from symnet.symbol_resnet import get_resnet_train

    # Fall back to the stock checkpoint/prefix when not given on the command line.
    args.pretrained = args.pretrained or 'model/resnet-101-0000.params'
    args.save_prefix = args.save_prefix or 'model/resnet101'

    # ResNet-specific preprocessing and architecture constants.
    args.img_pixel_means = (0.0, 0.0, 0.0)
    args.img_pixel_stds = (1.0, 1.0, 1.0)
    args.net_fixed_params = ['conv0', 'stage1', 'gamma', 'beta']
    args.rpn_feat_stride = 16
    args.rcnn_feat_stride = 16
    args.rcnn_pooled_size = (14, 14)

    symbol_kwargs = dict(
        anchor_scales=args.rpn_anchor_scales,
        anchor_ratios=args.rpn_anchor_ratios,
        rpn_feature_stride=args.rpn_feat_stride,
        rpn_pre_topk=args.rpn_pre_nms_topk,
        rpn_post_topk=args.rpn_post_nms_topk,
        rpn_nms_thresh=args.rpn_nms_thresh,
        rpn_min_size=args.rpn_min_size,
        rpn_batch_rois=args.rpn_batch_rois,
        num_classes=args.rcnn_num_classes,
        rcnn_feature_stride=args.rcnn_feat_stride,
        rcnn_pooled_size=args.rcnn_pooled_size,
        rcnn_batch_size=args.rcnn_batch_size,
        rcnn_batch_rois=args.rcnn_batch_rois,
        rcnn_fg_fraction=args.rcnn_fg_fraction,
        rcnn_fg_overlap=args.rcnn_fg_overlap,
        rcnn_bbox_stds=args.rcnn_bbox_stds,
    )
    # ResNet-101 stage layout and channel widths.
    return get_resnet_train(units=(3, 4, 23, 3), filter_list=(256, 512, 1024, 2048),
                            **symbol_kwargs)
def get_dataset(dataset, args):
    """Dispatch to the roidb loader for ``dataset`` ('voc' or 'coco').

    :raises ValueError: if the dataset name is not recognized.
    """
    loaders = {
        'voc': get_voc,
        'coco': get_coco,
    }
    if dataset not in loaders:
        raise ValueError("dataset {} not supported".format(dataset))
    return loaders[dataset](args)
def get_network(network, args):
    """Dispatch to the symbol builder for ``network``.

    :raises ValueError: if the network name is not recognized.
    """
    builders = {
        'vgg16': get_vgg16_train,
        'resnet50': get_resnet50_train,
        'resnet101': get_resnet101_train,
    }
    if network not in builders:
        raise ValueError("network {} not supported".format(network))
    return builders[network](args)
def main():
    """Command-line entry point: parse args, load data, build symbol, train."""
    config = parse_args()
    roidb = get_dataset(config.dataset, config)
    symbol = get_network(config.network, config)
    train_net(symbol, roidb, config)


if __name__ == '__main__':
    main()
| {
"pile_set_name": "Github"
} |
[nosetests]
verbose=True
detailed-errors=True
with-gae=True
stop=True
without-sandbox=True
with-coverage=True
| {
"pile_set_name": "Github"
} |
/* Copyright 2008, Google Inc.
* All rights reserved.
*
* Code released into the public domain.
*
* curve25519-donna: Curve25519 elliptic curve, public key function
*
* http://code.google.com/p/curve25519-donna/
*
* Adam Langley <[email protected]>
* Parts optimised by floodyberry
* Derived from public domain C code by Daniel J. Bernstein <[email protected]>
*
* More information about curve25519 can be found here
* http://cr.yp.to/ecdh.html
*
* djb's sample implementation of curve25519 is written in a special assembly
* language called qhasm and uses the floating point registers.
*
* This is, almost, a clean room reimplementation from the curve25519 paper. It
* uses many of the tricks described therein. Only the crecip function is taken
* from the sample implementation.
*/
#include <stdint.h>
#include <string.h>
#ifdef HAVE_TI_MODE
#include "../scalarmult_curve25519.h"
#include "curve25519_donna_c64.h"
#include "utils.h"
typedef uint8_t u8;
typedef uint64_t limb;
typedef limb felem[5];
/* Special gcc mode for 128-bit integers */
typedef unsigned uint128_t __attribute__((mode(TI)));
/* Sum two numbers: output += in */
/* Sum two numbers: output += in.
 * Plain limb-wise addition; no carry propagation is done here. */
static inline void
fsum(limb *output, const limb *in)
{
    unsigned i;

    for (i = 0; i < 5; i++) {
        output[i] += in[i];
    }
}
/* Find the difference of two numbers: output = in - output
* (note the order of the arguments!)
*
* Assumes that out[i] < 2**52
* On return, out[i] < 2**55
*/
static inline void
fdifference_backwards(felem out, const felem in)
{
    /* 152 is 19 << 3 */
    /* Bias each limb by a constant (a multiple of the field prime, per the
     * constants' derivation from 19) before subtracting, so no limb can
     * underflow; the result stays congruent mod 2^255 - 19. */
    static const limb two54m152 = (((limb)1) << 54) - 152;
    static const limb two54m8 = (((limb)1) << 54) - 8;

    out[0] = in[0] + two54m152 - out[0];
    out[1] = in[1] + two54m8 - out[1];
    out[2] = in[2] + two54m8 - out[2];
    out[3] = in[3] + two54m8 - out[3];
    out[4] = in[4] + two54m8 - out[4];
}
/* Multiply a number by a scalar: output = in * scalar */
static inline void
fscalar_product(felem output, const felem in, const limb scalar)
{
    uint128_t a;

    /* Multiply each 51-bit limb by the scalar in 128-bit arithmetic,
     * propagating the carry (a >> 51) into the next limb. */
    a = in[0] * (uint128_t)scalar;
    output[0] = ((limb)a) & 0x7ffffffffffff;
    a = in[1] * (uint128_t)scalar + ((limb)(a >> 51));
    output[1] = ((limb)a) & 0x7ffffffffffff;
    a = in[2] * (uint128_t)scalar + ((limb)(a >> 51));
    output[2] = ((limb)a) & 0x7ffffffffffff;
    a = in[3] * (uint128_t)scalar + ((limb)(a >> 51));
    output[3] = ((limb)a) & 0x7ffffffffffff;
    a = in[4] * (uint128_t)scalar + ((limb)(a >> 51));
    output[4] = ((limb)a) & 0x7ffffffffffff;
    /* Fold the final carry back into limb 0: 2^255 = 19 (mod 2^255 - 19). */
    output[0] += (a >> 51) * 19;
}
/* Multiply two numbers: output = in2 * in
*
* output must be distinct to both inputs. The inputs are reduced coefficient
* form, the output is not.
*
* Assumes that in[i] < 2**55 and likewise for in2.
* On return, output[i] < 2**52
*/
static inline void
fmul(felem output, const felem in2, const felem in)
{
    uint128_t t[5];
    limb r0, r1, r2, r3, r4, s0, s1, s2, s3, s4, c;

    /* Load both operands into locals (5 limbs of ~51 bits each). */
    r0 = in[0];
    r1 = in[1];
    r2 = in[2];
    r3 = in[3];
    r4 = in[4];
    s0 = in2[0];
    s1 = in2[1];
    s2 = in2[2];
    s3 = in2[3];
    s4 = in2[4];
    /* Schoolbook multiply: t[k] accumulates the coefficient of 2^(51*k)
     * for the product terms whose limb indices sum to k. */
    t[0] = ((uint128_t)r0) * s0;
    t[1] = ((uint128_t)r0) * s1 + ((uint128_t)r1) * s0;
    t[2] = ((uint128_t)r0) * s2 + ((uint128_t)r2) * s0 + ((uint128_t)r1) * s1;
    t[3] = ((uint128_t)r0) * s3 + ((uint128_t)r3) * s0 + ((uint128_t)r1) * s2
           + ((uint128_t)r2) * s1;
    t[4] = ((uint128_t)r0) * s4 + ((uint128_t)r4) * s0 + ((uint128_t)r3) * s1
           + ((uint128_t)r1) * s3 + ((uint128_t)r2) * s2;
    /* High-half terms (limb indices summing to 5..8) wrap around modulo
     * 2^255 - 19, which multiplies them by 19; pre-scale r1..r4 by 19. */
    r4 *= 19;
    r1 *= 19;
    r2 *= 19;
    r3 *= 19;
    t[0] += ((uint128_t)r4) * s1 + ((uint128_t)r1) * s4 + ((uint128_t)r2) * s3
            + ((uint128_t)r3) * s2;
    t[1] += ((uint128_t)r4) * s2 + ((uint128_t)r2) * s4 + ((uint128_t)r3) * s3;
    t[2] += ((uint128_t)r4) * s3 + ((uint128_t)r3) * s4;
    t[3] += ((uint128_t)r4) * s4;
    /* Carry chain: reduce each accumulator to 51 bits, pushing the carry up,
     * and fold the carry out of t[4] back into limb 0 (times 19). */
    r0 = (limb)t[0] & 0x7ffffffffffff;
    c = (limb)(t[0] >> 51);
    t[1] += c;
    r1 = (limb)t[1] & 0x7ffffffffffff;
    c = (limb)(t[1] >> 51);
    t[2] += c;
    r2 = (limb)t[2] & 0x7ffffffffffff;
    c = (limb)(t[2] >> 51);
    t[3] += c;
    r3 = (limb)t[3] & 0x7ffffffffffff;
    c = (limb)(t[3] >> 51);
    t[4] += c;
    r4 = (limb)t[4] & 0x7ffffffffffff;
    c = (limb)(t[4] >> 51);
    r0 += c * 19;
    /* Two extra single-limb carries to bring r0/r1 back under 2^51. */
    c = r0 >> 51;
    r0 = r0 & 0x7ffffffffffff;
    r1 += c;
    c = r1 >> 51;
    r1 = r1 & 0x7ffffffffffff;
    r2 += c;

    output[0] = r0;
    output[1] = r1;
    output[2] = r2;
    output[3] = r3;
    output[4] = r4;
}
/* Square a number `count` times: output = in^(2^count).
 * Same limb layout and reduction strategy as fmul, but exploits the
 * symmetry of squaring (cross terms doubled via d0/d1/d2/d4). */
static inline void
fsquare_times(felem output, const felem in, limb count)
{
    uint128_t t[5];
    limb r0, r1, r2, r3, r4, c;
    limb d0, d1, d2, d4, d419;

    r0 = in[0];
    r1 = in[1];
    r2 = in[2];
    r3 = in[3];
    r4 = in[4];
    do {
        /* Doubled / 19-scaled limbs for the symmetric and wrapped terms. */
        d0 = r0 * 2;
        d1 = r1 * 2;
        d2 = r2 * 2 * 19;
        d419 = r4 * 19;
        d4 = d419 * 2;
        t[0] = ((uint128_t)r0) * r0 + ((uint128_t)d4) * r1
               + (((uint128_t)d2) * (r3));
        t[1] = ((uint128_t)d0) * r1 + ((uint128_t)d4) * r2
               + (((uint128_t)r3) * (r3 * 19));
        t[2] = ((uint128_t)d0) * r2 + ((uint128_t)r1) * r1
               + (((uint128_t)d4) * (r3));
        t[3] = ((uint128_t)d0) * r3 + ((uint128_t)d1) * r2
               + (((uint128_t)r4) * (d419));
        t[4] = ((uint128_t)d0) * r4 + ((uint128_t)d1) * r3
               + (((uint128_t)r2) * (r2));
        /* Carry chain identical to fmul's. */
        r0 = (limb)t[0] & 0x7ffffffffffff;
        c = (limb)(t[0] >> 51);
        t[1] += c;
        r1 = (limb)t[1] & 0x7ffffffffffff;
        c = (limb)(t[1] >> 51);
        t[2] += c;
        r2 = (limb)t[2] & 0x7ffffffffffff;
        c = (limb)(t[2] >> 51);
        t[3] += c;
        r3 = (limb)t[3] & 0x7ffffffffffff;
        c = (limb)(t[3] >> 51);
        t[4] += c;
        r4 = (limb)t[4] & 0x7ffffffffffff;
        c = (limb)(t[4] >> 51);
        r0 += c * 19;
        c = r0 >> 51;
        r0 = r0 & 0x7ffffffffffff;
        r1 += c;
        c = r1 >> 51;
        r1 = r1 & 0x7ffffffffffff;
        r2 += c;
    } while (--count);

    output[0] = r0;
    output[1] = r1;
    output[2] = r2;
    output[3] = r3;
    output[4] = r4;
}
#ifdef NATIVE_LITTLE_ENDIAN
/* Fast path: on little-endian targets a 64-bit limb is read/written directly
 * from the byte stream; memcpy keeps unaligned access well-defined. */
static inline limb
load_limb(const u8 *in)
{
    limb out;

    memcpy(&out, in, sizeof(limb));
    return out;
}

static inline void
store_limb(u8 *out, limb in)
{
    memcpy(out, &in, sizeof(limb));
}
#else
/* Portable fallback: assemble/emit the 64-bit limb byte by byte,
 * little-endian regardless of host byte order. */
static inline limb
load_limb(const u8 *in)
{
    return ((limb)in[0]) | (((limb)in[1]) << 8) | (((limb)in[2]) << 16)
           | (((limb)in[3]) << 24) | (((limb)in[4]) << 32)
           | (((limb)in[5]) << 40) | (((limb)in[6]) << 48)
           | (((limb)in[7]) << 56);
}

static inline void
store_limb(u8 *out, limb in)
{
    out[0] = in & 0xff;
    out[1] = (in >> 8) & 0xff;
    out[2] = (in >> 16) & 0xff;
    out[3] = (in >> 24) & 0xff;
    out[4] = (in >> 32) & 0xff;
    out[5] = (in >> 40) & 0xff;
    out[6] = (in >> 48) & 0xff;
    out[7] = (in >> 56) & 0xff;
}
#endif
/* Take a little-endian, 32-byte number and expand it into polynomial form */
static void
fexpand(limb *output, const u8 *in)
{
    /* Limb i holds bits [51*i, 51*i + 50] of the little-endian input:
     * byte offsets 0, 6, 12, 19, 24 with shifts 0, 3, 6, 1, 12 correspond
     * to bit offsets 0, 51, 102, 153 and 204. */
    output[0] = load_limb(in) & 0x7ffffffffffff;
    output[1] = (load_limb(in + 6) >> 3) & 0x7ffffffffffff;
    output[2] = (load_limb(in + 12) >> 6) & 0x7ffffffffffff;
    output[3] = (load_limb(in + 19) >> 1) & 0x7ffffffffffff;
    output[4] = (load_limb(in + 24) >> 12) & 0x7ffffffffffff;
}
/* Take a fully reduced polynomial form number and contract it into a
* little-endian, 32-byte array
*/
static void
fcontract(u8 *output, const felem input)
{
    uint128_t t[5];

    t[0] = input[0];
    t[1] = input[1];
    t[2] = input[2];
    t[3] = input[3];
    t[4] = input[4];

    /* First carry pass: push each limb's excess into the next; the overflow
     * out of limb 4 wraps to limb 0 multiplied by 19 (2^255 = 19 mod p). */
    t[1] += t[0] >> 51;
    t[0] &= 0x7ffffffffffff;
    t[2] += t[1] >> 51;
    t[1] &= 0x7ffffffffffff;
    t[3] += t[2] >> 51;
    t[2] &= 0x7ffffffffffff;
    t[4] += t[3] >> 51;
    t[3] &= 0x7ffffffffffff;
    t[0] += 19 * (t[4] >> 51);
    t[4] &= 0x7ffffffffffff;
    /* Second identical pass; after it no further carries can occur. */
    t[1] += t[0] >> 51;
    t[0] &= 0x7ffffffffffff;
    t[2] += t[1] >> 51;
    t[1] &= 0x7ffffffffffff;
    t[3] += t[2] >> 51;
    t[2] &= 0x7ffffffffffff;
    t[4] += t[3] >> 51;
    t[3] &= 0x7ffffffffffff;
    t[0] += 19 * (t[4] >> 51);
    t[4] &= 0x7ffffffffffff;

    /* now t is between 0 and 2^255-1, properly carried. */
    /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */
    /* Adding 19 makes case 2 wrap past 2^255, distinguishing the two cases
     * in the carry bits without a data-dependent branch. */
    t[0] += 19;

    t[1] += t[0] >> 51;
    t[0] &= 0x7ffffffffffff;
    t[2] += t[1] >> 51;
    t[1] &= 0x7ffffffffffff;
    t[3] += t[2] >> 51;
    t[2] &= 0x7ffffffffffff;
    t[4] += t[3] >> 51;
    t[3] &= 0x7ffffffffffff;
    t[0] += 19 * (t[4] >> 51);
    t[4] &= 0x7ffffffffffff;

    /* now between 19 and 2^255-1 in both cases, and offset by 19. */
    t[0] += 0x8000000000000 - 19;
    t[1] += 0x8000000000000 - 1;
    t[2] += 0x8000000000000 - 1;
    t[3] += 0x8000000000000 - 1;
    t[4] += 0x8000000000000 - 1;

    /* now between 2^255 and 2^256-20, and offset by 2^255. */
    t[1] += t[0] >> 51;
    t[0] &= 0x7ffffffffffff;
    t[2] += t[1] >> 51;
    t[1] &= 0x7ffffffffffff;
    t[3] += t[2] >> 51;
    t[2] &= 0x7ffffffffffff;
    t[4] += t[3] >> 51;
    t[3] &= 0x7ffffffffffff;
    /* Masking off the top bit of limb 4 drops the 2^255 offset. */
    t[4] &= 0x7ffffffffffff;

    /* Repack 5 x 51-bit limbs into 4 little-endian 64-bit words. */
    store_limb(output, t[0] | (t[1] << 51));
    store_limb(output + 8, (t[1] >> 13) | (t[2] << 38));
    store_limb(output + 16, (t[2] >> 26) | (t[3] << 25));
    store_limb(output + 24, (t[3] >> 39) | (t[4] << 12));
}
/* Input: Q, Q', Q-Q'
* Output: 2Q, Q+Q'
*
* x2 z2: long form
* x3 z3: long form
* x z: short form, destroyed
* xprime zprime: short form, destroyed
* qmqp: short form, preserved
*/
static void
fmonty(limb *x2, limb *z2, /* output 2Q */
       limb *x3, limb *z3, /* output Q + Q' */
       limb *x, limb *z, /* input Q */
       limb *xprime, limb *zprime, /* input Q' */
       const limb *qmqp /* input Q - Q' */)
{
    limb origx[5], origxprime[5], zzz[5], xx[5], zz[5], xxprime[5], zzprime[5],
        zzzprime[5];

    /* Form the sums and differences (x+z, x-z) of both input points. */
    memcpy(origx, x, 5 * sizeof(limb));
    fsum(x, z);
    fdifference_backwards(z, origx); /* does x - z */
    memcpy(origxprime, xprime, sizeof(limb) * 5);
    fsum(xprime, zprime);
    fdifference_backwards(zprime, origxprime);
    /* Differential addition: Q + Q' from the cross products, using the
     * known difference Q - Q' (qmqp) to recover z3. */
    fmul(xxprime, xprime, z);
    fmul(zzprime, x, zprime);
    memcpy(origxprime, xxprime, sizeof(limb) * 5);
    fsum(xxprime, zzprime);
    fdifference_backwards(zzprime, origxprime);
    fsquare_times(x3, xxprime, 1);
    fsquare_times(zzzprime, zzprime, 1);
    fmul(z3, zzzprime, qmqp);
    /* Doubling: 2Q from (x+z)^2, (x-z)^2 and the curve constant
     * (A-2)/4 = 121665. */
    fsquare_times(xx, x, 1);
    fsquare_times(zz, z, 1);
    fmul(x2, xx, zz);
    fdifference_backwards(zz, xx); /* does zz = xx - zz */
    fscalar_product(zzz, zz, 121665);
    fsum(zzz, xx);
    fmul(z2, zz, zzz);
}
/* -----------------------------------------------------------------------------
Maybe swap the contents of two limb arrays (@a and @b), each @len elements
long. Perform the swap iff @swap is non-zero.
This function performs the swap without leaking any side-channel
information.
-----------------------------------------------------------------------------
*/
static void
swap_conditional(limb a[5], limb b[5], limb iswap)
{
    /* iswap is 0 or 1; negating it yields an all-zeros or all-ones mask, so
     * the swap below is branch-free (no secret-dependent control flow). */
    const limb swap = -iswap;
    unsigned i;

    for (i = 0; i < 5; ++i) {
        /* x is a[i]^b[i] when swapping, 0 otherwise; XOR-ing it into both
         * either exchanges the limbs or leaves them untouched. */
        const limb x = swap & (a[i] ^ b[i]);
        a[i] ^= x;
        b[i] ^= x;
    }
}
/* Calculates nQ where Q is the x-coordinate of a point on the curve
*
* resultx/resultz: the x coordinate of the resulting curve point (short form)
* n: a little endian, 32-byte number
* q: a point of the curve (short form)
*/
static void
cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q)
{
    /* Montgomery ladder state: (nqx, nqz) = [k]Q and (nqpqx, nqpqz) = [k+1]Q
     * for the bits of n processed so far; the *2 arrays are the outputs of
     * each ladder step, swapped in via pointer rotation. */
    limb a[5] = { 0 }, b[5] = { 1 }, c[5] = { 1 }, d[5] = { 0 };
    limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
    limb e[5] = { 0 }, f[5] = { 1 }, g[5] = { 0 }, h[5] = { 1 };
    limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
    unsigned i, j;

    memcpy(nqpqx, q, sizeof(limb) * 5);

    /* Scan the scalar most-significant byte and bit first. */
    for (i = 0; i < 32; ++i) {
        u8 byte = n[31 - i];
        for (j = 0; j < 8; ++j) {
            const limb bit = byte >> 7;

            /* Constant-time conditional swaps keep the ladder uniform
             * regardless of the secret bit value. */
            swap_conditional(nqx, nqpqx, bit);
            swap_conditional(nqz, nqpqz, bit);
            fmonty(nqx2, nqz2, nqpqx2, nqpqz2, nqx, nqz, nqpqx, nqpqz, q);
            swap_conditional(nqx2, nqpqx2, bit);
            swap_conditional(nqz2, nqpqz2, bit);

            /* Rotate the double buffers: step outputs become next inputs. */
            t = nqx;
            nqx = nqx2;
            nqx2 = t;
            t = nqz;
            nqz = nqz2;
            nqz2 = t;
            t = nqpqx;
            nqpqx = nqpqx2;
            nqpqx2 = t;
            t = nqpqz;
            nqpqz = nqpqz2;
            nqpqz2 = t;

            byte <<= 1;
        }
    }

    memcpy(resultx, nqx, sizeof(limb) * 5);
    memcpy(resultz, nqz, sizeof(limb) * 5);
}
/* -----------------------------------------------------------------------------
Shamelessly copied from djb's code, tightened a little
-----------------------------------------------------------------------------
*/
/* Field inversion via Fermat's little theorem: out = z^(p-2) = z^(2^255 - 21)
 * mod 2^255 - 19, built from the addition chain annotated below (each comment
 * gives the exponent accumulated so far). */
static void
crecip(felem out, const felem z)
{
    felem a, t0, b, c;

    /* 2 */ fsquare_times(a, z, 1); /* a = 2 */
    /* 8 */ fsquare_times(t0, a, 2);
    /* 9 */ fmul(b, t0, z); /* b = 9 */
    /* 11 */ fmul(a, b, a); /* a = 11 */
    /* 22 */ fsquare_times(t0, a, 1);
    /* 2^5 - 2^0 = 31 */ fmul(b, t0, b);
    /* 2^10 - 2^5 */ fsquare_times(t0, b, 5);
    /* 2^10 - 2^0 */ fmul(b, t0, b);
    /* 2^20 - 2^10 */ fsquare_times(t0, b, 10);
    /* 2^20 - 2^0 */ fmul(c, t0, b);
    /* 2^40 - 2^20 */ fsquare_times(t0, c, 20);
    /* 2^40 - 2^0 */ fmul(t0, t0, c);
    /* 2^50 - 2^10 */ fsquare_times(t0, t0, 10);
    /* 2^50 - 2^0 */ fmul(b, t0, b);
    /* 2^100 - 2^50 */ fsquare_times(t0, b, 50);
    /* 2^100 - 2^0 */ fmul(c, t0, b);
    /* 2^200 - 2^100 */ fsquare_times(t0, c, 100);
    /* 2^200 - 2^0 */ fmul(t0, t0, c);
    /* 2^250 - 2^50 */ fsquare_times(t0, t0, 50);
    /* 2^250 - 2^0 */ fmul(t0, t0, b);
    /* 2^255 - 2^5 */ fsquare_times(t0, t0, 5);
    /* 2^255 - 21 */ fmul(out, t0, a);
}
/* The standard Curve25519 base point has u-coordinate 9. */
static const unsigned char basepoint[32] = { 9 };

static int
crypto_scalarmult_curve25519_donna_c64(unsigned char *mypublic,
                                       const unsigned char *secret,
                                       const unsigned char *basepoint)
{
    /* NOTE: the `basepoint` parameter intentionally shadows the file-static
     * array above; the static one is only referenced by the _base variant. */
    limb bp[5], x[5], z[5], zmone[5];
    uint8_t e[32];
    int i;

    for (i = 0; i < 32; ++i) {
        e[i] = secret[i];
    }
    /* Clamp the scalar: clear the 3 low bits, clear the top bit, set bit 254. */
    e[0] &= 248;
    e[31] &= 127;
    e[31] |= 64;
    fexpand(bp, basepoint);
    /* Montgomery ladder, then projective (x : z) -> affine via z^-1. */
    cmult(x, z, e, bp);
    crecip(zmone, z);
    fmul(z, x, zmone);
    fcontract(mypublic, z);
    return 0;
}
/* Scalar multiplication with the standard base point (u = 9). */
static int
crypto_scalarmult_curve25519_donna_c64_base(unsigned char *q,
                                            const unsigned char *n)
{
    return crypto_scalarmult_curve25519_donna_c64(q, n, basepoint);
}
/* Implementation descriptor (function-pointer table) exported to the
 * scalarmult front end; SODIUM_C99 expands to designated initializers
 * where supported. */
struct crypto_scalarmult_curve25519_implementation
    crypto_scalarmult_curve25519_donna_c64_implementation = {
        SODIUM_C99(.mult =) crypto_scalarmult_curve25519_donna_c64,
        SODIUM_C99(.mult_base =) crypto_scalarmult_curve25519_donna_c64_base
    };
#endif
| {
"pile_set_name": "Github"
} |
#!/bin/sh
# This will generate the list of feature flags for implemented symbols.
#
# Pipeline:
#   nm   - dump the symbol table of the compiled ck_pr validation binary
#   cut  - keep only the symbol name (third space-separated field)
#   sed  - rewrite the ck_pr prefix to the ck_f_pr feature-flag prefix
#          (first occurrence per line)
#   awk  - for lines starting with ck_f_pr, emit "#define <NAME>" uppercased
#   sort - deterministic, ordered output
echo '/* DO NOT EDIT. This is auto-generated from feature.sh */'
nm ../regressions/ck_pr/validate/ck_pr_cas|cut -d ' ' -f 3|sed s/ck_pr/ck_f_pr/|awk '/^ck_f_pr/ {print "#define " toupper($1);}'|sort
| {
"pile_set_name": "Github"
} |

A JavaScript library for arbitrary-precision decimal and non-decimal arithmetic.
[](https://travis-ci.org/MikeMcl/bignumber.js)
<br />
## Features
- Faster, smaller, and perhaps easier to use than JavaScript versions of Java's BigDecimal
- 8 KB minified and gzipped
- Simple API but full-featured
- Works with numbers with or without fraction digits in bases from 2 to 64 inclusive
- Replicates the `toExponential`, `toFixed`, `toPrecision` and `toString` methods of JavaScript's Number type
- Includes a `toFraction` and a correctly-rounded `squareRoot` method
- Supports cryptographically-secure pseudo-random number generation
- No dependencies
- Wide platform compatibility: uses JavaScript 1.5 (ECMAScript 3) features only
- Comprehensive [documentation](http://mikemcl.github.io/bignumber.js/) and test set

If a smaller and simpler library is required see [big.js](https://github.com/MikeMcl/big.js/).
It's less than half the size but only works with decimal numbers and only has half the methods.
It also does not allow `NaN` or `Infinity`, or have the configuration options of this library.
See also [decimal.js](https://github.com/MikeMcl/decimal.js/), which among other things adds support for non-integer powers, and performs all operations to a specified number of significant digits.
## Load
The library is the single JavaScript file *bignumber.js* (or minified, *bignumber.min.js*).
Browser:
```html
<script src='path/to/bignumber.js'></script>
```
[Node.js](http://nodejs.org):
```bash
$ npm install --save bignumber.js
```
```js
var BigNumber = require('bignumber.js');
```
ES6 module (*bignumber.mjs*):
```js
//import BigNumber from 'bignumber.js';
import {BigNumber} from 'bignumber.js';
```
AMD loader libraries such as [requireJS](http://requirejs.org/):
```js
require(['bignumber'], function(BigNumber) {
// Use BigNumber here in local scope. No global BigNumber.
});
```
## Use
*In all examples below, `var`, semicolons and `toString` calls are not shown.
If a commented-out value is in quotes it means `toString` has been called on the preceding expression.*
The library exports a single function: `BigNumber`, the constructor of BigNumber instances.
It accepts a value of type number *(up to 15 significant digits only)*, string or BigNumber object,
```javascript
x = new BigNumber(123.4567)
y = BigNumber('123456.7e-3')
z = new BigNumber(x)
x.isEqualTo(y) && y.isEqualTo(z) && x.isEqualTo(z) // true
```
and a base from 2 to 36 inclusive can be specified.
```javascript
x = new BigNumber(1011, 2) // "11"
y = new BigNumber('zz.9', 36) // "1295.25"
z = x.plus(y) // "1306.25"
```
A BigNumber is immutable in the sense that it is not changed by its methods.
```javascript
0.3 - 0.1 // 0.19999999999999998
x = new BigNumber(0.3)
x.minus(0.1) // "0.2"
x // "0.3"
```
The methods that return a BigNumber can be chained.
```javascript
x.dividedBy(y).plus(z).times(9)
x.times('1.23456780123456789e+9').plus(9876.5432321).dividedBy('4444562598.111772').integerValue()
```
Some of the longer method names have a shorter alias.
```javascript
x.squareRoot().dividedBy(y).exponentiatedBy(3).isEqualTo( x.sqrt().div(y).pow(3) ) // true
x.modulo(y).multipliedBy(z).eq( x.mod(y).times(z) ) // true
```
Like JavaScript's number type, there are `toExponential`, `toFixed` and `toPrecision` methods
```javascript
x = new BigNumber(255.5)
x.toExponential(5) // "2.55500e+2"
x.toFixed(5) // "255.50000"
x.toPrecision(5) // "255.50"
x.toNumber() // 255.5
```
and a base can be specified for `toString`.
```javascript
x.toString(16) // "ff.8"
```
There is also a `toFormat` method which may be useful for internationalisation
```javascript
y = new BigNumber('1234567.898765')
y.toFormat(2) // "1,234,567.90"
```
The maximum number of decimal places of the result of an operation involving division (i.e. a division, square root, base conversion or negative power operation) is set using the `config` method of the `BigNumber` constructor.
The other arithmetic operations always give the exact result.
```javascript
BigNumber.config({ DECIMAL_PLACES: 10, ROUNDING_MODE: 4 })
x = new BigNumber(2);
y = new BigNumber(3);
z = x.dividedBy(y) // "0.6666666667"
z.squareRoot() // "0.8164965809"
z.exponentiatedBy(-3) // "3.3749999995"
z.toString(2) // "0.1010101011"
z.multipliedBy(z) // "0.44444444448888888889"
z.multipliedBy(z).decimalPlaces(10) // "0.4444444445"
```
There is a `toFraction` method with an optional *maximum denominator* argument
```javascript
y = new BigNumber(355)
pi = y.dividedBy(113) // "3.1415929204"
pi.toFraction() // [ "7853982301", "2500000000" ]
pi.toFraction(1000) // [ "355", "113" ]
```
and `isNaN` and `isFinite` methods, as `NaN` and `Infinity` are valid `BigNumber` values.
```javascript
x = new BigNumber(NaN) // "NaN"
y = new BigNumber(Infinity) // "Infinity"
x.isNaN() && !y.isNaN() && !x.isFinite() && !y.isFinite() // true
```
The value of a BigNumber is stored in a decimal floating point format in terms of a coefficient, exponent and sign.
```javascript
x = new BigNumber(-123.456);
x.c // [ 123, 45600000000000 ] coefficient (i.e. significand)
x.e // 2 exponent
x.s // -1 sign
```
For advanced usage, multiple BigNumber constructors can be created, each with their own independent configuration which applies to all BigNumber's created from it.
```javascript
// Set DECIMAL_PLACES for the original BigNumber constructor
BigNumber.config({ DECIMAL_PLACES: 10 })
// Create another BigNumber constructor, optionally passing in a configuration object
BN = BigNumber.clone({ DECIMAL_PLACES: 5 })
x = new BigNumber(1)
y = new BN(1)
x.div(3) // '0.3333333333'
y.div(3) // '0.33333'
```
For further information see the [API](http://mikemcl.github.io/bignumber.js/) reference in the *doc* directory.
## Test
The *test/modules* directory contains the test scripts for each method.
The tests can be run with Node.js or a browser. For Node.js use
$ npm test
or
$ node test/test
To test a single method, use, for example
$ node test/methods/toFraction
For the browser, open *test/test.html*.
## Performance
See the [README](https://github.com/MikeMcl/bignumber.js/tree/master/perf) in the *perf* directory.
## Build
For Node, if [uglify-js](https://github.com/mishoo/UglifyJS2) is installed
npm install uglify-js -g
then
npm run build
will create *bignumber.min.js*.
A source map will also be created in the root directory.
## Feedback
Open an issue, or email
Michael
<a href="mailto:[email protected]">[email protected]</a>
## Licence
The MIT Licence.
See [LICENCE](https://github.com/MikeMcl/bignumber.js/blob/master/LICENCE).
| {
"pile_set_name": "Github"
} |
; make the procedure object and assign where body is and in which environment it executes
(assign val (op make-compiled-procedure) (label entry18) (reg env))
(goto (label after-lambda19)) ; go to after definition
; compiler will jump here when procedure is called
entry18
; load environment stored in procedure object
(assign env (op compiled-procedure-env) (reg proc))
; extend environment with the provided arguments (which are in fact local variable assignments)
(assign env (op extend-environment) (const (n)) (reg argl) (reg env))
; make internal iterative procedure and assign the extended environment to it.
(assign val (op make-compiled-procedure) (label entry20) (reg env))
(goto (label after-lambda21))
; body of the internal iterative procedure, calls to it will jump to here
entry20
; here we already see a pattern in the execution.
; First build environment in which we will execute our code by extending the procedure env
; with the passed params as local variables.
(assign env (op compiled-procedure-env) (reg proc))
(assign env (op extend-environment) (const (product counter)) (reg argl) (reg env))
(save continue) ; save state of the parent (one who called us and told where to go next)
(save env) ; save our state so we know in which env we are executing
; compute (> counter n)
; Here I don't really get why we have to check if > is primitive procedure since I guess
; we can already know that in the compilation phase.
(assign proc (op lookup-variable-value) (const >) (reg env))
(assign val (op lookup-variable-value) (const n) (reg env))
(assign argl (op list) (reg val))
(assign val (op lookup-variable-value) (const counter) (reg env))
(assign argl (op cons) (reg val) (reg argl))
(test (op primitive-procedure?) (reg proc))
(branch (label primitive-branch25))
compiled-branch26
(assign continue (label after-call27))
(assign val (op compiled-procedure-entry) (reg proc))
(goto (reg val))
primitive-branch25
; applying primitive > to argument list we have built above.
(assign val (op apply-primitive-procedure) (reg proc) (reg argl))
after-call27
; continue our execution after we evaluated (> counter n) by checking if result
; was true or false.
(restore env)
(restore continue)
(test (op false?) (reg val))
(branch (label false-branch23))
; if true just return product from the environment as result and go wherever parent told
; us to go.
true-branch22
(assign val (op lookup-variable-value) (const product) (reg env))
(goto (reg continue))
; if false than setup for the next invocation of the procedure iter
false-branch23
(assign proc (op lookup-variable-value) (const iter) (reg env))
(save continue)
(save proc)
(save env)
; compute (+ counter 1)
(assign proc (op lookup-variable-value) (const +) (reg env))
(assign val (const 1))
(assign argl (op list) (reg val))
(assign val (op lookup-variable-value) (const counter) (reg env))
(assign argl (op cons) (reg val) (reg argl))
; again, we should know that + is primitive
(test (op primitive-procedure?) (reg proc))
(branch (label primitive-branch31))
compiled-branch32
(assign continue (label after-call33))
(assign val (op compiled-procedure-entry) (reg proc))
(goto (reg val))
primitive-branch31
(assign val (op apply-primitive-procedure) (reg proc) (reg argl))
; compute (* counter product)
after-call33
(assign argl (op list) (reg val))
(restore env)
(save argl)
(assign proc (op lookup-variable-value) (const *) (reg env))
(assign val (op lookup-variable-value) (const product) (reg env))
(assign argl (op list) (reg val))
(assign val (op lookup-variable-value) (const counter) (reg env))
(assign argl (op cons) (reg val) (reg argl))
(test (op primitive-procedure?) (reg proc))
(branch (label primitive-branch28))
compiled-branch29
(assign continue (label after-call30))
(assign val (op compiled-procedure-entry) (reg proc))
(goto (reg val))
primitive-branch28
(assign val (op apply-primitive-procedure) (reg proc) (reg argl))
; here we finish computation of the second operand of new call to iter
after-call30
(restore argl)
; adding it to the argl
(assign argl (op cons) (reg val) (reg argl))
; getting to see what is next to be executed.
(restore proc)
(restore continue)
(test (op primitive-procedure?) (reg proc))
(branch (label primitive-branch34))
; here we go again to iter.
compiled-branch35
(assign val (op compiled-procedure-entry) (reg proc))
(goto (reg val))
primitive-branch34
(assign val (op apply-primitive-procedure) (reg proc) (reg argl))
(goto (reg continue))
after-call36
after-if24
; here we add reference to iter procedure in the environment so it is
; possible to call it after it is defined.
after-lambda21
(perform (op define-variable!) (const iter) (reg val) (reg env))
(assign val (const ok))
(assign proc (op lookup-variable-value) (const iter) (reg env))
(assign val (const 1))
(assign argl (op list) (reg val))
(assign val (const 1))
(assign argl (op cons) (reg val) (reg argl))
(test (op primitive-procedure?) (reg proc))
(branch (label primitive-branch37))
compiled-branch38
(assign val (op compiled-procedure-entry) (reg proc))
(goto (reg val))
primitive-branch37
(assign val (op apply-primitive-procedure) (reg proc) (reg argl))
(goto (reg continue))
after-call39
; here we add reference to factorial in the enclosing environment.
after-lambda19
(perform (op define-variable!) (const factorial) (reg val) (reg env))
(assign val (const ok))
| {
"pile_set_name": "Github"
} |
page {
background: #f6f6f6;
display: flex;
flex-direction: column;
justify-content: flex-start;
}
.list {
margin-top: 40rpx;
height: auto;
width: 100%;
background: #fff;
padding: 0 40rpx;
border: 1px solid rgba(0, 0, 0, 0.1);
border-left: none;
border-right: none;
transition: all 300ms ease;
display: flex;
flex-direction: column;
align-items: flex-start;
box-sizing: border-box;
}
.list-item {
width: 100%;
padding: 0;
line-height: 104rpx;
font-size: 34rpx;
color: #007aff;
border-top: 1px solid rgba(0, 0, 0, 0.1);
display: flex;
flex-direction: row;
align-content: center;
justify-content: space-between;
box-sizing: border-box;
}
.list-item:first-child {
border-top: none;
}
.list-item image {
max-width: 100%;
max-height: 20vh;
margin: 20rpx 0;
}
.request-text {
color: #222;
padding: 20rpx 0;
font-size: 24rpx;
line-height: 36rpx;
word-break: break-all;
}
.guide {
width: 100%;
padding: 40rpx;
box-sizing: border-box;
display: flex;
flex-direction: column;
}
.guide .headline {
font-size: 34rpx;
font-weight: bold;
color: #555;
line-height: 40rpx;
}
.guide .p {
margin-top: 20rpx;
font-size: 28rpx;
line-height: 36rpx;
color: #666;
}
.guide .code {
margin-top: 20rpx;
font-size: 28rpx;
line-height: 36rpx;
color: #666;
background: white;
white-space: pre;
}
/* Dark code sample: same metrics as .code but inverted colors.
   Fixed: added the trailing semicolon after white-space, matching the
   declaration style used by every other rule in this file. */
.guide .code-dark {
  margin-top: 20rpx;
  background: rgba(0, 0, 0, 0.8);
  padding: 20rpx;
  font-size: 28rpx;
  line-height: 36rpx;
  border-radius: 6rpx;
  color: #fff;
  white-space: pre;
}
.guide image {
max-width: 100%;
}
.guide .image1 {
margin-top: 20rpx;
max-width: 100%;
width: 356px;
height: 47px;
}
.guide .image2 {
margin-top: 20rpx;
width: 264px;
height: 100px;
}
.guide .flat-image {
height: 100px;
}
.guide .code-image {
max-width: 100%;
}
.guide .copyBtn {
width: 180rpx;
font-size: 20rpx;
margin-top: 16rpx;
margin-left: 0;
}
.guide .nav {
margin-top: 50rpx;
display: flex;
flex-direction: row;
align-content: space-between;
}
.guide .nav .prev {
margin-left: unset;
}
.guide .nav .next {
margin-right: unset;
}
| {
"pile_set_name": "Github"
} |
import pytest
from django.urls import reverse
from ...models import SocialAuthProvider
admin_link = reverse("misago:admin:settings:socialauth:edit", kwargs={"pk": "twitter"})
@pytest.fixture
def provider(db):
    # An enabled Twitter social auth provider persisted to the test database.
    return SocialAuthProvider.objects.create(
        provider="twitter", is_active=True, order=0
    )
def test_twitter_form_can_be_accessed(admin_client):
    """The Twitter provider's admin edit form renders without errors."""
    assert admin_client.get(admin_link).status_code == 200
def test_twitter_login_can_be_setup(admin_client):
    """Posting valid credentials activates the provider and stores its settings."""
    form_data = {
        "is_active": "1",
        "associate_by_email": "1",
        "key": "test-key",
        "secret": "test-secret",
    }
    admin_client.post(admin_link, form_data)

    provider = SocialAuthProvider.objects.get(provider="twitter")
    assert provider.is_active
    # Note: the form coerces posted "1" strings to ints in stored settings.
    assert provider.settings == {
        "associate_by_email": 1,
        "key": "test-key",
        "secret": "test-secret",
    }
def test_twitter_login_can_be_disabled(admin_client, provider):
    """Posting is_active=0 switches the previously enabled provider off."""
    admin_client.post(admin_link, {"is_active": "0"})
    assert not SocialAuthProvider.objects.get(provider="twitter").is_active
def test_twitter_login_form_requires_key_to_setup(admin_client):
    """Submitting without a key must not create a provider record."""
    admin_client.post(admin_link, {"secret": "test-secret", "is_active": "1"})

    with pytest.raises(SocialAuthProvider.DoesNotExist):
        SocialAuthProvider.objects.get(provider="twitter")
def test_twitter_login_form_requires_secret_to_setup(admin_client):
    """Submitting without a secret must not create a provider record."""
    admin_client.post(admin_link, {"key": "test-key", "is_active": "1"})

    with pytest.raises(SocialAuthProvider.DoesNotExist):
        SocialAuthProvider.objects.get(provider="twitter")
| {
"pile_set_name": "Github"
} |
<p align="center">
<a href="https://wpscan.org/">
<img src="https://raw.githubusercontent.com/wpscanteam/wpscan/gh-pages/images/wpscan_logo.png" alt="WPScan logo">
</a>
</p>
<h3 align="center">WPScan</h3>
<p align="center">
WordPress Security Scanner
<br>
<br>
<a href="https://wpscan.org/" title="homepage" target="_blank">Homepage</a> - <a href="https://wpscan.io/" title="wpscan.io" target="_blank">WPScan.io</a> - <a href="https://wpvulndb.com/" title="vulnerability database" target="_blank">Vulnerability Database</a> - <a href="https://wordpress.org/plugins/wpscan/" title="wordpress security plugin" target="_blank">WordPress Security Plugin</a>
</p>
<p align="center">
<a href="https://badge.fury.io/rb/wpscan" target="_blank"><img src="https://badge.fury.io/rb/wpscan.svg"></a>
<a href="https://github.com/wpscanteam/wpscan/actions?query=workflow%3ABuild" target="_blank"><img src="https://github.com/wpscanteam/wpscan/workflows/Build/badge.svg"></a>
<a href="https://codeclimate.com/github/wpscanteam/wpscan" target="_blank"><img src="https://codeclimate.com/github/wpscanteam/wpscan/badges/gpa.svg"></a>
</p>
# INSTALL
## Prerequisites
- (Optional but highly recommended: [RVM](https://rvm.io/rvm/install))
- Ruby >= 2.3 - Recommended: latest
- Ruby 2.5.0 to 2.5.3 can cause an 'undefined symbol: rmpd_util_str_to_d' error in some systems, see [#1283](https://github.com/wpscanteam/wpscan/issues/1283)
- Curl >= 7.21 - Recommended: latest
- The 7.29 has a segfault
- RubyGems - Recommended: latest
- Nokogiri might require packages to be installed via your package manager depending on your OS, see https://nokogiri.org/tutorials/installing_nokogiri.html
### In a Pentesting distribution
When using a pentesting distribution (such as Kali Linux), it is recommended to install/update wpscan via the package manager if available.
### From RubyGems
```shell
gem install wpscan
```
On MacOSX, if a ```Gem::FilePermissionError``` is raised due to the Apple's System Integrity Protection (SIP), either install RVM and install wpscan again, or run ```sudo gem install -n /usr/local/bin wpscan``` (see [#1286](https://github.com/wpscanteam/wpscan/issues/1286))
# Updating
You can update the local database by using ```wpscan --update```
Updating WPScan itself is either done via ```gem update wpscan``` or the packages manager (this is quite important for distributions such as in Kali Linux: ```apt-get update && apt-get upgrade```) depending how WPScan was (pre)installed
# Docker
Pull the repo with ```docker pull wpscanteam/wpscan```
Enumerating usernames
```shell
docker run -it --rm wpscanteam/wpscan --url https://target.tld/ --enumerate u
```
Enumerating a range of usernames
```shell
docker run -it --rm wpscanteam/wpscan --url https://target.tld/ --enumerate u1-100
```
** replace u1-100 with a range of your choice.
# Usage
Full user documentation can be found here; https://github.com/wpscanteam/wpscan/wiki/WPScan-User-Documentation
```wpscan --url blog.tld``` This will scan the blog using default options with a good compromise between speed and accuracy. For example, the plugins will be checked passively but their version with a mixed detection mode (passively + aggressively). Potential config backup files will also be checked, along with other interesting findings.
If a more stealthy approach is required, then ```wpscan --stealthy --url blog.tld``` can be used.
As a result, when using the ```--enumerate``` option, don't forget to set the ```--plugins-detection``` accordingly, as its default is 'passive'.
For more options, open a terminal and type ```wpscan --help``` (if you built wpscan from the source, you should type the command outside of the git repo)
The DB is located at ~/.wpscan/db
## Vulnerability Database
The WPScan CLI tool uses the [WPVulnDB API](https://wpvulndb.com/api) to retrieve WordPress vulnerability data in real time. For WPScan to retrieve the vulnerability data an API token must be supplied via the `--api-token` option, or via a configuration file, as discussed below. An API token can be obtained by registering an account on [WPVulnDB](https://wpvulndb.com/users/sign_up). Up to 50 API requests per day are given free of charge to registered users. Once the 50 API requests are exhausted, WPScan will continue to work as normal but without any vulnerability data. Users can upgrade to paid API usage to increase their API limits within their user profile on [WPVulnDB](https://wpvulndb.com/).
## Load CLI options from file/s
WPScan can load all options (including the --url) from configuration files, the following locations are checked (order: first to last):
- ~/.wpscan/scan.json
- ~/.wpscan/scan.yml
- pwd/.wpscan/scan.json
- pwd/.wpscan/scan.yml
If those files exist, options from the `cli_options` key will be loaded and overridden if found twice.
e.g:
~/.wpscan/scan.yml:
```yml
cli_options:
proxy: 'http://127.0.0.1:8080'
verbose: true
```
pwd/.wpscan/scan.yml:
```yml
cli_options:
proxy: 'socks5://127.0.0.1:9090'
url: 'http://target.tld'
```
Running ```wpscan``` in the current directory (pwd), is the same as ```wpscan -v --proxy socks5://127.0.0.1:9090 --url http://target.tld```
## Save API Token in a file
The feature mentioned above is useful to keep the API Token in a config file and not have to supply it via the CLI each time. To do so, create the ~/.wpscan/scan.yml file containing the below:
```yml
cli_options:
api_token: YOUR_API_TOKEN
```
## Load API Token From ENV (since v3.7.10)
The API Token will be automatically loaded from the ENV variable `WPSCAN_API_TOKEN` if present. If the `--api-token` CLI option is also provided, the value from the CLI will be used.
## Enumerating usernames
```shell
wpscan --url https://target.tld/ --enumerate u
```
Enumerating a range of usernames
```shell
wpscan --url https://target.tld/ --enumerate u1-100
```
** replace u1-100 with a range of your choice.
# LICENSE
## WPScan Public Source License
The WPScan software (henceforth referred to simply as "WPScan") is dual-licensed - Copyright 2011-2019 WPScan Team.
Cases that include commercialization of WPScan require a commercial, non-free license. Otherwise, WPScan can be used without charge under the terms set out below.
### 1. Definitions
1.1 "License" means this document.
1.2 "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns WPScan.
1.3 "WPScan Team" means WPScan’s core developers.
### 2. Commercialization
A commercial use is one intended for commercial advantage or monetary compensation.
Example cases of commercialization are:
- Using WPScan to provide commercial managed/Software-as-a-Service services.
- Distributing WPScan as a commercial product or as part of one.
- Using WPScan as a value added service/product.
Example cases which do not require a commercial license, and thus fall under the terms set out below, include (but are not limited to):
- Penetration testers (or penetration testing organizations) using WPScan as part of their assessment toolkit.
- Penetration Testing Linux Distributions including but not limited to Kali Linux, SamuraiWTF, BackBox Linux.
- Using WPScan to test your own systems.
- Any non-commercial use of WPScan.
If you need to purchase a commercial license or are unsure whether you need to purchase a commercial license contact us - [email protected].
Free-use Terms and Conditions;
### 3. Redistribution
Redistribution is permitted under the following conditions:
- Unmodified License is provided with WPScan.
- Unmodified Copyright notices are provided with WPScan.
- Does not conflict with the commercialization clause.
### 4. Copying
Copying is permitted so long as it does not conflict with the Redistribution clause.
### 5. Modification
Modification is permitted so long as it does not conflict with the Redistribution clause.
### 6. Contributions
Any Contributions assume the Contributor grants the WPScan Team the unlimited, non-exclusive right to reuse, modify and relicense the Contributor's content.
### 7. Support
WPScan is provided under an AS-IS basis and without any support, updates or maintenance. Support, updates and maintenance may be given according to the sole discretion of the WPScan Team.
### 8. Disclaimer of Warranty
WPScan is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the WPScan is free of defects, merchantable, fit for a particular purpose or non-infringing.
### 9. Limitation of Liability
To the extent permitted under Law, WPScan is provided under an AS-IS basis. The WPScan Team shall never, and without any limit, be liable for any damage, cost, expense or any other payment incurred as a result of WPScan's actions, failure, bugs and/or any other interaction between WPScan and end-equipment, computers, other software or any 3rd party, end-equipment, computer or services.
### 10. Disclaimer
Running WPScan against websites without prior mutual consent may be illegal in your country. The WPScan Team accept no liability and are not responsible for any misuse or damage caused by WPScan.
### 11. Trademark
The "wpscan" term is a registered trademark. This License does not grant the use of the "wpscan" trademark or the use of the WPScan logo.
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.applib.services.factory;
import java.util.NoSuchElementException;
import javax.annotation.Nullable;
import org.apache.isis.applib.services.repository.RepositoryService;
import org.apache.isis.commons.exceptions.IsisException;
// tag::refguide[]
public interface FactoryService {
// end::refguide[]
/**
* General purpose factory method, to automatically get or create an instance of
* {@code requiredType}.
*
* <p>
* Maps onto one of the specialized factory methods {@link #get(Class)} or {@link #create(Class)}
* based on the type's meta-data.
* </p>
*
* @param <T>
* @param requiredType
* @return
* @throws NoSuchElementException if result is empty
* @throws IsisException if instance creation failed
* @throws IllegalArgumentException if requiredType is not recognized by the meta-model
*
* @since 2.0
*
*/
// tag::refguide[]
<T> T getOrCreate(Class<T> requiredType); // <.>
// end::refguide[]
/**
* Gets an instance (possibly shared or independent) of the specified {@code requiredType},
* with injection points resolved
* and any life-cycle callback processed.
*
* @param <T>
* @param requiredType - only applicable to IoC container managed types
* @return (non-null), an instance of {@code requiredType}, if available and unique
* (i.e. not multiple candidates found with none marked as primary)
*
* @throws NoSuchElementException if result is empty
* @throws IsisException if instance creation failed
*
* @apiNote does not force the requiredType to be added to the meta-model
* @since 2.0
*/
// tag::refguide[]
<T> T get(Class<T> requiredType); // <.>
// end::refguide[]
/**
* Creates a new detached entity instance, with injection points resolved
* and defaults applied.
*
* @param <T>
* @param domainClass - only applicable to entity types
* @return
* @throws IllegalArgumentException if domainClass is not an entity type
* @apiNote forces the domainClass to be added to the meta-model if not already
* @since 2.0
*/
// tag::refguide[]
<T> T detachedEntity(Class<T> domainClass); // <.>
// end::refguide[]
/**
* Creates a new Mixin instance, with injection points resolved.
*
* @param <T>
* @param mixinClass
* @param mixedIn
* @return
* @throws IllegalArgumentException if mixinClass is not a mixin type
* @apiNote forces the mixinClass to be added to the meta-model if not already
*/
// tag::refguide[]
<T> T mixin(Class<T> mixinClass, Object mixedIn); // <.>
// end::refguide[]
/**
* Creates a new ViewModel instance, with injection points resolved,
* and initialized according to the given {@code mementoStr}
*
* @param viewModelClass
* @param mementoStr - ignored if {@code null}
* @throws IllegalArgumentException if viewModelClass is not a viewmodel type
* @apiNote forces the viewModelClass to be added to the meta-model if not already
* @since 2.0
*/
// tag::refguide[]
<T> T viewModel(Class<T> viewModelClass, @Nullable String mementoStr); // <.>
// end::refguide[]
/**
* Creates a new ViewModel instance,
* with injection points resolved
* and defaults applied.
* @param viewModelClass
* @throws IllegalArgumentException if viewModelClass is not a viewmodel type
* @apiNote forces the viewModelClass to be added to the meta-model if not already
* @since 2.0
*/
// tag::refguide[]
default <T> T viewModel(Class<T> viewModelClass) { // <.>
    // Delegates to the memento-based overload with a null memento, so the
    // view model is created with injection points resolved and defaults applied.
    return viewModel(viewModelClass, /*mementoStr*/null);
}
// end::refguide[]
/**
* Creates a new instance of the specified class,
* with injection points resolved
* and defaults applied.
* @param domainClass - not applicable to IoC container managed types
* @throws IllegalArgumentException if domainClass is an IoC container managed type,
* or not recognized by the meta-model
* @apiNote forces the domainClass to be added to the meta-model if not already
* @since 2.0
*/
// tag::refguide[]
<T> T create(Class<T> domainClass); // <.>
// end::refguide[]
// -- DEPRECATIONS
/**
* Creates a new instance of the specified class, but does not persist it.
*
* <p>
* It is recommended that the object be initially instantiated using
* this method, though the framework will also handle the case when
* the object is simply <i>new()</i>ed up. The benefits of using
* {@link #instantiate(Class)} are:
* </p>
*
* <ul>
* <li>any services will be injected into the object immediately
* (otherwise they will not be injected until the framework
* becomes aware of the object, typically when it is
* {@link RepositoryService#persist(Object) persist}ed</li>
* <li>the default value for any properties (usually as specified by
* <tt>default<i>Xxx</i>()</tt> supporting methods) will (since 2.0) be
* used</li>
* <li>the <tt>created()</tt> callback will not be called.
* </ul>
*
* <p>
* The corollary is: if your code never uses <tt>default<i>Xxx</i>()</tt>
* supporting methods or the <tt>created()</tt> callback, then you can
* alternatively just <i>new()</i> up the object rather than call this
* method.
* </p>
* @deprecated with semantic changes since 2.0 previous behavior is no longer guaranteed,
* instead consider use of @{@link #detachedEntity(Class)} or {@link #getOrCreate(Class)}
* if applicable
*/
@Deprecated
default <T> T instantiate(Class<T> domainClass) {
    // Retained for backward compatibility only; since 2.0 this delegates to
    // getOrCreate(Class), which (per its contract) may return a shared
    // IoC-managed instance rather than a freshly created one.
    return getOrCreate(domainClass);
}
// tag::refguide[]
}
// end::refguide[]
| {
"pile_set_name": "Github"
} |
/*
Language: Django
Description: Django is a high-level Python Web framework that encourages rapid development and clean, pragmatic design.
Requires: xml.js
Author: Ivan Sagalaev <[email protected]>
Contributors: Ilya Baryshev <[email protected]>
Website: https://www.djangoproject.com
Category: template
*/
export default function(hljs) {
  // A `|filter` (optionally `|filter:arg`) application inside a tag or a
  // variable interpolation, e.g. `{{ value|default:"n/a" }}`. Quoted string
  // arguments are highlighted via the shared string modes.
  var FILTER = {
    begin: /\|[A-Za-z]+:?/,
    keywords: {
      name:
        'truncatewords removetags linebreaksbr yesno get_digit timesince random striptags ' +
        'filesizeformat escape linebreaks length_is ljust rjust cut urlize fix_ampersands ' +
        'title floatformat capfirst pprint divisibleby add make_list unordered_list urlencode ' +
        'timeuntil urlizetrunc wordcount stringformat linenumbers slice date dictsort ' +
        'dictsortreversed default_if_none pluralize lower join center default ' +
        'truncatewords_html upper length phone2numeric wordwrap time addslashes slugify first ' +
        'escapejs force_escape iriencode last safe safeseq truncatechars localize unlocalize ' +
        'localtime utc timezone'
    },
    contains: [
      hljs.QUOTE_STRING_MODE,
      hljs.APOS_STRING_MODE
    ]
  };
  return {
    name: 'Django',
    aliases: ['jinja'],
    case_insensitive: true,
    // Everything outside {% %}, {{ }} and {# #} is highlighted as HTML/XML.
    subLanguage: 'xml',
    contains: [
      // {% comment %} ... {% endcomment %} blocks and {# ... #} inline comments.
      hljs.COMMENT(/\{%\s*comment\s*%}/, /\{%\s*endcomment\s*%}/),
      hljs.COMMENT(/\{#/, /#}/),
      {
        // {% tag ... %}: the first word is matched as the tag name; the rest
        // of the tag is scanned for `in by as` keywords and filter uses.
        className: 'template-tag',
        begin: /\{%/, end: /%}/,
        contains: [
          {
            className: 'name',
            begin: /\w+/,
            keywords: {
              name:
                'comment endcomment load templatetag ifchanged endifchanged if endif firstof for ' +
                'endfor ifnotequal endifnotequal widthratio extends include spaceless ' +
                'endspaceless regroup ifequal endifequal ssi now with cycle url filter ' +
                'endfilter debug block endblock else autoescape endautoescape csrf_token empty elif ' +
                'endwith static trans blocktrans endblocktrans get_static_prefix get_media_prefix ' +
                'plural get_current_language language get_available_languages ' +
                'get_current_language_bidi get_language_info get_language_info_list localize ' +
                'endlocalize localtime endlocaltime timezone endtimezone get_current_timezone ' +
                'verbatim'
            },
            starts: {
              endsWithParent: true,
              keywords: 'in by as',
              contains: [FILTER],
              relevance: 0
            }
          }
        ]
      },
      {
        // {{ variable }} interpolations, possibly chained with filters.
        className: 'template-variable',
        begin: /\{\{/, end: /}}/,
        contains: [FILTER]
      }
    ]
  };
}
| {
"pile_set_name": "Github"
} |
/* ------------------------------------------------------------------------------
*
* # Dimple.js - multiple horizontal lines
*
* Demo of multiple line chart. Data stored in .tsv file format
*
* Version: 1.0
* Latest update: August 1, 2015
*
* ---------------------------------------------------------------------------- */
// Multiple-line chart demo (dimple.js): unit sales per channel by month,
// loaded from a .tsv file and redrawn on viewport changes.
$(function () {

    // SVG container the chart is rendered into
    var svg = dimple.newSvg("#dimple-line-horizontal-multiple", "100%", 500);

    // Fetch the demo data set, then build the chart once it arrives
    d3.tsv("assets/demo_data/dimple/demo_data.tsv", function (data) {

        // Keep only the two owners we want to plot
        data = dimple.filterData(data, "Owner", ["Aperture", "Black Mesa"])

        // Chart shell: bounds fill the SVG, margins leave room for axes/legend
        var lineChart = new dimple.chart(svg, data);
        lineChart.setBounds(0, 0, "100%", "100%");
        lineChart.setMargins(40, 25, 0, 50);

        // X: months as categories, ordered chronologically by the Date field
        var xAxis = lineChart.addCategoryAxis("x", "Month");
        xAxis.addOrderRule("Date");

        // Y: measured unit sales
        var yAxis = lineChart.addMeasureAxis("y", "Unit Sales");

        // One smoothed ("basis"-interpolated) line per channel
        lineChart
            .addSeries("Channel", dimple.plot.line)
            .interpolation = "basis";

        // Legend along the top, right-aligned
        var legend = lineChart
            .addLegend(0, 5, "100%", 0, "right");

        // Typography
        xAxis.fontSize = "12";
        yAxis.fontSize = "12";
        xAxis.fontFamily = "Roboto";
        yAxis.fontFamily = "Roboto";
        legend.fontSize = "12";
        legend.fontFamily = "Roboto";

        // Initial render, then cosmetic cleanup (legend baseline, no titles)
        lineChart.draw();
        legend.shapes.selectAll("text").attr("dy", "1");
        xAxis.titleShape.remove();
        yAxis.titleShape.remove();

        // Redraw when the window resizes or the sidebar is toggled
        $(window).on('resize', redraw);
        $('.sidebar-control').on('click', redraw);

        // Debounced, animation-free redraw with the same cosmetic cleanup
        function redraw() {
            setTimeout(function() {
                lineChart.draw(0, true);
                legend.shapes.selectAll("text").attr("dy", "1");
                xAxis.titleShape.remove();
                yAxis.titleShape.remove();
            }, 100)
        }
    });
});
"pile_set_name": "Github"
} |
import { createFilterStructure } from "@saleor/discounts/components/VoucherListPage";
import { VoucherListUrlFilters } from "@saleor/discounts/urls";
import { date } from "@saleor/fixtures";
import {
DiscountStatusEnum,
VoucherDiscountType
} from "@saleor/types/globalTypes";
import { getFilterQueryParams } from "@saleor/utils/filters";
import { getExistingKeys, setFilterOptsStatus } from "@test/filters";
import { config } from "@test/intl";
import { stringify as stringifyQs } from "qs";
import { createIntl } from "react-intl";
import { getFilterQueryParam, getFilterVariables } from "./filters";
describe("Filtering query params", () => {
  it("should be empty object if no params given", () => {
    const params: VoucherListUrlFilters = {};

    expect(getExistingKeys(getFilterVariables(params))).toHaveLength(0);
  });

  it("should not be empty object if params given", () => {
    const params: VoucherListUrlFilters = {
      startedFrom: date.from,
      startedTo: date.to,
      status: [DiscountStatusEnum.ACTIVE, DiscountStatusEnum.EXPIRED],
      timesUsedFrom: date.from,
      timesUsedTo: date.to,
      type: [VoucherDiscountType.FIXED, VoucherDiscountType.SHIPPING]
    };

    // 6 URL params presumably collapse into 4 filter variables (the from/to
    // pairs become single range filters) — verify against getFilterVariables.
    expect(getExistingKeys(getFilterVariables(params))).toHaveLength(4);
  });
});
describe("Filtering URL params", () => {
  const intl = createIntl(config);

  // Full filter structure fixture; every option starts inactive so the first
  // test can assert that inactive filters produce no query params.
  const filters = createFilterStructure(intl, {
    saleType: {
      active: false,
      value: [VoucherDiscountType.FIXED, VoucherDiscountType.SHIPPING]
    },
    started: {
      active: false,
      value: {
        max: date.to,
        min: date.from
      }
    },
    status: {
      active: false,
      value: [DiscountStatusEnum.ACTIVE, DiscountStatusEnum.EXPIRED]
    },
    timesUsed: {
      active: false,
      value: {
        max: "6",
        min: "1"
      }
    }
  });

  it("should be empty if no active filters", () => {
    const filterQueryParams = getFilterQueryParams(
      filters,
      getFilterQueryParam
    );

    expect(getExistingKeys(filterQueryParams)).toHaveLength(0);
  });

  it("should not be empty if active filters are present", () => {
    // Activate every filter, then snapshot both the object form and its
    // query-string serialization.
    const filterQueryParams = getFilterQueryParams(
      setFilterOptsStatus(filters, true),
      getFilterQueryParam
    );

    expect(filterQueryParams).toMatchSnapshot();
    expect(stringifyQs(filterQueryParams)).toMatchSnapshot();
  });
});
| {
"pile_set_name": "Github"
} |
; RUN: %lli %s > /dev/null
define i32 @main() {
  ; Build zero-valued operands with fadd instead of plain constants —
  ; presumably so the fcmp instructions below survive into the JIT rather
  ; than being constant-folded away.
  %double1 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
  %double2 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
  %float1 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
  %float2 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
  ; Ordered comparisons (oeq/oge/ogt/ole/olt) plus unordered-not-equal (une)
  ; on float operands; results are intentionally unused.
  %test49 = fcmp oeq float %float1, %float2 ; <i1> [#uses=0]
  %test50 = fcmp oge float %float1, %float2 ; <i1> [#uses=0]
  %test51 = fcmp ogt float %float1, %float2 ; <i1> [#uses=0]
  %test52 = fcmp ole float %float1, %float2 ; <i1> [#uses=0]
  %test53 = fcmp olt float %float1, %float2 ; <i1> [#uses=0]
  %test54 = fcmp une float %float1, %float2 ; <i1> [#uses=0]
  ; Same set of predicates on double operands.
  %test55 = fcmp oeq double %double1, %double2 ; <i1> [#uses=0]
  %test56 = fcmp oge double %double1, %double2 ; <i1> [#uses=0]
  %test57 = fcmp ogt double %double1, %double2 ; <i1> [#uses=0]
  %test58 = fcmp ole double %double1, %double2 ; <i1> [#uses=0]
  %test59 = fcmp olt double %double1, %double2 ; <i1> [#uses=0]
  %test60 = fcmp une double %double1, %double2 ; <i1> [#uses=0]
  ret i32 0
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2012 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
     You may obtain a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Styled linear layout, compensating for the lack of a defStyle parameter
in pre-Honeycomb LinearLayout's constructor. -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
style="?attr/actionBarTabBarStyle">
</LinearLayout> | {
"pile_set_name": "Github"
} |
/* pcfread.c
FreeType font driver for pcf fonts
Copyright 2000-2010, 2012 by
Francesco Zappa Nardelli
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <ft2build.h>
#include FT_INTERNAL_DEBUG_H
#include FT_INTERNAL_STREAM_H
#include FT_INTERNAL_OBJECTS_H
#include "pcf.h"
#include "pcfread.h"
#include "pcferror.h"
/*************************************************************************/
/* */
/* The macro FT_COMPONENT is used in trace mode. It is an implicit */
/* parameter of the FT_TRACE() and FT_ERROR() macros, used to print/log */
/* messages during execution. */
/* */
#undef FT_COMPONENT
#define FT_COMPONENT trace_pcfread
#ifdef FT_DEBUG_LEVEL_TRACE

  /* Human-readable names for the PCF table `type' bits, indexed by bit */
  /* position; used only for trace output in `pcf_read_TOC'.            */
  static const char* const  tableNames[] =
  {
    "prop", "accl", "mtrcs", "bmps", "imtrcs",
    "enc", "swidth", "names", "accel"
  };

#endif
  /* Frame descriptor for the 8-byte file header (TOC); these fields are */
  /* always stored little-endian in the file.                            */
  static
  const FT_Frame_Field  pcf_toc_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_TocRec

    FT_FRAME_START( 8 ),
      FT_FRAME_ULONG_LE( version ),
      FT_FRAME_ULONG_LE( count ),
    FT_FRAME_END
  };


  /* Frame descriptor for one 16-byte table directory entry. */
  static
  const FT_Frame_Field  pcf_table_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_TableRec

    FT_FRAME_START( 16 ),
      FT_FRAME_ULONG_LE( type ),
      FT_FRAME_ULONG_LE( format ),
      FT_FRAME_ULONG_LE( size ),
      FT_FRAME_ULONG_LE( offset ),
    FT_FRAME_END
  };
  /* Read and sanity-check the table of contents at the start of the PCF  */
  /* file, allocating `face->toc.tables' and sorting it by file offset.   */
  /* Returns an FT error code; on failure the table array is freed.       */
  static FT_Error
  pcf_read_TOC( FT_Stream  stream,
                PCF_Face   face )
  {
    FT_Error   error;
    PCF_Toc    toc = &face->toc;
    PCF_Table  tables;

    FT_Memory  memory = FT_FACE(face)->memory;
    FT_UInt    n;


    /* the TOC lives at the very beginning of the stream */
    if ( FT_STREAM_SEEK ( 0 )                          ||
         FT_STREAM_READ_FIELDS ( pcf_toc_header, toc ) )
      return PCF_Err_Cannot_Open_Resource;

    if ( toc->version != PCF_FILE_VERSION                 ||
         toc->count   >  FT_ARRAY_MAX( face->toc.tables ) ||
         toc->count   == 0                                )
      return PCF_Err_Invalid_File_Format;

    if ( FT_NEW_ARRAY( face->toc.tables, toc->count ) )
      return PCF_Err_Out_Of_Memory;

    tables = face->toc.tables;
    for ( n = 0; n < toc->count; n++ )
    {
      if ( FT_STREAM_READ_FIELDS( pcf_table_header, tables ) )
        goto Exit;
      tables++;
    }

    /* Sort tables and check for overlaps.  Because they are almost      */
    /* always ordered already, an in-place bubble sort with simultaneous */
    /* boundary checking seems appropriate.                              */
    tables = face->toc.tables;

    for ( n = 0; n < toc->count - 1; n++ )
    {
      FT_UInt  i, have_change;


      have_change = 0;

      for ( i = 0; i < toc->count - 1 - n; i++ )
      {
        PCF_TableRec  tmp;


        if ( tables[i].offset > tables[i + 1].offset )
        {
          tmp           = tables[i];
          tables[i]     = tables[i + 1];
          tables[i + 1] = tmp;

          have_change = 1;
        }

        /* NOTE(review): this overlap test rejects entries while the pass */
        /* is still sorting; whether it can misfire on valid but heavily  */
        /* unordered TOCs should be confirmed against the PCF spec.       */
        if ( ( tables[i].size   > tables[i + 1].offset )                  ||
             ( tables[i].offset > tables[i + 1].offset - tables[i].size ) )
          return PCF_Err_Invalid_Offset;
      }

      /* stop as soon as one pass made no swaps (already sorted) */
      if ( !have_change )
        break;
    }

#ifdef FT_DEBUG_LEVEL_TRACE

    {
      FT_UInt      i, j;
      const char*  name = "?";


      FT_TRACE4(( "pcf_read_TOC:\n" ));

      FT_TRACE4(( "  number of tables: %ld\n", face->toc.count ));

      tables = face->toc.tables;
      for ( i = 0; i < toc->count; i++ )
      {
        /* map the type bit to its human-readable name for tracing */
        for ( j = 0; j < sizeof ( tableNames ) / sizeof ( tableNames[0] );
              j++ )
          if ( tables[i].type == (FT_UInt)( 1 << j ) )
            name = tableNames[j];

        FT_TRACE4(( "  %d: type=%s, format=0x%X, "
                    "size=%ld (0x%lX), offset=%ld (0x%lX)\n",
                    i, name,
                    tables[i].format,
                    tables[i].size, tables[i].size,
                    tables[i].offset, tables[i].offset ));
      }
    }

#endif

    return PCF_Err_Ok;

  Exit:
    FT_FREE( face->toc.tables );
    return error;
  }
  /* Frame descriptors for glyph metric records: the full 12-byte form in */
  /* little- and big-endian flavours, and the 5-byte compressed form.     */
#define PCF_METRIC_SIZE  12

  static
  const FT_Frame_Field  pcf_metric_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_MetricRec

    FT_FRAME_START( PCF_METRIC_SIZE ),
      FT_FRAME_SHORT_LE( leftSideBearing ),
      FT_FRAME_SHORT_LE( rightSideBearing ),
      FT_FRAME_SHORT_LE( characterWidth ),
      FT_FRAME_SHORT_LE( ascent ),
      FT_FRAME_SHORT_LE( descent ),
      FT_FRAME_SHORT_LE( attributes ),
    FT_FRAME_END
  };


  static
  const FT_Frame_Field  pcf_metric_msb_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_MetricRec

    FT_FRAME_START( PCF_METRIC_SIZE ),
      FT_FRAME_SHORT( leftSideBearing ),
      FT_FRAME_SHORT( rightSideBearing ),
      FT_FRAME_SHORT( characterWidth ),
      FT_FRAME_SHORT( ascent ),
      FT_FRAME_SHORT( descent ),
      FT_FRAME_SHORT( attributes ),
    FT_FRAME_END
  };


#define PCF_COMPRESSED_METRIC_SIZE  5

  /* Compressed metrics store each value as one byte (biased by 0x80, see */
  /* `pcf_get_metric') and carry no `attributes' field.                   */
  static
  const FT_Frame_Field  pcf_compressed_metric_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_Compressed_MetricRec

    FT_FRAME_START( PCF_COMPRESSED_METRIC_SIZE ),
      FT_FRAME_BYTE( leftSideBearing ),
      FT_FRAME_BYTE( rightSideBearing ),
      FT_FRAME_BYTE( characterWidth ),
      FT_FRAME_BYTE( ascent ),
      FT_FRAME_BYTE( descent ),
    FT_FRAME_END
  };
  /* Read one glyph metric record at the current stream position into     */
  /* `metric'.  `format' selects between the full (endian-dependent) and  */
  /* the compressed byte-sized layout.  Returns an FT error code.         */
  static FT_Error
  pcf_get_metric( FT_Stream  stream,
                  FT_ULong   format,
                  PCF_Metric metric )
  {
    FT_Error  error = PCF_Err_Ok;


    if ( PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) )
    {
      const FT_Frame_Field*  fields;


      /* parsing normal metrics */
      fields = PCF_BYTE_ORDER( format ) == MSBFirst
               ? pcf_metric_msb_header
               : pcf_metric_header;

      /* the following sets `error' but doesn't return in case of failure */
      (void)FT_STREAM_READ_FIELDS( fields, metric );
    }
    else
    {
      PCF_Compressed_MetricRec  compr;


      /* parsing compressed metrics */
      if ( FT_STREAM_READ_FIELDS( pcf_compressed_metric_header, &compr ) )
        goto Exit;

      /* compressed values are unsigned bytes biased by 0x80 */
      metric->leftSideBearing  = (FT_Short)( compr.leftSideBearing  - 0x80 );
      metric->rightSideBearing = (FT_Short)( compr.rightSideBearing - 0x80 );
      metric->characterWidth   = (FT_Short)( compr.characterWidth   - 0x80 );
      metric->ascent           = (FT_Short)( compr.ascent           - 0x80 );
      metric->descent          = (FT_Short)( compr.descent          - 0x80 );
      metric->attributes       = 0;
    }

  Exit:
    return error;
  }
  /* Seek the stream forward to the first table of the given `type' and   */
  /* report its format and size through `aformat'/`asize'.  Only forward  */
  /* skips are allowed (tables were sorted by offset in pcf_read_TOC);    */
  /* `*asize' is zeroed on failure.                                       */
  static FT_Error
  pcf_seek_to_table_type( FT_Stream  stream,
                          PCF_Table  tables,
                          FT_ULong   ntables, /* same as PCF_Toc->count */
                          FT_ULong   type,
                          FT_ULong  *aformat,
                          FT_ULong  *asize )
  {
    FT_Error  error = PCF_Err_Invalid_File_Format;
    FT_ULong  i;


    for ( i = 0; i < ntables; i++ )
      if ( tables[i].type == type )
      {
        /* refuse to seek backwards */
        if ( stream->pos > tables[i].offset )
        {
          error = PCF_Err_Invalid_Stream_Skip;
          goto Fail;
        }

        if ( FT_STREAM_SKIP( tables[i].offset - stream->pos ) )
        {
          error = PCF_Err_Invalid_Stream_Skip;
          goto Fail;
        }

        *asize   = tables[i].size;
        *aformat = tables[i].format;

        return PCF_Err_Ok;
      }

  Fail:
    *asize = 0;
    return error;
  }
static FT_Bool
pcf_has_table_type( PCF_Table tables,
FT_ULong ntables, /* same as PCF_Toc->count */
FT_ULong type )
{
FT_ULong i;
for ( i = 0; i < ntables; i++ )
if ( tables[i].type == type )
return TRUE;
return FALSE;
}
  /* Frame descriptors for one 9-byte property record, in little- and     */
  /* big-endian flavours (selected by the table's format field).          */
#define PCF_PROPERTY_SIZE  9

  static
  const FT_Frame_Field  pcf_property_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_ParsePropertyRec

    FT_FRAME_START( PCF_PROPERTY_SIZE ),
      FT_FRAME_LONG_LE( name ),
      FT_FRAME_BYTE   ( isString ),
      FT_FRAME_LONG_LE( value ),
    FT_FRAME_END
  };


  static
  const FT_Frame_Field  pcf_property_msb_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_ParsePropertyRec

    FT_FRAME_START( PCF_PROPERTY_SIZE ),
      FT_FRAME_LONG( name ),
      FT_FRAME_BYTE( isString ),
      FT_FRAME_LONG( value ),
    FT_FRAME_END
  };
FT_LOCAL_DEF( PCF_Property )
pcf_find_property( PCF_Face face,
const FT_String* prop )
{
PCF_Property properties = face->properties;
FT_Bool found = 0;
int i;
for ( i = 0 ; i < face->nprops && !found; i++ )
{
if ( !ft_strcmp( properties[i].name, prop ) )
found = 1;
}
if ( found )
return properties + i - 1;
else
return NULL;
}
  /* Load the PCF_PROPERTIES table: read `nprops' fixed-size records,     */
  /* then the shared string pool, and materialize them into               */
  /* `face->properties' (name strings and either atom strings or longs).  */
  static FT_Error
  pcf_get_properties( FT_Stream  stream,
                      PCF_Face   face )
  {
    PCF_ParseProperty  props      = 0;
    PCF_Property       properties = NULL;
    FT_ULong           nprops, i;
    FT_ULong           format, size;
    FT_Error           error;
    FT_Memory          memory     = FT_FACE(face)->memory;
    FT_ULong           string_size;
    FT_String*         strings    = 0;


    error = pcf_seek_to_table_type( stream,
                                    face->toc.tables,
                                    face->toc.count,
                                    PCF_PROPERTIES,
                                    &format,
                                    &size );
    if ( error )
      goto Bail;

    if ( FT_READ_ULONG_LE( format ) )
      goto Bail;

    FT_TRACE4(( "pcf_get_properties:\n" ));

    FT_TRACE4(( "  format = %ld\n", format ));

    if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) )
      goto Bail;

    /* the record count honours the byte order encoded in `format' */
    if ( PCF_BYTE_ORDER( format ) == MSBFirst )
      (void)FT_READ_ULONG( nprops );
    else
      (void)FT_READ_ULONG_LE( nprops );
    if ( error )
      goto Bail;

    FT_TRACE4(( "  nprop = %d (truncate %d props)\n",
                (int)nprops, nprops - (int)nprops ));

    /* NOTE(review): truncating an FT_ULong count through `int' looks     */
    /* intentional (guards the `int' typed face->nprops) but is worth     */
    /* confirming against platforms where int is 32 bits and nprops huge. */
    nprops = (int)nprops;

    /* rough estimate */
    if ( nprops > size / PCF_PROPERTY_SIZE )
    {
      error = PCF_Err_Invalid_Table;
      goto Bail;
    }

    face->nprops = (int)nprops;

    if ( FT_NEW_ARRAY( props, nprops ) )
      goto Bail;

    for ( i = 0; i < nprops; i++ )
    {
      if ( PCF_BYTE_ORDER( format ) == MSBFirst )
      {
        if ( FT_STREAM_READ_FIELDS( pcf_property_msb_header, props + i ) )
          goto Bail;
      }
      else
      {
        if ( FT_STREAM_READ_FIELDS( pcf_property_header, props + i ) )
          goto Bail;
      }
    }

    /* pad the property array                                            */
    /*                                                                   */
    /* clever here - nprops is the same as the number of odd-units read, */
    /* as only isStringProp are odd length (Keith Packard)               */
    /*                                                                   */
    if ( nprops & 3 )
    {
      i = 4 - ( nprops & 3 );
      if ( FT_STREAM_SKIP( i ) )
      {
        error = PCF_Err_Invalid_Stream_Skip;
        goto Bail;
      }
    }

    if ( PCF_BYTE_ORDER( format ) == MSBFirst )
      (void)FT_READ_ULONG( string_size );
    else
      (void)FT_READ_ULONG_LE( string_size );
    if ( error )
      goto Bail;

    FT_TRACE4(( "  string_size = %ld\n", string_size ));

    /* rough estimate */
    if ( string_size > size - nprops * PCF_PROPERTY_SIZE )
    {
      error = PCF_Err_Invalid_Table;
      goto Bail;
    }

    /* allocate one more byte so that we have a final null byte */
    if ( FT_NEW_ARRAY( strings, string_size + 1 ) )
      goto Bail;

    error = FT_Stream_Read( stream, (FT_Byte*)strings, string_size );
    if ( error )
      goto Bail;

    if ( FT_NEW_ARRAY( properties, nprops ) )
      goto Bail;

    face->properties = properties;

    /* resolve the name/value offsets into the string pool */
    for ( i = 0; i < nprops; i++ )
    {
      FT_Long  name_offset = props[i].name;


      if ( ( name_offset < 0 )                     ||
           ( (FT_ULong)name_offset > string_size ) )
      {
        error = PCF_Err_Invalid_Offset;
        goto Bail;
      }

      if ( FT_STRDUP( properties[i].name, strings + name_offset ) )
        goto Bail;

      FT_TRACE4(( "  %s:", properties[i].name ));

      properties[i].isString = props[i].isString;

      if ( props[i].isString )
      {
        FT_Long  value_offset = props[i].value;


        if ( ( value_offset < 0 )                     ||
             ( (FT_ULong)value_offset > string_size ) )
        {
          error = PCF_Err_Invalid_Offset;
          goto Bail;
        }

        if ( FT_STRDUP( properties[i].value.atom, strings + value_offset ) )
          goto Bail;

        FT_TRACE4(( " `%s'\n", properties[i].value.atom ));
      }
      else
      {
        properties[i].value.l = props[i].value;

        FT_TRACE4(( " %d\n", properties[i].value.l ));
      }
    }

    error = PCF_Err_Ok;

  Bail:
    /* `props' and the raw string pool are scratch data; the persistent  */
    /* copies live in face->properties (freed elsewhere on error paths)  */
    FT_FREE( props );
    FT_FREE( strings );

    return error;
  }
  /* Load the PCF_METRICS table into `face->metrics'.  Handles both the   */
  /* default (32-bit count, 12-byte records) and compressed (16-bit       */
  /* count, 5-byte records) formats.                                      */
  static FT_Error
  pcf_get_metrics( FT_Stream  stream,
                   PCF_Face   face )
  {
    FT_Error    error   = PCF_Err_Ok;
    FT_Memory   memory  = FT_FACE(face)->memory;
    FT_ULong    format, size;
    PCF_Metric  metrics = 0;
    FT_ULong    nmetrics, i;


    error = pcf_seek_to_table_type( stream,
                                    face->toc.tables,
                                    face->toc.count,
                                    PCF_METRICS,
                                    &format,
                                    &size );
    if ( error )
      return error;

    if ( FT_READ_ULONG_LE( format ) )
      goto Bail;

    if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT )     &&
         !PCF_FORMAT_MATCH( format, PCF_COMPRESSED_METRICS ) )
      return PCF_Err_Invalid_File_Format;

    /* the count field width depends on the (un)compressed format */
    if ( PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) )
    {
      if ( PCF_BYTE_ORDER( format ) == MSBFirst )
        (void)FT_READ_ULONG( nmetrics );
      else
        (void)FT_READ_ULONG_LE( nmetrics );
    }
    else
    {
      if ( PCF_BYTE_ORDER( format ) == MSBFirst )
        (void)FT_READ_USHORT( nmetrics );
      else
        (void)FT_READ_USHORT_LE( nmetrics );
    }
    if ( error )
      return PCF_Err_Invalid_File_Format;

    face->nmetrics = nmetrics;

    if ( !nmetrics )
      return PCF_Err_Invalid_Table;

    FT_TRACE4(( "pcf_get_metrics:\n" ));

    FT_TRACE4(( "  number of metrics: %d\n", nmetrics ));

    /* rough estimate */
    if ( PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) )
    {
      if ( nmetrics > size / PCF_METRIC_SIZE )
        return PCF_Err_Invalid_Table;
    }
    else
    {
      if ( nmetrics > size / PCF_COMPRESSED_METRIC_SIZE )
        return PCF_Err_Invalid_Table;
    }

    if ( FT_NEW_ARRAY( face->metrics, nmetrics ) )
      return PCF_Err_Out_Of_Memory;

    metrics = face->metrics;
    for ( i = 0; i < nmetrics; i++ )
    {
      error = pcf_get_metric( stream, format, metrics + i );

      /* the bitmap offset is filled in later by pcf_get_bitmaps */
      metrics[i].bits = 0;

      FT_TRACE5(( "  idx %d: width=%d, "
                  "lsb=%d, rsb=%d, ascent=%d, descent=%d, swidth=%d\n",
                  i,
                  ( metrics + i )->characterWidth,
                  ( metrics + i )->leftSideBearing,
                  ( metrics + i )->rightSideBearing,
                  ( metrics + i )->ascent,
                  ( metrics + i )->descent,
                  ( metrics + i )->attributes ));

      if ( error )
        break;
    }

    if ( error )
      FT_FREE( face->metrics );

  Bail:
    return error;
  }
  /* Load the PCF_BITMAPS table: read the per-glyph data offsets and      */
  /* store them as absolute stream positions in `face->metrics[i].bits'.  */
  /* The actual bitmap bytes are read lazily when a glyph is loaded.      */
  static FT_Error
  pcf_get_bitmaps( FT_Stream  stream,
                   PCF_Face   face )
  {
    FT_Error   error   = PCF_Err_Ok;
    FT_Memory  memory  = FT_FACE(face)->memory;
    FT_Long*   offsets = NULL;
    FT_Long    bitmapSizes[GLYPHPADOPTIONS];
    FT_ULong   format, size;
    FT_ULong   nbitmaps, i, sizebitmaps = 0;


    error = pcf_seek_to_table_type( stream,
                                    face->toc.tables,
                                    face->toc.count,
                                    PCF_BITMAPS,
                                    &format,
                                    &size );
    if ( error )
      return error;

    error = FT_Stream_EnterFrame( stream, 8 );
    if ( error )
      return error;

    format = FT_GET_ULONG_LE();
    if ( PCF_BYTE_ORDER( format ) == MSBFirst )
      nbitmaps = FT_GET_ULONG();
    else
      nbitmaps = FT_GET_ULONG_LE();

    FT_Stream_ExitFrame( stream );

    if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) )
      return PCF_Err_Invalid_File_Format;

    FT_TRACE4(( "pcf_get_bitmaps:\n" ));

    FT_TRACE4(( "  number of bitmaps: %d\n", nbitmaps ));

    /* XXX: PCF_Face->nmetrics is singed FT_Long, see pcf.h */
    if ( face->nmetrics < 0 || nbitmaps != ( FT_ULong )face->nmetrics )
      return PCF_Err_Invalid_File_Format;

    if ( FT_NEW_ARRAY( offsets, nbitmaps ) )
      return error;

    for ( i = 0; i < nbitmaps; i++ )
    {
      if ( PCF_BYTE_ORDER( format ) == MSBFirst )
        (void)FT_READ_LONG( offsets[i] );
      else
        (void)FT_READ_LONG_LE( offsets[i] );

      FT_TRACE5(( "  bitmap %d: offset %ld (0x%lX)\n",
                  i, offsets[i], offsets[i] ));
    }
    if ( error )
      goto Bail;

    /* read the four total-size fields (one per glyph padding option) */
    for ( i = 0; i < GLYPHPADOPTIONS; i++ )
    {
      if ( PCF_BYTE_ORDER( format ) == MSBFirst )
        (void)FT_READ_LONG( bitmapSizes[i] );
      else
        (void)FT_READ_LONG_LE( bitmapSizes[i] );
      if ( error )
        goto Bail;

      sizebitmaps = bitmapSizes[PCF_GLYPH_PAD_INDEX( format )];

      FT_TRACE4(( "  padding %d implies a size of %ld\n", i, bitmapSizes[i] ));
    }

    FT_TRACE4(( "  %d bitmaps, padding index %ld\n",
                nbitmaps,
                PCF_GLYPH_PAD_INDEX( format ) ));
    FT_TRACE4(( "  bitmap size = %d\n", sizebitmaps ));

    FT_UNUSED( sizebitmaps );       /* only used for debugging */

    for ( i = 0; i < nbitmaps; i++ )
    {
      /* rough estimate */
      if ( ( offsets[i] < 0 )              ||
           ( (FT_ULong)offsets[i] > size ) )
      {
        /* bad offsets are only reported, not fatal; `bits' stays 0 */
        FT_TRACE0(( "pcf_get_bitmaps:"
                    " invalid offset to bitmap data of glyph %d\n", i ));
      }
      else
        face->metrics[i].bits = stream->pos + offsets[i];
    }

    face->bitmapsFormat = format;

  Bail:
    FT_FREE( offsets );
    return error;
  }
  /* Load the PCF_BDF_ENCODINGS table and build `face->encodings', a      */
  /* compact array mapping character codes (row * 256 + col) to glyph     */
  /* indices; entries encoded as -1 (no glyph) are skipped.               */
  static FT_Error
  pcf_get_encodings( FT_Stream  stream,
                     PCF_Face   face )
  {
    FT_Error      error  = PCF_Err_Ok;
    FT_Memory     memory = FT_FACE(face)->memory;
    FT_ULong      format, size;
    int           firstCol, lastCol;
    int           firstRow, lastRow;
    int           nencoding, encodingOffset;
    int           i, j;
    PCF_Encoding  tmpEncoding = NULL, encoding = 0;


    error = pcf_seek_to_table_type( stream,
                                    face->toc.tables,
                                    face->toc.count,
                                    PCF_BDF_ENCODINGS,
                                    &format,
                                    &size );
    if ( error )
      return error;

    error = FT_Stream_EnterFrame( stream, 14 );
    if ( error )
      return error;

    format = FT_GET_ULONG_LE();

    /* the row/column ranges and the default character code */
    if ( PCF_BYTE_ORDER( format ) == MSBFirst )
    {
      firstCol          = FT_GET_SHORT();
      lastCol           = FT_GET_SHORT();
      firstRow          = FT_GET_SHORT();
      lastRow           = FT_GET_SHORT();
      face->defaultChar = FT_GET_SHORT();
    }
    else
    {
      firstCol          = FT_GET_SHORT_LE();
      lastCol           = FT_GET_SHORT_LE();
      firstRow          = FT_GET_SHORT_LE();
      lastRow           = FT_GET_SHORT_LE();
      face->defaultChar = FT_GET_SHORT_LE();
    }

    FT_Stream_ExitFrame( stream );

    if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) )
      return PCF_Err_Invalid_File_Format;

    FT_TRACE4(( "pdf_get_encodings:\n" ));

    FT_TRACE4(( "  firstCol %d, lastCol %d, firstRow %d, lastRow %d\n",
                firstCol, lastCol, firstRow, lastRow ));

    /* the table stores one 16-bit entry per (row, col) cell in the box */
    nencoding = ( lastCol - firstCol + 1 ) * ( lastRow - firstRow + 1 );

    if ( FT_NEW_ARRAY( tmpEncoding, nencoding ) )
      return PCF_Err_Out_Of_Memory;

    error = FT_Stream_EnterFrame( stream, 2 * nencoding );
    if ( error )
      goto Bail;

    /* `i' walks the raw table; `j' counts the defined entries only */
    for ( i = 0, j = 0 ; i < nencoding; i++ )
    {
      if ( PCF_BYTE_ORDER( format ) == MSBFirst )
        encodingOffset = FT_GET_SHORT();
      else
        encodingOffset = FT_GET_SHORT_LE();

      if ( encodingOffset != -1 )
      {
        tmpEncoding[j].enc = ( ( ( i / ( lastCol - firstCol + 1 ) ) +
                                 firstRow ) * 256 ) +
                             ( ( i % ( lastCol - firstCol + 1 ) ) +
                               firstCol );

        tmpEncoding[j].glyph = (FT_Short)encodingOffset;

        FT_TRACE5(( "  code %d (0x%04X): idx %d\n",
                    tmpEncoding[j].enc, tmpEncoding[j].enc,
                    tmpEncoding[j].glyph ));

        j++;
      }
    }
    FT_Stream_ExitFrame( stream );

    /* shrink to the exact number of defined encodings */
    if ( FT_NEW_ARRAY( encoding, j ) )
      goto Bail;

    for ( i = 0; i < j; i++ )
    {
      encoding[i].enc   = tmpEncoding[i].enc;
      encoding[i].glyph = tmpEncoding[i].glyph;
    }

    face->nencodings = j;
    face->encodings  = encoding;

    FT_FREE( tmpEncoding );

    return error;

  Bail:
    FT_FREE( encoding );
    FT_FREE( tmpEncoding );
    return error;
  }
  /* Frame descriptors for the 20-byte fixed part of an accelerator       */
  /* table, in little- and big-endian flavours; the min/max bounds that   */
  /* follow it are read separately via `pcf_get_metric'.                  */
  static
  const FT_Frame_Field  pcf_accel_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_AccelRec

    FT_FRAME_START( 20 ),
      FT_FRAME_BYTE      ( noOverlap ),
      FT_FRAME_BYTE      ( constantMetrics ),
      FT_FRAME_BYTE      ( terminalFont ),
      FT_FRAME_BYTE      ( constantWidth ),
      FT_FRAME_BYTE      ( inkInside ),
      FT_FRAME_BYTE      ( inkMetrics ),
      FT_FRAME_BYTE      ( drawDirection ),
      FT_FRAME_SKIP_BYTES( 1 ),
      FT_FRAME_LONG_LE   ( fontAscent ),
      FT_FRAME_LONG_LE   ( fontDescent ),
      FT_FRAME_LONG_LE   ( maxOverlap ),
    FT_FRAME_END
  };


  static
  const FT_Frame_Field  pcf_accel_msb_header[] =
  {
#undef  FT_STRUCTURE
#define FT_STRUCTURE  PCF_AccelRec

    FT_FRAME_START( 20 ),
      FT_FRAME_BYTE      ( noOverlap ),
      FT_FRAME_BYTE      ( constantMetrics ),
      FT_FRAME_BYTE      ( terminalFont ),
      FT_FRAME_BYTE      ( constantWidth ),
      FT_FRAME_BYTE      ( inkInside ),
      FT_FRAME_BYTE      ( inkMetrics ),
      FT_FRAME_BYTE      ( drawDirection ),
      FT_FRAME_SKIP_BYTES( 1 ),
      FT_FRAME_LONG      ( fontAscent ),
      FT_FRAME_LONG      ( fontDescent ),
      FT_FRAME_LONG      ( maxOverlap ),
    FT_FRAME_END
  };
  /* Load an accelerator table (`type' is PCF_ACCELERATORS or             */
  /* PCF_BDF_ACCELERATORS) into `face->accel': global font flags,         */
  /* ascent/descent, and the min/max glyph bounds (with optional ink      */
  /* bounds when the PCF_ACCEL_W_INKBOUNDS format flag is set).           */
  static FT_Error
  pcf_get_accel( FT_Stream  stream,
                 PCF_Face   face,
                 FT_ULong   type )
  {
    FT_ULong   format, size;
    FT_Error   error = PCF_Err_Ok;
    PCF_Accel  accel = &face->accel;


    error = pcf_seek_to_table_type( stream,
                                    face->toc.tables,
                                    face->toc.count,
                                    type,
                                    &format,
                                    &size );
    if ( error )
      goto Bail;

    if ( FT_READ_ULONG_LE( format ) )
      goto Bail;

    if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT )    &&
         !PCF_FORMAT_MATCH( format, PCF_ACCEL_W_INKBOUNDS ) )
      goto Bail;

    if ( PCF_BYTE_ORDER( format ) == MSBFirst )
    {
      if ( FT_STREAM_READ_FIELDS( pcf_accel_msb_header, accel ) )
        goto Bail;
    }
    else
    {
      if ( FT_STREAM_READ_FIELDS( pcf_accel_header, accel ) )
        goto Bail;
    }

    /* the bounds are plain metric records; mask off the format bits so  */
    /* pcf_get_metric sees only the byte-order/padding information       */
    error = pcf_get_metric( stream,
                            format & ( ~PCF_FORMAT_MASK ),
                            &(accel->minbounds) );
    if ( error )
      goto Bail;

    error = pcf_get_metric( stream,
                            format & ( ~PCF_FORMAT_MASK ),
                            &(accel->maxbounds) );
    if ( error )
      goto Bail;

    if ( PCF_FORMAT_MATCH( format, PCF_ACCEL_W_INKBOUNDS ) )
    {
      error = pcf_get_metric( stream,
                              format & ( ~PCF_FORMAT_MASK ),
                              &(accel->ink_minbounds) );
      if ( error )
        goto Bail;

      error = pcf_get_metric( stream,
                              format & ( ~PCF_FORMAT_MASK ),
                              &(accel->ink_maxbounds) );
      if ( error )
        goto Bail;
    }
    else
    {
      /* no separate ink bounds in this format: fall back to the glyph   */
      /* bounds (original author's note: "I'm not sure about this")      */
      accel->ink_minbounds = accel->minbounds; /* I'm not sure about this */
      accel->ink_maxbounds = accel->maxbounds;
    }

  Bail:
    return error;
  }
  /* Derive FT_Face style flags and a human-readable style name from the  */
  /* SLANT / WEIGHT_NAME / SETWIDTH_NAME / ADD_STYLE_NAME properties.     */
  /* Falls back to "Regular" when no style information is present.        */
  static FT_Error
  pcf_interpret_style( PCF_Face  pcf )
  {
    FT_Error   error  = PCF_Err_Ok;
    FT_Face    face   = FT_FACE( pcf );
    FT_Memory  memory = face->memory;

    PCF_Property  prop;

    size_t  nn, len;
    /* style-name fragments, in output order: */
    /* [0] add-style, [1] weight, [2] slant, [3] setwidth */
    char*   strings[4] = { NULL, NULL, NULL, NULL };
    size_t  lengths[4];


    face->style_flags = 0;

    /* slant: 'O'/'o' -> Oblique, 'I'/'i' -> Italic */
    prop = pcf_find_property( pcf, "SLANT" );
    if ( prop && prop->isString                                       &&
         ( *(prop->value.atom) == 'O' || *(prop->value.atom) == 'o' ||
           *(prop->value.atom) == 'I' || *(prop->value.atom) == 'i' ) )
    {
      face->style_flags |= FT_STYLE_FLAG_ITALIC;
      strings[2] = ( *(prop->value.atom) == 'O' ||
                     *(prop->value.atom) == 'o' ) ? (char *)"Oblique"
                                                  : (char *)"Italic";
    }

    /* weight: anything starting with 'B'/'b' counts as bold */
    prop = pcf_find_property( pcf, "WEIGHT_NAME" );
    if ( prop && prop->isString                                       &&
         ( *(prop->value.atom) == 'B' || *(prop->value.atom) == 'b' ) )
    {
      face->style_flags |= FT_STYLE_FLAG_BOLD;
      strings[1] = (char *)"Bold";
    }

    /* setwidth / additional style: kept verbatim unless they start with */
    /* 'N'/'n' (presumably "Normal"/"None")                              */
    prop = pcf_find_property( pcf, "SETWIDTH_NAME" );
    if ( prop && prop->isString                                        &&
         *(prop->value.atom)                                           &&
         !( *(prop->value.atom) == 'N' || *(prop->value.atom) == 'n' ) )
      strings[3] = (char *)(prop->value.atom);

    prop = pcf_find_property( pcf, "ADD_STYLE_NAME" );
    if ( prop && prop->isString                                        &&
         *(prop->value.atom)                                           &&
         !( *(prop->value.atom) == 'N' || *(prop->value.atom) == 'n' ) )
      strings[0] = (char *)(prop->value.atom);

    /* total length including one separator/terminator per fragment */
    for ( len = 0, nn = 0; nn < 4; nn++ )
    {
      lengths[nn] = 0;
      if ( strings[nn] )
      {
        lengths[nn] = ft_strlen( strings[nn] );
        len        += lengths[nn] + 1;
      }
    }

    if ( len == 0 )
    {
      strings[0] = (char *)"Regular";
      lengths[0] = ft_strlen( strings[0] );
      len        = lengths[0] + 1;
    }

    {
      char*  s;


      if ( FT_ALLOC( face->style_name, len ) )
        return error;

      s = face->style_name;

      for ( nn = 0; nn < 4; nn++ )
      {
        char*  src = strings[nn];


        len = lengths[nn];

        if ( src == NULL )
          continue;

        /* separate elements with a space */
        if ( s != face->style_name )
          *s++ = ' ';

        ft_memcpy( s, src, len );

        /* need to convert spaces to dashes for */
        /* add_style_name and setwidth_name     */
        if ( nn == 0 || nn == 3 )
        {
          size_t  mm;


          for ( mm = 0; mm < len; mm++ )
            if (s[mm] == ' ')
              s[mm] = '-';
        }

        s += len;
      }
      *s = 0;
    }

    return error;
  }
  /* Top-level PCF loader: parse the TOC and all supported tables, then   */
  /* populate the public FT_Face fields (flags, names, bitmap strike,     */
  /* charset registry/encoding).  Any failure is reported as              */
  /* PCF_Err_Invalid_File_Format to match the original driver.            */
  FT_LOCAL_DEF( FT_Error )
  pcf_load_font( FT_Stream  stream,
                 PCF_Face   face )
  {
    FT_Error   error  = PCF_Err_Ok;
    FT_Memory  memory = FT_FACE(face)->memory;
    FT_Bool    hasBDFAccelerators;


    error = pcf_read_TOC( stream, face );
    if ( error )
      goto Exit;

    error = pcf_get_properties( stream, face );
    if ( error )
      goto Exit;

    /* Use the old accelerators if no BDF accelerators are in the file. */
    hasBDFAccelerators = pcf_has_table_type( face->toc.tables,
                                             face->toc.count,
                                             PCF_BDF_ACCELERATORS );
    if ( !hasBDFAccelerators )
    {
      error = pcf_get_accel( stream, face, PCF_ACCELERATORS );
      if ( error )
        goto Exit;
    }

    /* metrics */
    error = pcf_get_metrics( stream, face );
    if ( error )
      goto Exit;

    /* bitmaps */
    error = pcf_get_bitmaps( stream, face );
    if ( error )
      goto Exit;

    /* encodings */
    error = pcf_get_encodings( stream, face );
    if ( error )
      goto Exit;

    /* BDF style accelerators (i.e. bounds based on encoded glyphs) */
    if ( hasBDFAccelerators )
    {
      error = pcf_get_accel( stream, face, PCF_BDF_ACCELERATORS );
      if ( error )
        goto Exit;
    }

    /* XXX: TO DO: inkmetrics and glyph_names are missing */

    /* now construct the face object */
    {
      FT_Face       root = FT_FACE( face );
      PCF_Property  prop;


      root->num_faces  = 1;
      root->face_index = 0;
      root->face_flags = FT_FACE_FLAG_FIXED_SIZES |
                         FT_FACE_FLAG_HORIZONTAL  |
                         FT_FACE_FLAG_FAST_GLYPHS;

      if ( face->accel.constantWidth )
        root->face_flags |= FT_FACE_FLAG_FIXED_WIDTH;

      /* style flags + style_name from the font properties */
      if ( ( error = pcf_interpret_style( face ) ) != 0 )
         goto Exit;

      prop = pcf_find_property( face, "FAMILY_NAME" );
      if ( prop && prop->isString )
      {
        if ( FT_STRDUP( root->family_name, prop->value.atom ) )
          goto Exit;
      }
      else
        root->family_name = NULL;

      /*
       * Note: We shift all glyph indices by +1 since we must
       * respect the convention that glyph 0 always corresponds
       * to the `missing glyph'.
       *
       * This implies bumping the number of `available' glyphs by 1.
       */
      root->num_glyphs = face->nmetrics + 1;

      root->num_fixed_sizes = 1;
      if ( FT_NEW_ARRAY( root->available_sizes, 1 ) )
        goto Exit;

      {
        FT_Bitmap_Size*  bsize = root->available_sizes;
        FT_Short         resolution_x = 0, resolution_y = 0;


        FT_MEM_ZERO( bsize, sizeof ( FT_Bitmap_Size ) );

#if 0
        bsize->height = face->accel.maxbounds.ascent << 6;
#endif
        bsize->height = (FT_Short)( face->accel.fontAscent +
                                    face->accel.fontDescent );

        /* AVERAGE_WIDTH is in tenths of a pixel; round to nearest */
        prop = pcf_find_property( face, "AVERAGE_WIDTH" );
        if ( prop )
          bsize->width = (FT_Short)( ( prop->value.l + 5 ) / 10 );
        else
          bsize->width = (FT_Short)( bsize->height * 2/3 );

        prop = pcf_find_property( face, "POINT_SIZE" );
        if ( prop )
          /* convert from 722.7 decipoints to 72 points per inch */
          bsize->size =
            (FT_Pos)( ( prop->value.l * 64 * 7200 + 36135L ) / 72270L );

        prop = pcf_find_property( face, "PIXEL_SIZE" );
        if ( prop )
          bsize->y_ppem = (FT_Short)prop->value.l << 6;

        prop = pcf_find_property( face, "RESOLUTION_X" );
        if ( prop )
          resolution_x = (FT_Short)prop->value.l;

        prop = pcf_find_property( face, "RESOLUTION_Y" );
        if ( prop )
          resolution_y = (FT_Short)prop->value.l;

        /* no PIXEL_SIZE: derive the ppem from the point size and dpi */
        if ( bsize->y_ppem == 0 )
        {
          bsize->y_ppem = bsize->size;
          if ( resolution_y )
            bsize->y_ppem = bsize->y_ppem * resolution_y / 72;
        }
        if ( resolution_x && resolution_y )
          bsize->x_ppem = bsize->y_ppem * resolution_x / resolution_y;
        else
          bsize->x_ppem = bsize->y_ppem;
      }

      /* set up charset */
      {
        PCF_Property  charset_registry = 0, charset_encoding = 0;


        charset_registry = pcf_find_property( face, "CHARSET_REGISTRY" );
        charset_encoding = pcf_find_property( face, "CHARSET_ENCODING" );

        if ( charset_registry && charset_registry->isString &&
             charset_encoding && charset_encoding->isString )
        {
          if ( FT_STRDUP( face->charset_encoding,
                          charset_encoding->value.atom ) ||
               FT_STRDUP( face->charset_registry,
                          charset_registry->value.atom ) )
            goto Exit;
        }
      }
    }

  Exit:
    if ( error )
    {
      /* This is done to respect the behaviour of the original */
      /* PCF font driver.                                      */
      error = PCF_Err_Invalid_File_Format;
    }

    return error;
  }
/* END */
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# Placeholder ABI test harness: intentionally a no-op until a real ABI
# compatibility check is implemented.
echo "abi test not implemented"
| {
"pile_set_name": "Github"
} |
#include "heap.h"
#include "pagemap.h"
#include "../ds/fun.h"
#include "ponyassert.h"
#include <string.h>
#include <platform.h>
#include <dtrace.h>
// Header describing one block of actor-heap memory. A small chunk manages a
// POOL_ALIGN-sized block divided into equal slots tracked by 32-bit bitmaps;
// a large chunk manages a single allocation and uses bit 0 of the bitmaps.
typedef struct chunk_t
{
  // immutable
  pony_actor_t* actor;   // owning actor
  char* m;               // managed memory (NULL after an early large free)
  size_t size;           // small chunk: size-class index; large chunk: byte size

  // mutable
  uint32_t slots;        // bitmap: set bit = slot free / not GC-marked
  uint32_t shallow;      // bitmap of shallow (non-recursive) GC marks
  uint32_t finalisers;   // bitmap: set bit = slot has a pending finaliser

  struct chunk_t* next;  // intrusive list link
} chunk_t;
// A block is the raw backing storage for one small chunk.
typedef char block_t[POOL_ALIGN];

// Callback signature applied to each chunk by chunk_list.
typedef void (*chunk_fn)(chunk_t* chunk, uint32_t mark);

// Slot size in bytes of a small-object size class.
#define SIZECLASS_SIZE(sizeclass) (HEAP_MIN << (sizeclass))

// Mask that rounds an address down to its slot boundary for a size class.
#define SIZECLASS_MASK(sizeclass) (~(SIZECLASS_SIZE(sizeclass) - 1))

// Round an interior pointer down to the start of its slot (the "external"
// pointer to the allocation).
#define EXTERNAL_PTR(p, sizeclass) \
  ((void*)((uintptr_t)p & SIZECLASS_MASK(sizeclass)))

// Single-bit mask identifying the slot of `ext` within a chunk based at
// `base`.
#define FIND_SLOT(ext, base) \
  (1 << ((uintptr_t)((char*)(ext) - (char*)(base)) >> HEAP_MINBITS))

// Per size class: bitmap with every slot bit set, i.e. a fully free chunk.
static const uint32_t sizeclass_empty[HEAP_SIZECLASSES] =
{
  0xFFFFFFFF,
  0x55555555,
  0x11111111,
  0x01010101,
  0x00010001
};

// Same as sizeclass_empty but with the first slot's bit cleared: the state
// of a fresh chunk whose first slot has just been handed out.
static const uint32_t sizeclass_init[HEAP_SIZECLASSES] =
{
  0xFFFFFFFE,
  0x55555554,
  0x11111110,
  0x01010100,
  0x00010000
};

// Maps (size - 1) >> HEAP_MINBITS to a size-class index.
static const uint8_t sizeclass_table[HEAP_MAX / HEAP_MIN] =
{
  0, 1, 2, 2, 3, 3, 3, 3,
  4, 4, 4, 4, 4, 4, 4, 4
};

// GC tuning knobs; see ponyint_heap_setinitialgc / setnextgcfactor.
static size_t heap_initialgc = 1 << 14;
static double heap_nextgc_factor = 2.0;

#ifdef USE_MEMTRACK
/** Get the memory used by the heap.
 */
size_t ponyint_heap_mem_size(heap_t* heap)
{
  // include memory that is in use by the heap but not counted as part of
  // `used` like `chunk_t`. also, don't include "fake used" for purposes of
  // triggering GC.
  return heap->mem_used;
}

/** Get the memory allocated by the heap.
 */
size_t ponyint_heap_alloc_size(heap_t* heap)
{
  return heap->mem_allocated;
}
#endif
// Register (or, with a NULL chunk, clear) the pagemap entries for every page
// spanned by a large allocation.
static void large_pagemap(char* m, size_t size, chunk_t* chunk)
{
  ponyint_pagemap_set_bulk(m, chunk, size);
}

// Reset a chunk's mark bitmaps to `mark` at the start of a GC cycle.
static void clear_chunk(chunk_t* chunk, uint32_t mark)
{
  chunk->slots = mark;
  chunk->shallow = mark;
}
// Run the finaliser for every slot of a small chunk whose finaliser bit is
// set, clearing each bit as it is handled. Used when the whole chunk is
// going away. `mark` is unused; it only exists to satisfy chunk_fn.
static void final_small(chunk_t* chunk, uint32_t mark)
{
  // run any finalisers that need to be run
  void* p = NULL;

  uint32_t finalisers = chunk->finalisers;
  uint64_t bit = 0;

  // if there's a finaliser to run for a used slot
  while(finalisers != 0)
  {
    // lowest set bit selects the next slot with a pending finaliser
    bit = __pony_ctz(finalisers);
    p = chunk->m + (bit << HEAP_MINBITS);

    // run finaliser; the slot starts with a pointer to its type descriptor
    pony_assert((*(pony_type_t**)p)->final != NULL);
    (*(pony_type_t**)p)->final(p);

    // clear finaliser in chunk
    chunk->finalisers &= ~((uint32_t)1 << bit);

    // clear bit just found in our local finaliser map
    finalisers &= (finalisers - 1);
  }
  (void)mark;
}
// Run finalisers only for slots just freed by a sweep: bits set in both the
// finaliser map and the free-slot map. Surviving slots keep their finaliser
// bits for later.
static void final_small_freed(chunk_t* chunk)
{
  // run any finalisers that need to be run for any newly freed slots
  void* p = NULL;

  // pending finalisers whose slot is now free
  uint32_t finalisers = chunk->finalisers & chunk->slots;
  // keep only the finalisers of still-live slots
  chunk->finalisers = chunk->finalisers & ~chunk->slots;
  uint64_t bit = 0;

  // if there's a finaliser to run for a used slot
  while(finalisers != 0)
  {
    bit = __pony_ctz(finalisers);
    p = chunk->m + (bit << HEAP_MINBITS);

    // run finaliser
    pony_assert((*(pony_type_t**)p)->final != NULL);
    (*(pony_type_t**)p)->final(p);

    // clear bit just found in our local finaliser map
    finalisers &= (finalisers - 1);
  }
}
// Run the finaliser of a large chunk's single object, if one is pending.
// `mark` is unused; it only exists to satisfy chunk_fn.
static void final_large(chunk_t* chunk, uint32_t mark)
{
  if(chunk->finalisers == 1)
  {
    // run finaliser; the allocation starts with a pointer to its type
    pony_assert((*(pony_type_t**)chunk->m)->final != NULL);
    (*(pony_type_t**)chunk->m)->final(chunk->m);
    chunk->finalisers = 0;
  }
  (void)mark;
}
// Finalise and release a small chunk: run pending finalisers, drop its
// pagemap entry, then return both the block and the chunk header to the
// pool.
static void destroy_small(chunk_t* chunk, uint32_t mark)
{
  (void)mark;

  // run any finalisers that need running
  final_small(chunk, mark);

  ponyint_pagemap_set(chunk->m, NULL);
  POOL_FREE(block_t, chunk->m);
  POOL_FREE(chunk_t, chunk);
}
// Finalise and release a large chunk. chunk->m can already be NULL here if
// the allocation was released early via ponyint_heap_free.
static void destroy_large(chunk_t* chunk, uint32_t mark)
{
  (void)mark;

  // run any finalisers that need running
  final_large(chunk, mark);

  large_pagemap(chunk->m, chunk->size, NULL);

  if(chunk->m != NULL)
    ponyint_pool_free_size(chunk->size, chunk->m);

  POOL_FREE(chunk_t, chunk);
}
// Sweep one list of small chunks after marking. In the slot bitmap a set
// bit means "free": slots == 0 means every slot survived (chunk goes to the
// full list); slots == empty means nothing survived (chunk is destroyed);
// anything else goes back on the available list after running finalisers
// for the newly freed slots. Returns the number of live bytes retained.
static size_t sweep_small(chunk_t* chunk, chunk_t** avail, chunk_t** full,
#ifdef USE_MEMTRACK
  uint32_t empty, size_t size, size_t* mem_allocated, size_t* mem_used)
#else
  uint32_t empty, size_t size)
#endif
{
  size_t used = 0;
  chunk_t* next;

  while(chunk != NULL)
  {
    next = chunk->next;
    // anything not deep-marked but shallow-marked stays free
    chunk->slots &= chunk->shallow;

    if(chunk->slots == 0)
    {
#ifdef USE_MEMTRACK
      *mem_allocated += POOL_ALLOC_SIZE(chunk_t);
      *mem_allocated += POOL_ALLOC_SIZE(block_t);
      *mem_used += sizeof(chunk_t);
      *mem_used += sizeof(block_t);
#endif
      used += sizeof(block_t);
      chunk->next = *full;
      *full = chunk;
    } else if(chunk->slots == empty) {
      destroy_small(chunk, 0);
    } else {
#ifdef USE_MEMTRACK
      *mem_allocated += POOL_ALLOC_SIZE(chunk_t);
      *mem_allocated += POOL_ALLOC_SIZE(block_t);
      *mem_used += sizeof(chunk_t);
      *mem_used += sizeof(block_t);
#endif
      used += (sizeof(block_t) -
        (__pony_popcount(chunk->slots) * size));

      // run finalisers for freed slots
      final_small_freed(chunk);

      // make chunk available for allocations only after finalisers have been
      // run to prevent premature reuse of memory slots by an allocation
      // required for finaliser execution
      chunk->next = *avail;
      *avail = chunk;
    }

    chunk = next;
  }

  return used;
}
// Sweep the large-chunk list: keep chunks whose single object was marked
// (slots == 0 after intersecting with shallow marks) and destroy the rest.
// Returns the new list head and adds retained bytes to *used.
#ifdef USE_MEMTRACK
static chunk_t* sweep_large(chunk_t* chunk, size_t* used, size_t* mem_allocated,
  size_t* mem_used)
#else
static chunk_t* sweep_large(chunk_t* chunk, size_t* used)
#endif
{
  chunk_t* list = NULL;
  chunk_t* next;

  while(chunk != NULL)
  {
    next = chunk->next;
    chunk->slots &= chunk->shallow;

    if(chunk->slots == 0)
    {
      chunk->next = list;
      list = chunk;
#ifdef USE_MEMTRACK
      *mem_allocated += POOL_ALLOC_SIZE(chunk_t);
      *mem_allocated += ponyint_pool_used_size(chunk->size);
      *mem_used += sizeof(chunk_t);
      *mem_used += chunk->size;
#endif
      *used += chunk->size;
    } else {
      destroy_large(chunk, 0);
    }

    chunk = next;
  }

  return list;
}
// Apply `f(chunk, mark)` to every chunk in a singly linked list. The next
// pointer is captured before the callback runs, so `f` is free to destroy
// or re-link the chunk it is given.
static void chunk_list(chunk_fn f, chunk_t* current, uint32_t mark)
{
  for(chunk_t* chunk = current; chunk != NULL;)
  {
    chunk_t* following = chunk->next;
    f(chunk, mark);
    chunk = following;
  }
}
// Map an allocation size in bytes (1..HEAP_MAX) to its small-object
// size-class index via table lookup.
uint32_t ponyint_heap_index(size_t size)
{
  // size is in range 1..HEAP_MAX
  // change to 0..((HEAP_MAX / HEAP_MIN) - 1) and look up in table
  return sizeclass_table[(size - 1) >> HEAP_MINBITS];
}
// Set the initial GC threshold to 2^size bytes; affects heaps initialised
// afterwards.
void ponyint_heap_setinitialgc(size_t size)
{
  heap_initialgc = (size_t)1 << size;
}

// Set the growth factor used to compute the next GC threshold after a
// collection. Values below 1.0 are clamped to 1.0.
void ponyint_heap_setnextgcfactor(double factor)
{
  if(factor < 1.0)
    factor = 1.0;

  DTRACE1(GC_THRESHOLD, factor);
  heap_nextgc_factor = factor;
}

// Zero-initialise a heap and arm its first GC threshold.
void ponyint_heap_init(heap_t* heap)
{
  memset(heap, 0, sizeof(heap_t));
  heap->next_gc = heap_initialgc;
}
// Tear down a heap, destroying (finalising and freeing) every chunk it
// owns.
void ponyint_heap_destroy(heap_t* heap)
{
  chunk_list(destroy_large, heap->large, 0);

  for(int i = 0; i < HEAP_SIZECLASSES; i++)
  {
    chunk_list(destroy_small, heap->small_free[i], 0);
    chunk_list(destroy_small, heap->small_full[i], 0);
  }
}

// Run all pending finalisers on a heap without freeing any memory.
void ponyint_heap_final(heap_t* heap)
{
  chunk_list(final_large, heap->large, 0);

  for(int i = 0; i < HEAP_SIZECLASSES; i++)
  {
    chunk_list(final_small, heap->small_free[i], 0);
    chunk_list(final_small, heap->small_full[i], 0);
  }
}
// Allocate `size` bytes on `heap` on behalf of `actor`. A zero-byte request
// yields NULL; requests up to HEAP_MAX are served by the size-classed small
// allocator, larger ones by the large-chunk allocator.
void* ponyint_heap_alloc(pony_actor_t* actor, heap_t* heap, size_t size)
{
  if(size == 0)
    return NULL;

  if(size <= HEAP_MAX)
    return ponyint_heap_alloc_small(actor, heap, ponyint_heap_index(size));

  return ponyint_heap_alloc_large(actor, heap, size);
}
// Like ponyint_heap_alloc, but the allocated object is registered to have
// its finaliser run before the memory is reclaimed.
void* ponyint_heap_alloc_final(pony_actor_t* actor, heap_t* heap, size_t size)
{
  if(size == 0)
    return NULL;

  if(size <= HEAP_MAX)
    return ponyint_heap_alloc_small_final(actor, heap,
      ponyint_heap_index(size));

  return ponyint_heap_alloc_large_final(actor, heap, size);
}
// Allocate one slot from the small-object size class `sizeclass`, reusing a
// free slot from the head chunk of the free list when possible, otherwise
// pulling a new block from the pool. Returns a pointer to the slot.
void* ponyint_heap_alloc_small(pony_actor_t* actor, heap_t* heap,
  uint32_t sizeclass)
{
  chunk_t* chunk = heap->small_free[sizeclass];
  void* m;

  // If there are none in this size class, get a new one.
  if(chunk != NULL)
  {
    // Clear and use the first available slot.
    uint32_t slots = chunk->slots;
    uint32_t bit = __pony_ctz(slots);

    // Shift an unsigned literal: `1 << 31` on a plain int is undefined
    // behaviour. This also matches ponyint_heap_alloc_small_final and
    // final_small, which already use the (uint32_t) form.
    slots &= ~((uint32_t)1 << bit);
    m = chunk->m + (bit << HEAP_MINBITS);
    chunk->slots = slots;

    // Chunk is now completely full; move it to the full list.
    if(slots == 0)
    {
      heap->small_free[sizeclass] = chunk->next;
      chunk->next = heap->small_full[sizeclass];
      heap->small_full[sizeclass] = chunk;
    }
  } else {
    chunk_t* n = (chunk_t*) POOL_ALLOC(chunk_t);
    n->actor = actor;
    n->m = (char*) POOL_ALLOC(block_t);
    n->size = sizeclass;
#ifdef USE_MEMTRACK
    heap->mem_used += sizeof(chunk_t);
    heap->mem_used += POOL_ALLOC_SIZE(block_t);
    heap->mem_used -= SIZECLASS_SIZE(sizeclass);
    heap->mem_allocated += POOL_ALLOC_SIZE(chunk_t);
    heap->mem_allocated += POOL_ALLOC_SIZE(block_t);
#endif

    // note that no finaliser needs to run
    n->finalisers = 0;

    // Clear the first bit.
    n->shallow = n->slots = sizeclass_init[sizeclass];
    n->next = NULL;

    ponyint_pagemap_set(n->m, n);

    heap->small_free[sizeclass] = n;
    chunk = n;

    // Use the first slot.
    m = chunk->m;
  }

#ifdef USE_MEMTRACK
  heap->mem_used += SIZECLASS_SIZE(sizeclass);
#endif
  heap->used += SIZECLASS_SIZE(sizeclass);
  return m;
}
// Same as ponyint_heap_alloc_small, but also records in the chunk's
// finaliser bitmap that the new slot needs its finaliser run on reclaim.
void* ponyint_heap_alloc_small_final(pony_actor_t* actor, heap_t* heap,
  uint32_t sizeclass)
{
  chunk_t* chunk = heap->small_free[sizeclass];
  void* m;

  // If there are none in this size class, get a new one.
  if(chunk != NULL)
  {
    // Clear and use the first available slot.
    uint32_t slots = chunk->slots;
    uint32_t bit = __pony_ctz(slots);

    slots &= ~((uint32_t)1 << bit);
    m = chunk->m + (bit << HEAP_MINBITS);
    chunk->slots = slots;

    // note that a finaliser needs to run
    chunk->finalisers |= ((uint32_t)1 << bit);

    // chunk is now full; move it to the full list
    if(slots == 0)
    {
      heap->small_free[sizeclass] = chunk->next;
      chunk->next = heap->small_full[sizeclass];
      heap->small_full[sizeclass] = chunk;
    }
  } else {
    chunk_t* n = (chunk_t*) POOL_ALLOC(chunk_t);
    n->actor = actor;
    n->m = (char*) POOL_ALLOC(block_t);
    n->size = sizeclass;
#ifdef USE_MEMTRACK
    heap->mem_used += sizeof(chunk_t);
    heap->mem_used += POOL_ALLOC_SIZE(block_t);
    heap->mem_used -= SIZECLASS_SIZE(sizeclass);
    heap->mem_allocated += POOL_ALLOC_SIZE(chunk_t);
    heap->mem_allocated += POOL_ALLOC_SIZE(block_t);
#endif

    // note that a finaliser needs to run (slot 0)
    n->finalisers = 1;

    // Clear the first bit.
    n->shallow = n->slots = sizeclass_init[sizeclass];
    n->next = NULL;

    ponyint_pagemap_set(n->m, n);

    heap->small_free[sizeclass] = n;
    chunk = n;

    // Use the first slot.
    m = chunk->m;
  }

#ifdef USE_MEMTRACK
  heap->mem_used += SIZECLASS_SIZE(sizeclass);
#endif
  heap->used += SIZECLASS_SIZE(sizeclass);
  return m;
}
// Allocate a large object (> HEAP_MAX bytes) in its own chunk. The request
// is rounded up to a pool size and the chunk is registered in the pagemap
// for every page it spans.
void* ponyint_heap_alloc_large(pony_actor_t* actor, heap_t* heap, size_t size)
{
  size = ponyint_pool_adjust_size(size);

  chunk_t* chunk = (chunk_t*) POOL_ALLOC(chunk_t);
  chunk->actor = actor;
  chunk->size = size;
  chunk->m = (char*) ponyint_pool_alloc_size(size);
#ifdef USE_MEMTRACK
  heap->mem_used += sizeof(chunk_t);
  heap->mem_used += chunk->size;
  heap->mem_allocated += POOL_ALLOC_SIZE(chunk_t);
  heap->mem_allocated += ponyint_pool_used_size(size);
#endif
  // bit 0 clear = the single object is live
  chunk->slots = 0;
  chunk->shallow = 0;

  // note that no finaliser needs to run
  chunk->finalisers = 0;

  large_pagemap(chunk->m, size, chunk);

  chunk->next = heap->large;
  heap->large = chunk;
  heap->used += chunk->size;

  return chunk->m;
}

// Same as ponyint_heap_alloc_large, but marks the object as having a
// finaliser to run before the memory is reclaimed.
void* ponyint_heap_alloc_large_final(pony_actor_t* actor, heap_t* heap,
  size_t size)
{
  size = ponyint_pool_adjust_size(size);

  chunk_t* chunk = (chunk_t*) POOL_ALLOC(chunk_t);
  chunk->actor = actor;
  chunk->size = size;
  chunk->m = (char*) ponyint_pool_alloc_size(size);
#ifdef USE_MEMTRACK
  heap->mem_used += sizeof(chunk_t);
  heap->mem_used += chunk->size;
  heap->mem_allocated += POOL_ALLOC_SIZE(chunk_t);
  heap->mem_allocated += ponyint_pool_used_size(size);
#endif
  chunk->slots = 0;
  chunk->shallow = 0;

  // note that a finaliser needs to run
  chunk->finalisers = 1;

  large_pagemap(chunk->m, size, chunk);

  chunk->next = heap->large;
  heap->large = chunk;
  heap->used += chunk->size;

  return chunk->m;
}
// Grow (or reuse) a previously pony-allocated object. Only exact external
// pointers can be reused in place; otherwise a fresh allocation is made and
// the usable remainder of the old object is copied over.
void* ponyint_heap_realloc(pony_actor_t* actor, heap_t* heap, void* p,
  size_t size)
{
  if(p == NULL)
    return ponyint_heap_alloc(actor, heap, size);

  chunk_t* chunk = ponyint_pagemap_get(p);

  // We can't realloc memory that wasn't pony_alloc'ed since we can't know how
  // much to copy from the previous location.
  pony_assert(chunk != NULL);

  size_t oldsize;

  if(chunk->size < HEAP_SIZECLASSES)
  {
    // Previous allocation was a ponyint_heap_alloc_small.
    void* ext = EXTERNAL_PTR(p, chunk->size);

    // If the new allocation is a ponyint_heap_alloc_small and the pointer is
    // not an internal pointer, we may be able to reuse this memory. If it is
    // an internal pointer, we know where the old allocation begins but not
    // where it ends, so we cannot reuse this memory.
    if((size <= HEAP_MAX) && (p == ext))
    {
      uint32_t sizeclass = ponyint_heap_index(size);

      // If the new allocation is the same size or smaller, return the old
      // one.
      if(sizeclass <= chunk->size)
        return p;
    }

    // bytes available from p to the end of its slot
    oldsize = SIZECLASS_SIZE(chunk->size) - ((uintptr_t)p - (uintptr_t)ext);
  } else {
    // Previous allocation was a ponyint_heap_alloc_large.
    if((size <= chunk->size) && (p == chunk->m))
    {
      // If the new allocation is the same size or smaller, and this is not an
      // internal pointer, return the old one. We can't reuse internal
      // pointers in large allocs for the same reason as small ones.
      return p;
    }

    oldsize = chunk->size - ((uintptr_t)p - (uintptr_t)chunk->m);
  }

  // Determine how much memory to copy.
  if(oldsize > size)
    oldsize = size;

  // Get new memory and copy from the old memory.
  void* q = ponyint_heap_alloc(actor, heap, size);
  memcpy(q, p, oldsize);
  return q;
}
// Account externally-allocated bytes against this heap's GC budget.
void ponyint_heap_used(heap_t* heap, size_t size)
{
  heap->used += size;
}

// Begin a GC cycle. Returns false if used memory has not reached the
// next-GC threshold. Otherwise every chunk's mark bitmaps are reset to
// "all free" (sizeclass_empty per small size class, 1 for a large chunk's
// single slot) and counters are zeroed for the sweep to rebuild.
bool ponyint_heap_startgc(heap_t* heap)
{
  if(heap->used <= heap->next_gc)
    return false;

  for(int i = 0; i < HEAP_SIZECLASSES; i++)
  {
    uint32_t mark = sizeclass_empty[i];
    chunk_list(clear_chunk, heap->small_free[i], mark);
    chunk_list(clear_chunk, heap->small_full[i], mark);
  }

  chunk_list(clear_chunk, heap->large, 1);

  // reset used to zero
  heap->used = 0;
#ifdef USE_MEMTRACK
  heap->mem_allocated = 0;
  heap->mem_used = 0;
#endif

  return true;
}
// Mark the object containing `p` during GC tracing; a clear bit in the
// relevant bitmap means "marked". Returns true if it was already marked.
bool ponyint_heap_mark(chunk_t* chunk, void* p)
{
  // If it's an internal pointer, we shallow mark it instead. This will
  // preserve the external pointer, but allow us to mark and recurse the
  // external pointer in the same pass.
  bool marked;

  if(chunk->size >= HEAP_SIZECLASSES)
  {
    // large chunk: single object, whole bitmap acts as its one slot
    marked = chunk->slots == 0;

    if(p == chunk->m)
      chunk->slots = 0;
    else
      chunk->shallow = 0;
  } else {
    // Calculate the external pointer.
    void* ext = EXTERNAL_PTR(p, chunk->size);

    // Shift to account for smallest allocation size.
    uint32_t slot = FIND_SLOT(ext, chunk->m);

    // Check if it was already marked.
    marked = (chunk->slots & slot) == 0;

    // A clear bit is in-use, a set bit is available.
    if(p == ext)
      chunk->slots &= ~slot;
    else
      chunk->shallow &= ~slot;
  }

  return marked;
}

// Shallow-mark the object containing `p`: keep it alive this cycle without
// treating it as traced.
void ponyint_heap_mark_shallow(chunk_t* chunk, void* p)
{
  if(chunk->size >= HEAP_SIZECLASSES)
  {
    chunk->shallow = 0;
  } else {
    // Calculate the external pointer.
    void* ext = EXTERNAL_PTR(p, chunk->size);

    // Shift to account for smallest allocation size.
    uint32_t slot = FIND_SLOT(ext, chunk->m);

    // A clear bit is in-use, a set bit is available.
    chunk->shallow &= ~slot;
  }
}

// Report whether the object containing `p` carries a deep or shallow mark.
bool ponyint_heap_ismarked(chunk_t* chunk, void* p)
{
  if(chunk->size >= HEAP_SIZECLASSES)
    return (chunk->slots & chunk->shallow) == 0;

  // Shift to account for smallest allocation size.
  uint32_t slot = FIND_SLOT(p, chunk->m);

  // Check if the slot is marked or shallow marked.
  return (chunk->slots & chunk->shallow & slot) == 0;
}
// Explicitly free an object ahead of GC. A large chunk releases its backing
// memory immediately (m becomes NULL, slots becomes 1 so the next sweep
// reclaims the header). For small chunks only exact external pointers are
// freed: the finaliser runs, then the slot bit is set (set bit = free).
void ponyint_heap_free(chunk_t* chunk, void* p)
{
  if(chunk->size >= HEAP_SIZECLASSES)
  {
    if(p == chunk->m)
    {
      // run finaliser if needed
      final_large(chunk, 0);

      ponyint_pool_free_size(chunk->size, chunk->m);
      chunk->m = NULL;
      chunk->slots = 1;
    }
    return;
  }

  // Calculate the external pointer.
  void* ext = EXTERNAL_PTR(p, chunk->size);

  if(p == ext)
  {
    // Shift to account for smallest allocation size.
    uint32_t slot = FIND_SLOT(ext, chunk->m);

    // check if there's a finaliser to run
    if((chunk->finalisers & slot) != 0)
    {
      // run finaliser
      (*(pony_type_t**)p)->final(p);

      // clear finaliser
      chunk->finalisers &= ~slot;
    }

    // free slot
    chunk->slots |= slot;
  }
}
// Finish a GC cycle: sweep every small size class and the large-chunk
// list, rebuild the free/full lists, then compute the next GC threshold
// from the surviving bytes.
void ponyint_heap_endgc(heap_t* heap)
{
  size_t used = 0;
#ifdef USE_MEMTRACK
  size_t mem_allocated = 0;
  size_t mem_used = 0;
#endif

  for(int i = 0; i < HEAP_SIZECLASSES; i++)
  {
    // detach both lists so the sweep can rebuild them in place
    chunk_t* list1 = heap->small_free[i];
    chunk_t* list2 = heap->small_full[i];

    heap->small_free[i] = NULL;
    heap->small_full[i] = NULL;

    chunk_t** avail = &heap->small_free[i];
    chunk_t** full = &heap->small_full[i];

    size_t size = SIZECLASS_SIZE(i);
    uint32_t empty = sizeclass_empty[i];

#ifdef USE_MEMTRACK
    used += sweep_small(list1, avail, full, empty, size,
      &mem_allocated, &mem_used);
    used += sweep_small(list2, avail, full, empty, size,
      &mem_allocated, &mem_used);
#else
    used += sweep_small(list1, avail, full, empty, size);
    used += sweep_small(list2, avail, full, empty, size);
#endif
  }

#ifdef USE_MEMTRACK
  heap->large = sweep_large(heap->large, &used, &mem_allocated, &mem_used);
#else
  heap->large = sweep_large(heap->large, &used);
#endif

  // Foreign object sizes will have been added to heap->used already. Here we
  // add local object sizes as well and set the next gc point for when memory
  // usage has increased.
  heap->used += used;
#ifdef USE_MEMTRACK
  heap->mem_allocated += mem_allocated;
  heap->mem_used += mem_used;
#endif
  heap->next_gc = (size_t)((double)heap->used * heap_nextgc_factor);

  // never collect before the configured floor
  if(heap->next_gc < heap_initialgc)
    heap->next_gc = heap_initialgc;
}
// Return the actor that owns a chunk.
pony_actor_t* ponyint_heap_owner(chunk_t* chunk)
{
  // FIX: false sharing
  // reading from something that will never be written
  // but is on a cache line that will often be written
  // called during tracing
  // actual chunk only needed for GC tracing
  // all other tracing only needs the owner
  // so the owner needs the chunk and everyone else just needs the owner
  return chunk->actor;
}

// Return a chunk's allocation capacity in bytes: the raw size for a large
// chunk, the size-class slot size for a small one.
size_t ponyint_heap_size(chunk_t* chunk)
{
  if(chunk->size >= HEAP_SIZECLASSES)
    return chunk->size;

  return SIZECLASS_SIZE(chunk->size);
}
// C99 requires inline symbols to be present in a compilation unit for un-
// optimized code
uint32_t __pony_clzzu(size_t x);
uint32_t __pony_ffszu(size_t x);
uint32_t __pony_ctz(uint32_t x);
uint32_t __pony_ffsll(uint64_t x);
uint32_t __pony_clzll(uint64_t x);
uint32_t __pony_popcount(uint32_t x);
| {
"pile_set_name": "Github"
} |
/*!
* @license
* Copyright 2019 Alfresco, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Injectable } from '@angular/core';
import { ProjectApi } from '../../api/project-api.interface';
import { Observable } from 'rxjs';
import { Project, PROJECT, Release, Pagination, ReleaseEntry, ServerSideSorting, SearchQuery, CollaboratorEntry, FetchQueries } from '../../api/types';
import { map } from 'rxjs/operators';
import { RequestApiHelper } from './request-api.helper';
import { ValidationErrors } from '../../interfaces/validation-errors.interface';
import { PaginatedEntries } from '@alfresco/js-api';
import { IdentityUserModel } from '@alfresco/adf-core';
/** Shape of a project entry as returned by the modeling-service backend. */
export interface BackendProject {
    id: string;
    name: string;
    description: string;
    creationDate: Date;
    createdBy: string;
    lastModifiedDate: Date;
    lastModifiedBy: string;
    version: string;
}
/**
 * ProjectApi implementation backed by the Alfresco modeling-service REST
 * endpoints under /modeling-service/v1/projects.
 */
@Injectable()
export class ACMProjectApi implements ProjectApi {

    constructor(
        private requestApiHelper: RequestApiHelper
    ) {}

    /** Fetch a single project by id. */
    public get(projectId: string): Observable<Project> {
        return this.requestApiHelper
            .get(`/modeling-service/v1/projects/${projectId}`)
            .pipe(
                map((response: any) => response.entry),
                map(this.createProject.bind(this))
            );
    }

    /** Run server-side project validation; the payload is requested as a blob. */
    public validate(projectId: string): Observable<void | ValidationErrors> {
        return this.requestApiHelper.get(`/modeling-service/v1/projects/${projectId}/validate`, { responseType: 'blob' });
    }

    /** Create a project from the given partial payload. */
    public create(project: Partial<Project>): Observable<Project> {
        return this.requestApiHelper
            .post('/modeling-service/v1/projects/', { bodyParam: project })
            .pipe(
                map((response: any) => response.entry),
                map(this.createProject.bind(this))
            );
    }

    /** Update an existing project. */
    public update(projectId: string, project: Partial<Project>): Observable<Project> {
        return this.requestApiHelper
            .put(`/modeling-service/v1/projects/${projectId}`, { bodyParam: project })
            .pipe(
                map((response: any) => response.entry),
                map(this.createProject.bind(this))
            );
    }

    /** Delete a project by id. */
    public delete(projectId: string): Observable<void> {
        return this.requestApiHelper.delete(`/modeling-service/v1/projects/${projectId}`);
    }

    /**
     * Import a project from an exported archive; the optional `name`
     * overrides the name stored in the archive.
     */
    public import(file: File, name?: string): Observable<Partial<Project>> {
        const postData: any = { formParams: { 'file': file }, contentTypes: ['multipart/form-data'] };
        if (name) {
            postData.formParams.name = name;
        }
        return this.requestApiHelper
            .post(`/modeling-service/v1/projects/import`, postData)
            .pipe(
                map((response: any) => response.entry),
                map(this.createProject.bind(this))
            );
    }

    /** Export a project as a binary archive (no attachment disposition). */
    public export(projectId: string): Observable<Blob> {
        return this.requestApiHelper.get(
            `/modeling-service/v1/projects/${projectId}/export`,
            { queryParams: { 'attachment': false }, responseType: 'blob' }
        );
    }

    /**
     * List projects with paging, sorting (default: name ascending) and a
     * single search filter (default: match all names).
     */
    public getAll(
        fetchQueries: FetchQueries = {},
        sorting: ServerSideSorting = { key: 'name', direction: 'asc' },
        search: SearchQuery = { key: 'name', value: '' }
    ): Observable<PaginatedEntries<Project>> {
        const queryParams = {
            ...fetchQueries,
            sort: `${sorting.key},${sorting.direction}`,
            [search.key]: search.value
        };
        return this.requestApiHelper
            .get('/modeling-service/v1/projects', { queryParams })
            .pipe(
                map((nodePaging: any) => {
                    return {
                        pagination: nodePaging.list.pagination,
                        entries: nodePaging.list.entries.map(entry => this.createProject(entry.entry))
                    };
                })
            );
    }

    // Tag a backend project with the client-side PROJECT type discriminator.
    private createProject(backendProject: BackendProject): Project {
        const type = PROJECT;

        return {
            type,
            ...backendProject
        };
    }

    /** Create a new release of the project. */
    public release(projectId: string): Observable<Release> {
        return this.requestApiHelper
            .post(`/modeling-service/v1/projects/${projectId}/releases`)
            .pipe(
                map((response: any) => response.entry)
            );
    }

    /** List a project's releases (default: newest first, all versions). */
    public getProjectReleases(
        projectId: string,
        pagination: Partial<Pagination> = {},
        sorting: ServerSideSorting = { key: 'creationDate', direction: 'desc' },
        showAllVersions: boolean = true
    ): Observable<PaginatedEntries<ReleaseEntry>> {
        const queryParams = {
            showAllVersions: showAllVersions,
            ...pagination,
            sort: `${sorting.key},${sorting.direction}`
        };
        return this.requestApiHelper
            .get(`/modeling-service/v1/projects/${projectId}/releases`, { queryParams: queryParams })
            .pipe(
                map((nodePaging: any) => {
                    return {
                        pagination: nodePaging.list.pagination,
                        entries: nodePaging.list.entries
                    };
                })
            );
    }

    /** List the users collaborating on a project. */
    public getCollaborators(projectId: string): Observable<PaginatedEntries<CollaboratorEntry>> {
        return this.requestApiHelper
            .get(`/modeling-service/v1/projects/${projectId}/collaborators`)
            .pipe(
                map((nodePaging: any) => {
                    return {
                        pagination: nodePaging.list.pagination,
                        entries: nodePaging.list.entries
                    };
                })
            );
    }

    /** Add a user (addressed by username) as collaborator on a project. */
    public addCollaborator(projectId: string, collaborator: IdentityUserModel): Observable<CollaboratorEntry> {
        return this.requestApiHelper
            .put(`/modeling-service/v1/projects/${projectId}/collaborators/${collaborator.username}`);
    }

    /** Remove a collaborator (addressed by username) from a project. */
    public removeCollaborator(projectId: string, collaborator: IdentityUserModel): Observable<void> {
        return this.requestApiHelper
            .delete(`/modeling-service/v1/projects/${projectId}/collaborators/${collaborator.username}`);
    }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<shape xmlns:android="http://schemas.android.com/apk/res/android"
    android:shape="rectangle">
    <!-- Fill colour -->
    <solid android:color="@color/ios_dialog_button_press"></solid>
    <!-- Corner radius of the rectangle (top corners only) -->
    <corners
        android:topRightRadius="11dp"
        android:topLeftRadius="11dp"/>
</shape>
"pile_set_name": "Github"
} |
'use strict';
const fs = require('fs');
const path = require('path');
const fastGlob = require('fast-glob');
const gitIgnore = require('ignore');
const pify = require('pify');
const slash = require('slash');
// Directory trees that are never scanned for .gitignore files.
const DEFAULT_IGNORE = [
	'**/node_modules/**',
	'**/bower_components/**',
	'**/flow-typed/**',
	'**/coverage/**',
	'**/.git'
];

// Promisified fs.readFile.
const readFileP = pify(fs.readFile);
const mapGitIgnorePatternTo = base => ignore => {
if (ignore.startsWith('!')) {
return '!' + path.posix.join(base, ignore.slice(1));
}
return path.posix.join(base, ignore);
};
// Turn the text of one .gitignore file into an array of patterns re-rooted
// relative to options.cwd; blank lines and "#" comment lines are dropped.
const parseGitIgnore = (content, options) => {
	// directory of the .gitignore, as a posix-style path relative to cwd
	const base = slash(path.relative(options.cwd, path.dirname(options.fileName)));

	return content
		.split(/\r?\n/)
		.filter(Boolean)
		.filter(line => line.charAt(0) !== '#')
		.map(mapGitIgnorePatternTo(base));
};
// Fold the parsed patterns of every .gitignore file into a single `ignore`
// matcher instance.
const reduceIgnore = files => {
	return files.reduce((ignores, file) => {
		ignores.add(parseGitIgnore(file.content, {
			cwd: file.cwd,
			fileName: file.filePath
		}));
		return ignores;
	}, gitIgnore());
};
// Build a predicate reporting whether an absolute path `p` is ignored,
// matched relative to `cwd` with posix-normalised separators.
// NOTE(review): "Predecate" is a typo for "predicate"; renaming would touch
// every call site, so the existing name is kept.
const getIsIgnoredPredecate = (ignores, cwd) => {
	return p => ignores.ignores(slash(path.relative(cwd, p)));
};
// Read one .gitignore (async) and package its content with cwd and full
// path, the shape parseGitIgnore/reduceIgnore expect.
const getFile = (file, cwd) => {
	const filePath = path.join(cwd, file);
	return readFileP(filePath, 'utf8')
		.then(content => ({
			content,
			cwd,
			filePath
		}));
};

// Synchronous counterpart of getFile.
const getFileSync = (file, cwd) => {
	const filePath = path.join(cwd, file);
	const content = fs.readFileSync(filePath, 'utf8');
	return {
		content,
		cwd,
		filePath
	};
};
// Fill in the defaults for the public API options: no extra ignore
// patterns, and the current working directory.
const normalizeOptions = (options = {}) => ({
	ignore: options.ignore || [],
	cwd: options.cwd || process.cwd()
});
// Find every .gitignore under options.cwd (skipping DEFAULT_IGNORE and
// options.ignore), parse them, and resolve to a predicate that reports
// whether an absolute path is git-ignored.
module.exports = options => {
	options = normalizeOptions(options);

	return fastGlob('**/.gitignore', {
		ignore: DEFAULT_IGNORE.concat(options.ignore),
		cwd: options.cwd
	})
		.then(paths => Promise.all(paths.map(file => getFile(file, options.cwd))))
		.then(files => reduceIgnore(files))
		.then(ignores => getIsIgnoredPredecate(ignores, options.cwd));
};

// Synchronous variant of the default export.
module.exports.sync = options => {
	options = normalizeOptions(options);

	const paths = fastGlob.sync('**/.gitignore', {
		ignore: DEFAULT_IGNORE.concat(options.ignore),
		cwd: options.cwd
	});
	const files = paths.map(file => getFileSync(file, options.cwd));
	const ignores = reduceIgnore(files);
	return getIsIgnoredPredecate(ignores, options.cwd);
};
| {
"pile_set_name": "Github"
} |
<span class="green"><%= @room.name %></span> (<%= @room.x %>, <%= @room.y %>, layer: <%= @room.map_layer %>)
<%= Format.underline(@room.name) %>
<%= @room |> description() %>
<br />
<span class="white">Who</span>: <%= @room |> who(@conn) %>
<span class="white">Items</span>: <%= @room |> items(@conn) %>
<%= @room |> listen() %>
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_172) on Wed Mar 13 10:37:29 EDT 2019 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>UnhandledParameterTypeException (jrugged-spring 4.0.0-SNAPSHOT API)</title>
<meta name="date" content="2019-03-13">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="UnhandledParameterTypeException (jrugged-spring 4.0.0-SNAPSHOT API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/UnhandledParameterTypeException.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/fishwife/jrugged/spring/jmx/MBeanValueConverter.html" title="class in org.fishwife.jrugged.spring.jmx"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/fishwife/jrugged/spring/jmx/WebMBeanAdapter.html" title="class in org.fishwife.jrugged.spring.jmx"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/fishwife/jrugged/spring/jmx/UnhandledParameterTypeException.html" target="_top">Frames</a></li>
<li><a href="UnhandledParameterTypeException.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#methods.inherited.from.class.java.lang.Throwable">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.fishwife.jrugged.spring.jmx</div>
<h2 title="Class UnhandledParameterTypeException" class="title">Class UnhandledParameterTypeException</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li><a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">java.lang.Object</a></li>
<li>
<ul class="inheritance">
<li><a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true" title="class or interface in java.lang">java.lang.Throwable</a></li>
<li>
<ul class="inheritance">
<li><a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">java.lang.Exception</a></li>
<li>
<ul class="inheritance">
<li><a href="http://docs.oracle.com/javase/6/docs/api/javax/management/JMException.html?is-external=true" title="class or interface in javax.management">javax.management.JMException</a></li>
<li>
<ul class="inheritance">
<li>org.fishwife.jrugged.spring.jmx.UnhandledParameterTypeException</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd><a href="http://docs.oracle.com/javase/6/docs/api/java/io/Serializable.html?is-external=true" title="class or interface in java.io">Serializable</a></dd>
</dl>
<hr>
<br>
<pre>public class <span class="typeNameLabel">UnhandledParameterTypeException</span>
extends <a href="http://docs.oracle.com/javase/6/docs/api/javax/management/JMException.html?is-external=true" title="class or interface in javax.management">JMException</a></pre>
<div class="block">Thrown when an unhandled parameter type is found.</div>
<dl>
<dt><span class="seeLabel">See Also:</span></dt>
<dd><a href="../../../../../serialized-form.html#org.fishwife.jrugged.spring.jmx.UnhandledParameterTypeException">Serialized Form</a></dd>
</dl>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><span class="memberNameLink"><a href="../../../../../org/fishwife/jrugged/spring/jmx/UnhandledParameterTypeException.html#UnhandledParameterTypeException-java.lang.String-">UnhandledParameterTypeException</a></span>(<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> reason)</code> </td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Throwable">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true" title="class or interface in java.lang">Throwable</a></h3>
<code><a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-" title="class or interface in java.lang">addSuppressed</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--" title="class or interface in java.lang">fillInStackTrace</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#getCause--" title="class or interface in java.lang">getCause</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--" title="class or interface in java.lang">getLocalizedMessage</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#getMessage--" title="class or interface in java.lang">getMessage</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--" title="class or interface in java.lang">getStackTrace</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--" title="class or interface in java.lang">getSuppressed</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-" title="class or interface in java.lang">initCause</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--" title="class or interface in java.lang">printStackTrace</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-" title="class or interface in java.lang">printStackTrace</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintWriter-" title="class or interface in java.lang">printStackTrace</a>, <a 
href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#setStackTrace-java.lang.StackTraceElement:A-" title="class or interface in java.lang">setStackTrace</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Throwable.html?is-external=true#toString--" title="class or interface in java.lang">toString</a></code></li>
</ul>
<ul class="blockList">
<li class="blockList"><a name="methods.inherited.from.class.java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true" title="class or interface in java.lang">Object</a></h3>
<code><a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#clone--" title="class or interface in java.lang">clone</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-" title="class or interface in java.lang">equals</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#finalize--" title="class or interface in java.lang">finalize</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#getClass--" title="class or interface in java.lang">getClass</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#hashCode--" title="class or interface in java.lang">hashCode</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#notify--" title="class or interface in java.lang">notify</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#wait--" title="class or interface in java.lang">wait</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#wait-long-" title="class or interface in java.lang">wait</a>, <a href="http://docs.oracle.com/javase/6/docs/api/java/lang/Object.html?is-external=true#wait-long-int-" title="class or interface in java.lang">wait</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor.detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="UnhandledParameterTypeException-java.lang.String-">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>UnhandledParameterTypeException</h4>
<pre>public UnhandledParameterTypeException(<a href="http://docs.oracle.com/javase/6/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a> reason)</pre>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/UnhandledParameterTypeException.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/fishwife/jrugged/spring/jmx/MBeanValueConverter.html" title="class in org.fishwife.jrugged.spring.jmx"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/fishwife/jrugged/spring/jmx/WebMBeanAdapter.html" title="class in org.fishwife.jrugged.spring.jmx"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/fishwife/jrugged/spring/jmx/UnhandledParameterTypeException.html" target="_top">Frames</a></li>
<li><a href="UnhandledParameterTypeException.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li><a href="#constructor.summary">Constr</a> | </li>
<li><a href="#methods.inherited.from.class.java.lang.Throwable">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor.detail">Constr</a> | </li>
<li>Method</li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2019. All Rights Reserved.</small></p>
</body>
</html>
| {
"pile_set_name": "Github"
} |
/* Oriented zero-offset migration. */
/*
Copyright (C) 2009 University of Texas at Austin
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>

#include <rsf.h>
#include <assert.h>
float ** t0, *m0, *minit, *m;
/* Read reference data from a binary sample file.
 *
 * File layout: nx*nz slowness floats, then an int NM, then three model
 * vectors of NM floats each (reference m0, initial minit, final m).
 *
 * fname - path of the binary file to read
 * nx,nz - grid dimensions of the slowness field
 * s     - preallocated [nx][nz] array receiving the slowness values
 *
 * Side effects: allocates and fills the file-scope globals m0, minit
 * and m (NM floats each).
 * Returns NM. Exits with a failure status if the file cannot be opened
 * (the original code printed a message and then dereferenced the NULL
 * stream). */
int read4file_ref(char *fname, int nx, int nz, float **s)
{
    int i, NM;
    FILE *fp;
    size_t ngrid = (size_t) nx * (size_t) nz;

    if ((fp = fopen(fname, "rb")) == NULL) {
        fprintf(stderr, "Cannot open file %s.\n", fname);
        exit(EXIT_FAILURE);
    }

    /* slowness grid */
    if (fread(s[0], sizeof(float), ngrid, fp) != ngrid) {
        if (feof(fp))
            printf("File read error - too small.");
    }

    /* number of model samples */
    if (fread(&i, sizeof(int), 1, fp) != 1) {
        if (feof(fp))
            printf("File read error - nm.");
    }
    NM = i;

    m0    = sf_floatalloc(NM);
    minit = sf_floatalloc(NM);
    m     = sf_floatalloc(NM);

    /* reference model */
    if (fread(m0, sizeof(float), (size_t) NM, fp) != (size_t) NM) {
        if (feof(fp))
            printf("File read error - too small.");
    }
    /* initial model */
    if (fread(minit, sizeof(float), (size_t) NM, fp) != (size_t) NM) {
        if (feof(fp))
            printf("File read error - too small.");
    }
    /* final model */
    if (fread(m, sizeof(float), (size_t) NM, fp) != (size_t) NM) {
        if (feof(fp))
            printf("File read error - too small.");
    }

    fclose(fp);
    return NM;
}
/*************************************************/
/*************************************************/
/* Write a standard header for a 2-D dataset (a single n2 x n1 plane)
 * to the given RSF output file: axis sizes, sampling intervals and
 * zero origins. */
void putf(sf_file out, int n2, int n1, float d2, float d1)
{
    /* axis sizes: one plane of n2 traces by n1 samples */
    sf_putint(out, "n3", 1);
    sf_putint(out, "n2", n2);
    sf_putint(out, "n1", n1);
    /* sampling intervals (axis 3 is degenerate) */
    sf_putfloat(out, "d3", 0);
    sf_putfloat(out, "d2", d2);
    sf_putfloat(out, "d1", d1);
    /* all origins at zero */
    sf_putfloat(out, "o3", 0);
    sf_putfloat(out, "o2", 0);
    sf_putfloat(out, "o1", 0);
}
/* Entry point: reads a sampled reference file (sample= parameter) and
 * writes the slowness grid to "out" plus the reference, initial and
 * final model vectors to the "correct", "init" and "final" outputs.
 *
 * Fixes over the original: the sf_getstring("sample") result is checked
 * for NULL before use, the misleading `nm = 1e6` initializer is gone
 * (nm is always assigned by read4file_ref), and t0/fname are freed. */
int main(int argc, char* argv[])
{
    int nm;
    int n, nx, nz;
    float dx, dz;
    sf_file so, so1, so2, so3;
    char *fname = NULL;

    sf_init(argc, argv);

    so  = sf_output("out");
    so1 = sf_output("correct");
    so2 = sf_output("init");
    so3 = sf_output("final");

    if (!sf_getint("N", &n)) sf_error("No N= ");
    nx = nz = n;
    dx = dz = 1.f / (n - 1);

    fname = sf_getstring("sample");
    /* sample= is required: read4file_ref would pass NULL to fopen */
    if (NULL == fname) sf_error("Need sample=");

    t0 = sf_floatalloc2(nz, nx);

    nm = read4file_ref(fname, nx, nz, t0);

    putf(so, nx, nz, dx, dz);
    putf(so1, 1, nm, dx, dz);
    putf(so2, 1, nm, dx, dz);
    putf(so3, 1, nm, dx, dz);

    sf_floatwrite(t0[0], nx*nz, so);
    sf_floatwrite(m0, nm, so1);
    sf_floatwrite(minit, nm, so2);
    sf_floatwrite(m, nm, so3);

    sf_close();

    free(t0[0]); free(t0);
    free(m0);
    free(m);
    free(minit);
    free(fname);

    exit(0);
}
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import binascii
import logging
import re
import sys
from django.http import HttpResponseBadRequest
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.lib.rest.http_client import HttpClient, RestException
from desktop.lib.rest import resource
from desktop.conf import VCS
from desktop.lib.vcs.apis.base_api import Api, GIT_READ_ONLY
from desktop.lib.vcs.github_client import GithubClientException
if sys.version_info[0] > 2:
import urllib.request, urllib.error
from urllib.parse import unquote as urllib_unquote, urlsplit as lib_urlsplit, urlunsplit as lib_urlunsplit
else:
from urllib import unquote as urllib_unquote
from urlparse import urlsplit as lib_urlsplit, urlunsplit as lib_urlunsplit
LOG = logging.getLogger(__name__)
class GithubReadOnlyApi(Api):
  """
  Read-only browsing of a GitHub repository through the REST API v3.

  https://developer.github.com/v3/
  """

  # GitHub account name: 1-39 alphanumerics, single dashes allowed between
  # characters. Raw strings: the originals were plain strings containing
  # invalid escape sequences (\. \: \-), a SyntaxWarning on modern Python.
  OWNER_RE = r"(?P<owner>[A-Za-z0-9](?:-?[A-Za-z0-9]){0,38})"
  REPO_RE = r"(?P<repo>[\w\.@\:\-~]+)"
  BRANCH_RE = r"(?P<branch>[\w\.@\:\-~]+)"

  DEFAULT_SCOPES = ['repo', 'user']

  def __init__(self):
    # Remote repo URL and API endpoint come from the GIT_READ_ONLY section
    # of the VCS configuration.
    self._remote_url = VCS[GIT_READ_ONLY].REMOTE_URL.get().strip('/')
    self._api_url = VCS[GIT_READ_ONLY].API_URL.get().strip('/')
    self._client = HttpClient(self._api_url, logger=LOG)
    self._root = resource.Resource(self._client)

  def contents(self, request):
    """
    GET /repos/:owner/:repo/contents/:path
    https://developer.github.com/v3/repos/contents/#get-contents

    Returns a JSON response with either a directory listing ('files') or a
    decoded file body ('content'), depending on the fileType query param.
    """
    response = {'status': -1}
    response['fileType'] = filetype = request.GET.get('fileType', 'dir')
    filepath = request.GET.get('path', '/')
    filepath = self._clean_path(filepath)
    if self._remote_url:
      owner, repo, branch = self.parse_github_url(self._remote_url)
      blob = self._get_contents(owner, repo, filepath)
      if filetype == 'dir':
        response['files'] = _massage_content(blob)
        response['status'] = 0
      elif filetype == 'file':
        try:
          # GitHub returns file contents base64-encoded. The original
          # blob['content'].decode('base64') only works on Python 2 (the
          # 'base64' codec is gone in Python 3); binascii works on both
          # and raises binascii.Error on malformed input.
          response['content'] = binascii.a2b_base64(blob['content']).decode('utf-8')
          response['status'] = 0
        except binascii.Error as e:
          raise GithubClientException('Failed to decode file contents, check if file content is properly base64-encoded: %s' % e)
        except KeyError as e:
          raise GithubClientException('Failed to find expected content object in blob object: %s' % e)
    else:
      return HttpResponseBadRequest(_('url param is required'))
    return JsonResponse(response)

  def authorize(self, request):
    # Read-only API: no authorization flow needed.
    pass

  def parse_github_url(self, url):
    """
    Given a base URL to a Github repository, return a tuple of the owner, repo, branch
    :param url: base URL to repo (e.g. - https://github.com/cloudera/hue/tree/master)
    :return: tuple of strings (e.g. - ('cloudera', 'hue', 'master'))
    :raises ValueError: if the URL does not match the expected layout
    """
    match = self.github_url_regex.search(url)
    if match:
      return match.group('owner'), match.group('repo'), match.group('branch')
    else:
      raise ValueError('GitHub URL is not formatted correctly: %s' % url)

  @property
  def github_url_regex(self):
    # Compiled pattern matching <base>/<owner>/<repo>/tree/<branch>.
    return re.compile('%s/%s/%s/tree/%s' % (self._get_base_url(), self.OWNER_RE, self.REPO_RE, self.BRANCH_RE))

  def _get_base_url(self):
    # Scheme + host of the configured remote URL, path stripped.
    split_url = lib_urlsplit(self._remote_url)
    return lib_urlunsplit((split_url.scheme, split_url.netloc, '', "", ""))

  def _clean_path(self, filepath):
    # Strip surrounding slashes and undo URL-encoding.
    cleaned_path = filepath.strip('/')
    cleaned_path = urllib_unquote(cleaned_path)
    return cleaned_path

  def _get_contents(self, owner, repo, path):
    try:
      return self._root.get('repos/%s/%s/contents/%s' % (owner, repo, path))
    except RestException as e:
      raise GithubClientException('Could not find GitHub object, check owner, repo or path: %s' % e)
def _massage_content(blob):
response = []
for file in blob:
file['stats'] = {
'size': file.get('size', 0),
'path': file.get('path', '')
}
response.append(file)
return response | {
"pile_set_name": "Github"
} |
Tutorial: TodoSOA
=================
[<a href="http://httplocal.com/todosoa">Live Demo</a> | <a href="https://github.com/pfraze/todosoa">Source Code</a>]
---
TodoSOA is the Local.js version of <a href="http://todomvc.com">TodoMVC</a>. We're going to step through it briefly to introduce Local.js.
### The Tao of Local.js
The Tao of Local.js is to define your components as servers.
- Rather than call functions, the components send each other requests.
- Rather than hold references, the components keep URLs to each other.
TodoSOA has three servers:
- `httpl://storage` is a LocalStorage wrapper.
- `httpl://view.js` is a template renderer.
- `httpl://todo` is the main application.
Of the three of them, only `httpl://todo` must live in the document. The other two could be moved into the document, Web Workers, or remote hosts.
### Initialization
```javascript
local.setDispatchWrapper(function(req, res, dispatch) {
// Dispatch the request, wait for a response, then log both
dispatch(req, res).always(console.log.bind(console, req));
});
local.spawnWorkerServer('js/view.js', { domain: 'view.js' });
local.addServer('storage', new app.Store(name));
local.addServer('todo', new app.Todo('httpl://storage', 'httpl://view.js'));
var todoApi = local.agent('httpl://todo');
```
The dispatch wrapper is an optional middleware that is injected between `dispatch()` and delivery of the request. It's used here for logging.
<a href="#docs/en/0.6.2/api/setdispatchwrapper.md">» setDispatchWrapper()</a>
The `view.js` worker will take a moment to initialize, but you can send requests to it immediately after `spawnWorkerServer` is called. Local.js will buffer the messages until the worker signals "ready."
<a href="#docs/en/0.6.2/managing_servers.md">» Managing Servers</a><br>
Agents are like database cursors for Web APIs. They request links from their current location, query against the link keyvalues, and construct a new location URI from the top match.
<a href="#docs/en/0.6.2/api/agent.md">» agent()</a>
### app.Todo Server
```javascript
function Todo(storageUrl, viewUrl) {
// Call the local.Server constructor
local.Server.call(this);
// Generate agents which point toward the Storage server and View items
var viewApi = local.agent(viewUrl);
this.storageApi = local.agent(storageUrl);
this.listItemView = viewApi.follow({ rel: 'item', id: 'listitem' });
this.counterView = viewApi.follow({ rel: 'item', id: 'counter' });
this.clearBtnView = viewApi.follow({ rel: 'item', id: 'clearbtn' });
}
// Inherit from the local.Server prototype
Todo.prototype = Object.create(local.Server.prototype);
Todo.prototype.handleLocalRequest = function(req, res) {
var self = this;
/*
Toplevel Resource
*/
if (req.path == '/') {
// Set the link header
res.setHeader('link', [
{ href: '/', rel: 'self service collection', title: 'TodoSOA App Todo' },
{ href: '/active', rel: 'item', id: 'active' },
{ href: '/completed', rel: 'item', id: 'completed' },
{ href: '/{id}', rel: 'item' },
]);
// Route by method
switch (req.method) {
case 'HEAD':
// Send back the link header
res.writeHead(204, 'ok, no content').end();
break;
case 'POST':
// Create a new item and add it to the UI
req.on('end', function() {
// Add to storage
self.storageApi.POST(req.body).then(function () {
// Redraw
self._filter(true);
res.writeHead(204, 'ok, no content').end();
}, function() {
// Failure
res.writeHead(500, 'internal error').end();
});
});
break;
default:
res.writeHead(405, 'bad method').end();
}
}
/*
Individual Todo Items
*/
} else {
// Extract the ID
var id = req.path.slice(1);
// ...
}
};
```
Servers can be defined as either functions or objects that descend from `local.Server`. They behave similarly to Node.js servers in that:
- Requests and responses are streams.
- The handler is called before the request stream ends.
- Naming conventions are similar.
However, the API is not an exact match and includes some key differences:
- Query parameters are automatically extracted into `.query`.
- Request headers with parsers will automatically deserialize and store in `.parsedHeaders`.
- Response headers with serializers will automatically serialize on `.writeHead()`.
- Content types with parsers will automatically deserialize `.body` on 'end'.
<a href="#docs/en/0.6.2/api/server.md">» Server</a>, <a href="#docs/en/0.6.2/api/httpheaders.md">» httpHeaders</a>, <a href="#docs/en/0.6.2/api/contenttypes.md">» contentTypes</a>
### Sending Requests with Agents
```javascript
// When the enter key is pressed fire the addItem process.
$$('#new-todo').addEventListener('keypress', function (e) {
var title = e.target.value.trim();
if (e.keyCode === 13 && title !== '') {
// Send a POST to create a new item.
todoApi.POST({ title: title, completed: 0 });
e.target.value = '';
}
});
```
As you can see, the `todoApi` agent saves us the trouble of specifying a URL. Since we initialized it to `httpl://todo`, the requests will automatically take that URI. It also defaults the content-type to JSON, since we're sending an object.
```javascript
function lookupResource(target) {
while (target.nodeName !== 'LI') {
target = target.parentNode;
}
// Find the URI of the todo item and create a new agent that points to it
return todoApi.follow({ rel: 'item', id: target.dataset.id });
}
// A delegation event. Will check what item was clicked whenever you click on any
// part of a list item.
$$('#todo-list').addEventListener('click', function (e) {
// If you click a destroy button
if (e.target.className.indexOf('destroy') > -1) {
// Find the matching resource and send a DELETE request
lookupResource(e.target).DELETE();
}
// If you click the checkmark
if (e.target.className.indexOf('toggle') > -1) {
// Find the matching resource and send a CHECK/UNCHECK request
var request = { method: (e.target.checked) ? 'CHECK' : 'UNCHECK' };
lookupResource(e.target).dispatch(request);
}
});
```
Agent navigations work by issuing HEAD requests to their current location and searching through the responses' Link headers. The searches are specified in queries given to `follow()`, as above.
The `lookupResource()` query will find the first link with a `rel` that *includes* `'item'` and an `id` that *equals* `target.dataset.id`. The `rel` attribute is handled specially because it's a set of "relation types." Successfully matching against `rel` means that the link's reference will behave a certain way.
> **Why bother with reltypes?**
> Specific behaviors can be guaranteed by using URL reltypes, as the URLs can host documentation for what the reltype means. Facebook, for instance, might use `facebook.com/rel/profile` to label links with behaviors for GET, POST, PUT, etc. When configuring together components by different authors, this helps each component recognize the others and reason about their options. For internal use, however, the <a href="http://www.iana.org/assignments/link-relations/link-relations.xhtml#link-relations-1" target="_top">broadly-defined</a> 'item' is fine.
If you refer back to the Todo server's definition, you'll notice that the last entry is `{ href: '/{id}', rel: 'item' }`. This is an example of a templated link. To avoid bloating responses with full indexes, URI Templates can be used to act as "catchalls."
<a href="#docs/en/0.6.2/api/agent.md">» Agents</a>
### Rendering HTML
The main rendering behavior is defined in the Todo server. It's invoked by sending a SHOW method to `/all`, `/active`, or `/completed`.
Experienced Web developers may find it odd that:
- The atypical "SHOW" method is used,
- The DOM is manipulated from a server.
However, remember that `httpl://todo` is a server for operating the document - not for hosting content. Were the page networked (eg with WebRTC) then access to `httpl://todo` would allow remote operation of the page.
```javascript
/* Within the SHOW handler */
// Fetch the items from storage, filtered down to the set implied by our ID
var query = {};
if (id == 'active') { query.completed = 0; }
else if (id == 'completed') { query.completed = 1; }
this.storageApi.dispatch({ method: 'GET', query: query })
.then(function(res2) {
var items = res2.body;
var responses_ = [];
items.forEach(function(item) {
var query = { item_id: item.id, title: item.title, completed: item.completed };
// Send to view.js to be rendered
responses_.push(self.listItemView.GET({ query: query }));
});
// Bundle the responses into one promise that will fulfill when all promises fulfill or reject
return local.promise.bundle(responses_);
})
.then(function(res3s) {
// Render the HTML to the page
self.$todoList.innerHTML = res3s.map(function(res3) { return res3.body; }).join('');
res.writeHead(204, 'ok, no content').end();
});
```
This code is made inefficient to illustrate the 'bundling' feature of promises: rather than send all the items in one request to be rendered, they are sent individually and combined into one promise. This is a common pattern for syncing multiple requests.
<a href="#docs/en/0.6.2/api/promises.md">» Promises</a>
The view server runs in a Worker (also for illustrative purposes):
```javascript
importScripts('local.js');
var listItemTemplate
= '<li data-id="{{item_id}}" class="{{completed}}">'
+ '<div class="view">'
+ '<input class="toggle" type="checkbox" {{checked}}>'
+ '<label>{{title}}</label>'
+ '<button class="destroy"></button>'
+ '</div>'
+ '</li>';
local.worker.setServer(function (req, res, page) {
// Only accept HEAD and GET requests
if (req.method != 'HEAD' && req.method != 'GET') {
return res.writeHead(405, 'bad method').end();
}
// Route by path
switch (req.path) {
/* ... */
case '/listitem':
// Creates an <li> HTML string and returns it for placement in your app
res.setHeader('link', [
{ href: '/', rel: 'up collection service', title: 'TodoSOA HTML Generator' },
{ href: '/listitem{?item_id,title,completed}', rel: 'self item', id: 'listitem' }
]);
if (req.method == 'HEAD') {
return res.writeHead(204, 'ok, no content').end();
}
template = listItemTemplate
.replace('{{item_id}}', req.query.item_id)
.replace('{{title}}', req.query.title)
.replace('{{completed}}', (req.query.completed) ? 'completed' : '')
.replace('{{checked}}', (req.query.completed) ? 'checked' : '');
res.writeHead(200, 'ok', { 'content-type': 'text/html' }).end(template);
break;
/* ... */
default:
res.writeHead(404, 'not found').end();
}
});
```
Under light load, workers will typically respond to requests (roundtrip) within 1 ms. Because they are sandboxed in their own VMs and kept in a separate thread from the document, they are ideal for hosting user-submitted (untrusted) components. If we wanted, we could let the user set the code for view.js and let them choose how to render the page.
Any access to the page from a worker occurs through a server function in the document thread. However, because TodoSOA did not set a server function for view.js in `spawnWorkerServer()`, the worker is only able to host.
### Storing Data
To help solidify your concept of how Local.js servers behave, have a look at the storage server:
```javascript
Store.prototype.handleLocalRequest = function(req, res) {
/*
Toplevel Resource
*/
if (req.path == '/') {
// Set the link header
res.setHeader('link', [
{ href: '/{?completed}', rel: 'self service collection', title: 'TodoSOA Storage' },
{ href: '/{id}', rel: 'item' }
]);
// Route by method
switch (req.method) {
case 'HEAD':
// Send back the link header
res.writeHead(204, 'ok, no content').end();
break;
case 'GET':
// Fetch all items. Can be filtered with ?query=[1|0]
this.findAll(function(data) {
if (typeof req.query.completed != 'undefined') {
data = data.filter(function(item) {
return item.completed == req.query.completed;
});
}
res.writeHead(200, 'ok', {'content-type': 'application/json'}).end(data);
});
break;
case 'COUNT':
// Count all items
var counts = {
active: 0,
completed: 0,
total: 0
};
this.findAll(function (data) {
data.each(function (todo) {
if (todo.completed) {
counts.completed++;
} else {
counts.active++;
}
counts.total++;
});
});
res.writeHead(200, 'ok', {'content-type': 'application/json'}).end(counts);
break;
case 'POST':
// Add a new item
req.on('end', (function() { // wait until the stream has finished.
this.save(req.body, function(newTodo) {
res.writeHead(201, 'created', { location: '/'+newTodo.id }).end();
});
}).bind(this));
break;
case 'DELETE':
// Delete all items
this.drop();
res.writeHead(204, 'ok, no content').end();
break;
default:
res.writeHead(405, 'bad method').end();
break;
}
}
/*
Item Resource
*/
else {
// Extract the id from the request path.
var id = req.path.slice(1);
// Set the link header
res.setHeader('link', [
{ href: '/{?completed}', rel: 'up service collection', title: 'TodoSOA Storage' },
{ href: '/'+id, rel: 'self item', id: id }
]);
// Route by method
switch (req.method) {
case 'HEAD':
// Send back the link header
res.writeHead(204, 'ok, no content').end();
break;
case 'GET':
// Get the content of the item
this.find({ id: id }, function(data) {
if (data[0]) {
res.writeHead(200, 'ok', {'content-type': 'application/json'}).end(data[0]);
} else {
res.writeHead(404, 'not found').end();
}
});
break;
case 'PUT':
// Update the item
req.on('end', (function() {
this.save(id, req.body, function() {
res.writeHead(204, 'ok, no content').end();
});
}).bind(this));
break;
case 'DELETE':
// Delete the item
this.remove(id, function() {
res.writeHead(204, 'ok, no content').end();
});
break;
default:
res.writeHead(405, 'bad method').end();
break;
}
}
};
```
As with most HTTP servers, building the server with the base API is tedious. For something a little nicer, try the <a href="https://github.com/pfraze/servware" title="Servware">Servware framework</a>.
### Summary
To review, TodoSOA uses 3 servers - one for app logic, one for data storage, and one for template rendering. We used Agents to communicate between components, Link headers to automate URI construction, and a Worker to parallelize HTML generation.
Compared to some of the MVC approaches in Javascript, TodoSOA is not as simple or convenient. Local.js has very different goals than Backbone or Knockout. It is designed to decouple the application into components which can be changed at runtime by users. However, closed-development applications can still benefit from the reusability and reconfigurability gained by message passing.
> A recommended addition since the writing of this tutorial is <a href="#docs/en/0.6.2/api/bindrequestevents.md">Request Events</a>, which offer a convenient alternative to event listening. | {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.common.exceptions;
/**
 * Provides utilities (such as retrieving hints) to add more context to UserExceptions.
 */
public class UserExceptionUtils {

  public static final String USER_DOES_NOT_EXIST =
      "Username is absent in connection URL or doesn't exist on Drillbit node." +
      " Please specify a username in connection URL which is present on Drillbit node.";

  private UserExceptionUtils() {
    //Restrict instantiation
  }

  /**
   * Wraps the given hint text in the standard "[Hint: ...]" decoration.
   *
   * @param text the hint text to decorate
   * @return the decorated hint string
   */
  private static String decorateHint(final String text) {
    return String.format("[Hint: %s]", text);
  }

  /**
   * Returns a user-facing hint for the given exception, or an empty string
   * when no hint applies.
   *
   * @param ex the exception to inspect; must not be null. Its message may be
   *           null (Throwable.getMessage() is nullable), in which case no
   *           hint is produced.
   * @return a decorated hint, or {@code ""} when none is available
   */
  public static String getUserHint(final Throwable ex) {
    // getMessage() may legitimately return null; the previous code would
    // throw a NullPointerException in that case.
    final String message = ex.getMessage();
    if (message != null && message.startsWith("Error getting user info for current user")) {
      //User does not exist hint
      return decorateHint(USER_DOES_NOT_EXIST);
    }
    //No hint can be provided
    return "";
  }
}
| {
"pile_set_name": "Github"
} |
#include <iostream>
#include <nlohmann/json.hpp>
using json = nlohmann::json;
int main()
{
// create an array value
json array = {1, 2, 3, 4, 5};
// get an iterator to the reverse-end
json::const_reverse_iterator it = array.crend();
// increment the iterator to point to the first element
--it;
// serialize the element that the iterator points to
std::cout << *it << '\n';
}
| {
"pile_set_name": "Github"
} |
-----BEGIN CERTIFICATE REQUEST-----
MIIBuDCCASECAQAweDELMAkGA1UEBhMCR0IxGDAWBgNVBAgMD05vdHRpbmdoYW1z
aGlyZTETMBEGA1UEBwwKTm90dGluZ2hhbTEPMA0GA1UECgwGU2VydmVyMRMwEQYD
VQQLDApQcm9kdWN0aW9uMRQwEgYDVQQDDAt0ZXN0IGNsaWVudDCBnzANBgkqhkiG
9w0BAQEFAAOBjQAwgYkCgYEAmvC+cVdROE4a3jUdPDdmatZadxd9+WZVL8W4FwQ8
WeaPqq4WucFkoaA7ygztNekqhem2NmXWrmJx0YkU5joYwQsoyHfIJuL8+VF2biFw
QihOMoCcXqZYJrJsQLmvlyPB/kvBf7YF0o71kDTMCijtMddxW9xtL/9Da3gaxW9C
Ax8CAwEAAaAAMA0GCSqGSIb3DQEBBQUAA4GBAH5l2eVGP+//MBFAT+ne3/KQvoRQ
yF4xlDjvKUlK3LHjT+js/fxGQJWmXqea5jRmEZjAxNnjDcjf828jaFkaQGsoajym
ebNL5RvrPykwaXjdhHgavDiM/LCRR6bDCUYzS5akjZx2ENQ1TM7BVThOJQ2W+KPn
xdxeRH8KxKGJ3wp0
-----END CERTIFICATE REQUEST-----
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="INFO" name="carrera" monitorInterval="60" shutdownHook="disable">
<Appenders>
<Console name="stdout" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ABSOLUTE} %5p %c{1}:%L - %m%n"/>
</Console>
<RollingRandomAccessFile name="CarreraAppender" fileName="logs/carrera.log"
filePattern="logs/old/carrera.log-%d{yyyy-MM-dd-HH}.%i.gz">
<PatternLayout pattern="%d{DEFAULT} [ %p ] %c{1} - %m%n"/>
<Policies>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy compressionLevel="1">
<Delete basePath="logs" maxDepth="2">
<IfAny>
<IfAccumulatedFileSize exceeds="${env:LOG_SAVE_SIZE}"/>
<IfLastModified age="${env:LOG_SAVE_TIME}"/>
</IfAny>
</Delete>
</DefaultRolloverStrategy>
</RollingRandomAccessFile>
<RollingRandomAccessFile name="DropAppender" fileName="logs/drop.log"
filePattern="logs/old/drop.log-%d{yyyy-MM-dd}.%i.gz">
<PatternLayout pattern="%d{DEFAULT} [ %p ] %c{1} - %m%n"/>
<Policies>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy compressionLevel="9"/>
</RollingRandomAccessFile>
<RollingRandomAccessFile name="WarnAppender" fileName="logs/error.log"
filePattern="logs/old/error.log-%d{yyyy-MM-dd}.%i">
<PatternLayout pattern="%d{DEFAULT} [ %p ] %c{1} - %m%n"/>
<Policies>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy/>
</RollingRandomAccessFile>
<RollingRandomAccessFile name="MetricAppender" fileName="logs/metric.log"
filePattern="logs/old/metric.log-%d{yyyy-MM-dd-HH}.%i">
<PatternLayout pattern="%d{DEFAULT} [ %p ] %c{1} - %m%n"/>
<Policies>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy/>
</RollingRandomAccessFile>
<RollingRandomAccessFile name="MainAppender" fileName="logs/main.log"
filePattern="logs/old/main.log-%d{yyyy-MM-dd}.%i">
<PatternLayout pattern="%d{DEFAULT} [ %p ] %c{1} - %m%n"/>
<Policies>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy/>
</RollingRandomAccessFile>
</Appenders>
<Loggers>
<Root level="WARN">
<AppenderRef ref="CarreraAppender"/>
<AppenderRef ref="WarnAppender"/>
</Root>
<Logger name="com.xiaojukeji.carrera.cproxy" additivity="false" level="INFO">
<AppenderRef ref="CarreraAppender"/>
<AppenderRef ref="WarnAppender" level="error"/>
</Logger>
<Logger name="com.xiaojukeji.carrera.cproxy.consumer.BaseCarreraConsumer" additivity="false" level="${env:LOG_BASE_CONSUMER_LEVEL}">
<AppenderRef ref="CarreraAppender"/>
</Logger>
<Logger name="org.apache.thrift.server.AbstractNonblockingServer$AsyncFrameBuffer" additivity="false"
level="ERROR">
<AppenderRef ref="WarnAppender" level="warn"/>
</Logger>
<Logger name="DropLogger" level="INFO" additivity="false">
<AppenderRef ref="DropAppender"/>
</Logger>
<Logger name="OffsetLogger" level="INFO" additivity="false">
<AppenderRef ref="MetricAppender"/>
</Logger>
<Logger name="MetricLogger" level="INFO" additivity="false">
<AppenderRef ref="MetricAppender"/>
</Logger>
<Logger name="MainLogger" level="INFO" additivity="false">
<AppenderRef ref="MainAppender"/>
<AppenderRef ref="WarnAppender" level="error"/>
</Logger>
<AsyncLogger name="com.xiaojukeji.carrera" level="INFO" additivity="false">
<AppenderRef ref="CarreraAppender"/>
<AppenderRef ref="WarnAppender" level="WARN"/>
</AsyncLogger>
</Loggers>
</Configuration>
| {
"pile_set_name": "Github"
} |
client
dev tun
proto udp
remote 23.82.136.225 1194
resolv-retry infinite
remote-random
nobind
tun-mtu 1500
tun-mtu-extra 32
mssfix 1450
persist-key
persist-tun
ping 15
ping-restart 0
ping-timer-rem
reneg-sec 0
comp-lzo no
remote-cert-tls server
auth-user-pass ../Own_VPN_Config/nordvpnauth.txt
verb 3
pull
fast-io
cipher AES-256-CBC
auth SHA512
<ca>
-----BEGIN CERTIFICATE-----
MIIFCjCCAvKgAwIBAgIBATANBgkqhkiG9w0BAQ0FADA5MQswCQYDVQQGEwJQQTEQ
MA4GA1UEChMHTm9yZFZQTjEYMBYGA1UEAxMPTm9yZFZQTiBSb290IENBMB4XDTE2
MDEwMTAwMDAwMFoXDTM1MTIzMTIzNTk1OVowOTELMAkGA1UEBhMCUEExEDAOBgNV
BAoTB05vcmRWUE4xGDAWBgNVBAMTD05vcmRWUE4gUm9vdCBDQTCCAiIwDQYJKoZI
hvcNAQEBBQADggIPADCCAgoCggIBAMkr/BYhyo0F2upsIMXwC6QvkZps3NN2/eQF
kfQIS1gql0aejsKsEnmY0Kaon8uZCTXPsRH1gQNgg5D2gixdd1mJUvV3dE3y9FJr
XMoDkXdCGBodvKJyU6lcfEVF6/UxHcbBguZK9UtRHS9eJYm3rpL/5huQMCppX7kU
eQ8dpCwd3iKITqwd1ZudDqsWaU0vqzC2H55IyaZ/5/TnCk31Q1UP6BksbbuRcwOV
skEDsm6YoWDnn/IIzGOYnFJRzQH5jTz3j1QBvRIuQuBuvUkfhx1FEwhwZigrcxXu
MP+QgM54kezgziJUaZcOM2zF3lvrwMvXDMfNeIoJABv9ljw969xQ8czQCU5lMVmA
37ltv5Ec9U5hZuwk/9QO1Z+d/r6Jx0mlurS8gnCAKJgwa3kyZw6e4FZ8mYL4vpRR
hPdvRTWCMJkeB4yBHyhxUmTRgJHm6YR3D6hcFAc9cQcTEl/I60tMdz33G6m0O42s
Qt/+AR3YCY/RusWVBJB/qNS94EtNtj8iaebCQW1jHAhvGmFILVR9lzD0EzWKHkvy
WEjmUVRgCDd6Ne3eFRNS73gdv/C3l5boYySeu4exkEYVxVRn8DhCxs0MnkMHWFK6
MyzXCCn+JnWFDYPfDKHvpff/kLDobtPBf+Lbch5wQy9quY27xaj0XwLyjOltpiST
LWae/Q4vAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqG
SIb3DQEBDQUAA4ICAQC9fUL2sZPxIN2mD32VeNySTgZlCEdVmlq471o/bDMP4B8g
nQesFRtXY2ZCjs50Jm73B2LViL9qlREmI6vE5IC8IsRBJSV4ce1WYxyXro5rmVg/
k6a10rlsbK/eg//GHoJxDdXDOokLUSnxt7gk3QKpX6eCdh67p0PuWm/7WUJQxH2S
DxsT9vB/iZriTIEe/ILoOQF0Aqp7AgNCcLcLAmbxXQkXYCCSB35Vp06u+eTWjG0/
pyS5V14stGtw+fA0DJp5ZJV4eqJ5LqxMlYvEZ/qKTEdoCeaXv2QEmN6dVqjDoTAo
k0t5u4YRXzEVCfXAC3ocplNdtCA72wjFJcSbfif4BSC8bDACTXtnPC7nD0VndZLp
+RiNLeiENhk0oTC+UVdSc+n2nJOzkCK0vYu0Ads4JGIB7g8IB3z2t9ICmsWrgnhd
NdcOe15BincrGA8avQ1cWXsfIKEjbrnEuEk9b5jel6NfHtPKoHc9mDpRdNPISeVa
wDBM1mJChneHt59Nh8Gah74+TM1jBsw4fhJPvoc7Atcg740JErb904mZfkIEmojC
VPhBHVQ9LHBAdM8qFI2kRK0IynOmAZhexlP/aT/kpEsEPyaZQlnBn3An1CRz8h0S
PApL8PytggYKeQmRhl499+6jLxcZ2IegLfqq41dzIjwHwTMplg+1pKIOVojpWA==
-----END CERTIFICATE-----
</ca>
key-direction 1
<tls-auth>
#
# 2048 bit OpenVPN static key
#
-----BEGIN OpenVPN Static key V1-----
e685bdaf659a25a200e2b9e39e51ff03
0fc72cf1ce07232bd8b2be5e6c670143
f51e937e670eee09d4f2ea5a6e4e6996
5db852c275351b86fc4ca892d78ae002
d6f70d029bd79c4d1c26cf14e9588033
cf639f8a74809f29f72b9d58f9b8f5fe
fc7938eade40e9fed6cb92184abb2cc1
0eb1a296df243b251df0643d53724cdb
5a92a1d6cb817804c4a9319b57d53be5
80815bcfcb2df55018cc83fc43bc7ff8
2d51f9b88364776ee9d12fc85cc7ea5b
9741c4f598c485316db066d52db4540e
212e1518a9bd4828219e24b20d88f598
a196c9de96012090e333519ae18d3509
9427e7b372d348d352dc4c85e18cd4b9
3f8a56ddb2e64eb67adfc9b337157ff4
-----END OpenVPN Static key V1-----
</tls-auth>
| {
"pile_set_name": "Github"
} |
/*
* rcar_du_drv.c -- R-Car Display Unit DRM driver
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
* Device Information
*/
/*
 * Per-SoC device descriptions. Each entry lists the number of CRTCs, the
 * output routing (which CRTCs can drive which output, and the matching DT
 * port number) and the number of internal LVDS encoders.
 */
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
	.features = 0,
	.num_crtcs = 2,
	.routes = {
		/* R8A7779 has two RGB outputs and one (currently unsupported)
		 * TCON output.
		 */
		[RCAR_DU_OUTPUT_DPAD0] = {
			.possible_crtcs = BIT(0),
			.encoder_type = DRM_MODE_ENCODER_NONE,
			.port = 0,
		},
		[RCAR_DU_OUTPUT_DPAD1] = {
			.possible_crtcs = BIT(1) | BIT(0),
			.encoder_type = DRM_MODE_ENCODER_NONE,
			.port = 1,
		},
	},
	.num_lvds = 0,
};

static const struct rcar_du_device_info rcar_du_r8a7790_info = {
	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
	.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
	.num_crtcs = 3,
	.routes = {
		/* R8A7790 has one RGB output, two LVDS outputs and one
		 * (currently unsupported) TCON output.
		 */
		[RCAR_DU_OUTPUT_DPAD0] = {
			.possible_crtcs = BIT(2) | BIT(1) | BIT(0),
			.encoder_type = DRM_MODE_ENCODER_NONE,
			.port = 0,
		},
		[RCAR_DU_OUTPUT_LVDS0] = {
			.possible_crtcs = BIT(0),
			.encoder_type = DRM_MODE_ENCODER_LVDS,
			.port = 1,
		},
		[RCAR_DU_OUTPUT_LVDS1] = {
			.possible_crtcs = BIT(2) | BIT(1),
			.encoder_type = DRM_MODE_ENCODER_LVDS,
			.port = 2,
		},
	},
	.num_lvds = 2,
};

/* M2-W (r8a7791) and M2-N (r8a7793) are identical */
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
	.num_crtcs = 2,
	.routes = {
		/* R8A779[13] has one RGB output, one LVDS output and one
		 * (currently unsupported) TCON output.
		 */
		[RCAR_DU_OUTPUT_DPAD0] = {
			.possible_crtcs = BIT(1) | BIT(0),
			.encoder_type = DRM_MODE_ENCODER_NONE,
			.port = 0,
		},
		[RCAR_DU_OUTPUT_LVDS0] = {
			.possible_crtcs = BIT(0),
			.encoder_type = DRM_MODE_ENCODER_LVDS,
			.port = 1,
		},
	},
	.num_lvds = 1,
};

static const struct rcar_du_device_info rcar_du_r8a7794_info = {
	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
		  | RCAR_DU_FEATURE_EXT_CTRL_REGS,
	.num_crtcs = 2,
	.routes = {
		/* R8A7794 has two RGB outputs and one (currently unsupported)
		 * TCON output.
		 */
		[RCAR_DU_OUTPUT_DPAD0] = {
			.possible_crtcs = BIT(0),
			.encoder_type = DRM_MODE_ENCODER_NONE,
			.port = 0,
		},
		[RCAR_DU_OUTPUT_DPAD1] = {
			.possible_crtcs = BIT(1),
			.encoder_type = DRM_MODE_ENCODER_NONE,
			.port = 1,
		},
	},
	.num_lvds = 0,
};

/* OF match table; note that r8a7793 reuses the r8a7791 description. */
static const struct of_device_id rcar_du_of_table[] = {
	{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
	{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
	{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
	{ .compatible = "renesas,du-r8a7793", .data = &rcar_du_r8a7791_info },
	{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
	{ }
};

MODULE_DEVICE_TABLE(of, rcar_du_of_table);
/* -----------------------------------------------------------------------------
* DRM operations
*/
/*
 * DRM ->lastclose() handler: restore the fbdev emulation mode once the
 * last userspace file handle has been closed.
 */
static void rcar_du_lastclose(struct drm_device *dev)
{
	struct rcar_du_device *rcdu = dev->dev_private;

	drm_fbdev_cma_restore_mode(rcdu->fbdev);
}
/* DRM ->enable_vblank() handler: enable vblank events for the given pipe. */
static int rcar_du_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct rcar_du_device *rcdu = dev->dev_private;

	rcar_du_crtc_enable_vblank(&rcdu->crtcs[pipe], true);

	return 0;
}
/* DRM ->disable_vblank() handler: disable vblank events for the given pipe. */
static void rcar_du_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct rcar_du_device *rcdu = dev->dev_private;

	rcar_du_crtc_enable_vblank(&rcdu->crtcs[pipe], false);
}
/* File operations: generic DRM handlers plus CMA-backed GEM mmap. */
static const struct file_operations rcar_du_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = drm_gem_cma_mmap,
};
/*
 * DRM driver description: atomic mode setting with CMA-allocated GEM
 * buffers and PRIME buffer sharing through the CMA helpers.
 */
static struct drm_driver rcar_du_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
			 | DRIVER_ATOMIC,
	.lastclose = rcar_du_lastclose,
	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank = rcar_du_enable_vblank,
	.disable_vblank = rcar_du_disable_vblank,
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
	.dumb_create = rcar_du_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.fops = &rcar_du_fops,
	.name = "rcar-du",
	.desc = "Renesas R-Car Display Unit",
	.date = "20130110",
	.major = 1,
	.minor = 0,
};
/* -----------------------------------------------------------------------------
* Power management
*/
#ifdef CONFIG_PM_SLEEP
/*
 * System sleep: stop connector polling while suspended so that no hotplug
 * detection work runs against the powered-down hardware.
 */
static int rcar_du_pm_suspend(struct device *dev)
{
	struct rcar_du_device *rcdu = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(rcdu->ddev);

	/* TODO Suspend the CRTC */

	return 0;
}

static int rcar_du_pm_resume(struct device *dev)
{
	struct rcar_du_device *rcdu = dev_get_drvdata(dev);

	/* TODO Resume the CRTC */

	drm_kms_helper_poll_enable(rcdu->ddev);
	return 0;
}
#endif

static const struct dev_pm_ops rcar_du_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rcar_du_pm_suspend, rcar_du_pm_resume)
};
/* -----------------------------------------------------------------------------
* Platform driver
*/
/*
 * Tear down in reverse order of probe: unplug the connectors, unregister
 * the DRM device, then free KMS objects and drop the final reference.
 * Also used as the error path of rcar_du_probe().
 */
static int rcar_du_remove(struct platform_device *pdev)
{
	struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
	struct drm_device *ddev = rcdu->ddev;

	mutex_lock(&ddev->mode_config.mutex);
	drm_connector_unplug_all(ddev);
	mutex_unlock(&ddev->mode_config.mutex);

	drm_dev_unregister(ddev);

	/* fbdev may not have been created if probe failed early. */
	if (rcdu->fbdev)
		drm_fbdev_cma_fini(rcdu->fbdev);

	drm_kms_helper_poll_fini(ddev);
	drm_mode_config_cleanup(ddev);
	drm_dev_unref(ddev);

	return 0;
}
/*
 * Probe: map the DU registers, allocate the DRM device, initialize mode
 * setting and register the device and its connectors with the DRM core.
 */
static int rcar_du_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct rcar_du_device *rcdu;
	struct drm_connector *connector;
	struct drm_device *ddev;
	struct resource *mem;
	int ret;

	/* The driver is DT-only; a missing OF node is a hard error. */
	if (np == NULL) {
		dev_err(&pdev->dev, "no device tree node\n");
		return -ENODEV;
	}

	/* Allocate and initialize the DRM and R-Car device structures. */
	rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
	if (rcdu == NULL)
		return -ENOMEM;

	init_waitqueue_head(&rcdu->commit.wait);

	rcdu->dev = &pdev->dev;
	/* The OF match that triggered probing carries the SoC description. */
	rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;

	platform_set_drvdata(pdev, rcdu);

	/* I/O resources */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rcdu->mmio))
		return PTR_ERR(rcdu->mmio);

	/* DRM/KMS objects */
	ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
	if (!ddev)
		return -ENOMEM;

	drm_dev_set_unique(ddev, dev_name(&pdev->dev));

	rcdu->ddev = ddev;
	ddev->dev_private = rcdu;

	ret = rcar_du_modeset_init(rcdu);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
		goto error;
	}

	ddev->irq_enabled = 1;

	/* Register the DRM device with the core and the connectors with
	 * sysfs.
	 */
	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto error;

	mutex_lock(&ddev->mode_config.mutex);
	drm_for_each_connector(connector, ddev) {
		ret = drm_connector_register(connector);
		if (ret < 0)
			break;
	}
	mutex_unlock(&ddev->mode_config.mutex);

	if (ret < 0)
		goto error;

	DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));

	return 0;

error:
	/* rcar_du_remove() copes with partially initialized state. */
	rcar_du_remove(pdev);

	return ret;
}
/* Platform driver glue; devices are bound through the OF match table. */
static struct platform_driver rcar_du_platform_driver = {
	.probe = rcar_du_probe,
	.remove = rcar_du_remove,
	.driver = {
		.name = "rcar-du",
		.pm = &rcar_du_pm_ops,
		.of_match_table = rcar_du_of_table,
	},
};

module_platform_driver(rcar_du_platform_driver);
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car Display Unit DRM Driver");
MODULE_LICENSE("GPL");
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python
from gevent import monkey # isort:skip
monkey.patch_all() # isort:skip
import logging.config
import os
import os.path
import signal
import sys
from dataclasses import dataclass
from datetime import datetime
from http import HTTPStatus
from itertools import chain, count, product, repeat
from time import time
from typing import Callable, Dict, Iterable, Iterator, List, NewType, Optional
import gevent
import gevent.os
import requests
import structlog
from eth_utils import is_checksum_address, to_canonical_address, to_checksum_address
from gevent.greenlet import Greenlet
from gevent.pool import Pool
from gevent.subprocess import DEVNULL, STDOUT, Popen
from greenlet import greenlet
from raiden.network.utils import get_free_port
from raiden.transfer.state import NetworkState
from raiden.utils.formatting import pex
from raiden.utils.nursery import Janitor, Nursery
from raiden.utils.typing import Address, Host, Port, TokenAmount
import asyncio # isort:skip # noqa
from raiden.network.transport.matrix.rtc import aiogevent # isort:skip # noqa
asyncio.set_event_loop_policy(aiogevent.EventLoopPolicy()) # isort:skip # noqa
gevent.spawn(asyncio.get_event_loop().run_forever) # isort:skip # noqa
# Type aliases used to document intent in the signatures below.
BaseURL = NewType("BaseURL", str)
Amount = NewType("Amount", int)
URL = NewType("URL", str)
TransferPath = List["RunningNode"]

# Positions within a TransferPath: the first node initiates the payment,
# the last node is the payment target.
INITIATOR = 0
TARGET = -1

# structlog processor chain, shared with the stdlib logging formatter below.
processors = [
    structlog.stdlib.add_logger_name,
    structlog.stdlib.add_log_level,
    structlog.stdlib.PositionalArgumentsFormatter(),
    structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f"),
    structlog.processors.StackInfoRenderer(),
    structlog.processors.format_exc_info,
]
structlog.reset_defaults()
# Route every stdlib logger through a single colorized console handler.
logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "colorized-formatter": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": structlog.dev.ConsoleRenderer(colors=True),
                "foreign_pre_chain": processors,
            }
        },
        "handlers": {
            "colorized-handler": {
                "class": "logging.StreamHandler",
                "level": "DEBUG",
                "formatter": "colorized-formatter",
            }
        },
        "loggers": {"": {"handlers": ["colorized-handler"], "propagate": True}},
    }
)
structlog.configure(
    processors=processors + [structlog.stdlib.ProcessorFormatter.wrap_for_formatter],
    wrapper_class=structlog.stdlib.BoundLogger,
    logger_factory=structlog.stdlib.LoggerFactory(),
)
log = structlog.get_logger(__name__)
log.setLevel("DEBUG")

# HTTP status the payments API answers with when no route is available
# (name suggests; TODO confirm against the Raiden REST API docs).
NO_ROUTE_ERROR = 409
# bufsize value for Popen meaning "unbuffered" (name keeps original spelling).
UNBUFERRED = 0
FIRST_VALID_PAYMENT_ID = 1
WAIT_FOR_SOCKET_TO_BE_AVAILABLE = 60

# A partial transfer plan is a list of transfers which is guaranteed to succeed
# (regardless of the order), however the channels won't be restored to their
# initial state after the plan execution, plans of this type MUST be processed
# with `complete_planner_from_partial_planner` to revert the transfers and
# restore the channel state.
PartialTransferPlan = Iterator[Amount]

# A transfer plan is a list of transfer amounts where every transfer WILL be
# **successfully** executed and the channels restored to their initial state.
TransferPlan = Iterator[Amount]

# Callable aliases for the strategy functions a stress-test plan is built from.
PartialTransferPlanGenerator = Callable[[Amount], Iterator[PartialTransferPlan]]
TransferPlanGenerator = Callable[[Amount], TransferPlan]
Scheduler = Callable[[List[TransferPath], TransferPlan], Iterator["Transfer"]]
@dataclass
class InitialNodeConfig:
    """The configuration of a node provided by the user, this node is not yet
    running.
    """

    # Command-line arguments used to launch the node.
    args: List[str]
    base_url: BaseURL


@dataclass
class NodeConfig:
    """Configuration of a node after the address has been recovered, this
    contains the expected address of the node.
    """

    args: List[str]
    interface: Host
    address: str
    # Directory holding the node's database and captured process output.
    data_dir: str


@dataclass
class RunningNode:
    """A running node, this has a Raiden instance running in the background
    in a separate process.
    """

    process: Popen
    config: NodeConfig
    url: URL
    # Token balances captured right after startup, keyed by address.
    starting_balances: Dict[Address, TokenAmount]


@dataclass
class StressTestConfiguration:
    # Source of free ports for the nodes' REST APIs.
    port_generator: Iterator[Port]
    # Seconds to sleep between polling attempts.
    retry_timeout: int
    capacity_lower_bound: Amount
    token_address: str
    iteration_counter: Iterable[int]
    profiler_data_directory: Optional[str]


@dataclass
class StressTestPlan:
    # These values can NOT be iterables because they will be consumed multiple
    # times.

    # List of transfers, these must satisfy the following requirements:
    #
    # - Every channel in the path must have at LEAST `capacity_lower_bound`.
    transfers: List[TransferPath]

    # Different concurrency levels used to stress the system.
    concurrency: List[int]

    # List of planners (functions that return a list of transfers) that satisfy
    # the following requirements:
    #
    # - The plan MAY use UP TO the `capacity_lower_bound`, but no more.
    planners: List[TransferPlanGenerator]

    # List of schedulers (functions that receive a `TransferPath` and a
    # `TransferPlan`), and decide the order in which these should be executed.
    schedulers: List[Scheduler]


@dataclass
class Transfer:
    # Nodes along the payment route; path[INITIATOR] pays path[TARGET].
    path: TransferPath
    amount: Amount
def is_ready(base_url: str) -> bool:
    """Poll the node's `/status` endpoint once.

    Returns True when the server reports "ready", False when it is not
    ready yet or could not be queried.
    """
    try:
        result = requests.get(f"{base_url}/api/v1/status").json()
        # Read the status inside the try block so a malformed payload is
        # caught by the KeyError handler below. The previous version read
        # it afterwards, making the handler unreachable, and its final log
        # line referenced `result` even after an exception had left it
        # unbound (NameError).
        status = result["status"]
    except KeyError:
        log.info(f"Server {base_url} returned invalid json data.")
    except requests.ConnectionError:
        log.info(f"Waiting for the server {base_url} to start.")
    except requests.RequestException:
        log.exception(f"Request to server {base_url} failed.")
    else:
        if status == "ready":
            log.info(f"Server {base_url} ready.")
            return True

        log.info(f"Waiting for server {base_url} to become ready, status={status}.")

    return False
def wait_for_status_ready(base_url: str, retry_timeout: int) -> None:
    """Keeps polling for the `/status` endpoint until the status is `ready`.

    Blocks the calling greenlet, sleeping `retry_timeout` seconds between
    polls; only gevent's cooperative sleep is used, so other greenlets run.
    """
    while not is_ready(base_url):
        gevent.sleep(retry_timeout)
def wait_for_reachable(
    transfers: List[TransferPath], token_address: str, retry_timeout: int
) -> None:
    """ Wait until the nodes used for the transfers can see each other. """
    # Deduplicate the URLs for the channels which need reachability testing
    channels_not_reachable = set()
    for transfer in transfers:
        # One channel endpoint per adjacent (payer, payee) hop in the path.
        for payer, payee in zip(transfer, transfer[1:]):
            channel_url = f"{payer.url}/api/v1/channels/{token_address}/{payee.config.address}"
            channels_not_reachable.add(channel_url)

    # Now wait until every reachability constraint is satisfied
    while channels_not_reachable:
        log.info(f"Waiting for reachability of partner nodes: {channels_not_reachable}")

        # Iterate over a copy since entries are removed while looping.
        for url in channels_not_reachable.copy():
            response = requests.get(url, headers={"Content-Type": "application/json"})
            data = response.json()

            # The return data **may** be `None`, this looks like a race
            # condition in the Raiden client REST API.
            if data and data.get("network_state") == NetworkState.REACHABLE.value:
                channels_not_reachable.remove(url)

        if channels_not_reachable:
            gevent.sleep(retry_timeout)
def start_and_wait_for_server(
    nursery: Nursery, port_generator: Iterator[Port], node: NodeConfig, retry_timeout: int
) -> Optional[RunningNode]:
    """Start the Raiden node and waits for the REST API to be available,
    returns None if the script is being shutdown.
    """
    # redirect the process output for debugging
    os.makedirs(os.path.expanduser(node.data_dir), exist_ok=True)
    # NOTE(review): the file object is handed to the subprocess and never
    # explicitly closed here; it presumably lives as long as the child.
    stdout = open(os.path.join(node.data_dir, "stress_test.out"), "a")

    port = next(port_generator)
    api_url = f"{node.interface}:{port}"
    running_url = URL(f"http://{api_url}")

    process_args = node.args + ["--api-address", api_url]
    process = nursery.exec_under_watch(
        process_args, bufsize=UNBUFERRED, stdout=stdout, stderr=STDOUT
    )

    # Per this function's contract, a None process means the nursery is
    # shutting down and no node was started.
    if process is not None:
        wait_for_status_ready(running_url, retry_timeout)
        return RunningNode(process, node, running_url, get_balance_for_node(running_url))

    return None
def start_and_wait_for_all_servers(
    nursery: Nursery,
    port_generator: Iterator[Port],
    nodes_config: List[NodeConfig],
    retry_timeout: int,
) -> Optional[List[RunningNode]]:
    """Starts all nodes under the nursery, returns a list of `RunningNode`s or
    None if the script is shutting down.

    Important Note:

    `None` is not always returned if the script is shutting down! Due to race
    conditions it is possible for all processes to be spawned, and only
    afterwards the nursery is closed. IOW: At this stage `None` will only be
    returned if spawning the process fails (e.g. the binary name is wrong),
    however, if the subprocess is spawned and runs for some time, and *then*
    crashes, `None` will **not** be returned here (e.g. if the ethereum node is
    not available). For the second case, the `stop_event` will be set.

    Because of the above, for proper error handling, checking only the return
    value is **not** sufficient. The most reliable approach is to execute new
    logic in greenlets spawned with `spawn_under_watch` and let errors fall
    through.
    """
    # Start every node concurrently; each greenlet yields a RunningNode.
    greenlets = set(
        nursery.spawn_under_watch(
            start_and_wait_for_server, nursery, port_generator, node, retry_timeout
        )
        for node in nodes_config
    )

    all_running_nodes = []
    for g in gevent.joinall(greenlets, raise_error=True):
        running_node = g.get()
        # A single None means shutdown started; abort the whole batch.
        if running_node is None:
            return None
        all_running_nodes.append(running_node)

    return all_running_nodes
def restart_and_wait_for_server(
    nursery: Nursery, port_generator: Iterator[Port], node: RunningNode, retry_timeout: int
) -> Optional[RunningNode]:
    """Stop `RunningNode` and start it again under the nursery, returns None if
    the script is shutting down.
    """
    # SIGINT requests a clean shutdown of the Raiden node.
    node.process.send_signal(signal.SIGINT)

    # Wait for the process to completely shutdown, this is necessary because
    # concurrent usage of the database is not allowed.
    exit_code = node.process.result.get()

    if exit_code != 0:
        raise Exception(f"Node did not shut down cleanly {node!r}")

    return start_and_wait_for_server(nursery, port_generator, node.config, retry_timeout)
def restart_network(
    nursery: Nursery,
    port_generator: Iterator[Port],
    running_nodes: List[RunningNode],
    retry_timeout: int,
) -> Optional[List[RunningNode]]:
    """Stop all `RunningNode`s and start them again under the nursery, returns
    None if the script is shutting down.
    """
    # Restart every node concurrently (mirrors start_and_wait_for_all_servers).
    greenlets = set(
        nursery.spawn_under_watch(
            restart_and_wait_for_server, nursery, port_generator, node, retry_timeout
        )
        for node in running_nodes
    )

    all_running_nodes = []
    for g in gevent.joinall(greenlets, raise_error=True):
        running_node = g.get()
        # A single None means shutdown started; abort the whole batch.
        if running_node is None:
            return None
        all_running_nodes.append(running_node)

    return all_running_nodes
def transfer_and_assert_successful(
    base_url: str, token_address: str, target_address: str, payment_identifier: int, amount: int
) -> None:
    """Issue one payment through the node's REST API and assert it returned
    HTTP 200 with a JSON body; raises AssertionError otherwise.
    """
    # TODO: Add an UUID to the transfer, change Raiden to log the UUID and for
    # it to forward the data to the PFS, which also should log the UUID. This
    # should make debugging easier.
    post_url = f"{base_url}/api/v1/payments/{token_address}/{target_address}"
    json = {"amount": amount, "identifier": payment_identifier}

    log.debug("Payment request", url=post_url, json=json)

    # Time the request so slow payments show up in the debug log.
    start = time()
    response = requests.post(post_url, json=json)
    elapsed = time() - start

    assert response is not None, "request.post returned None"

    is_json = response.headers["Content-Type"] == "application/json"
    assert is_json, (response.headers["Content-Type"], response.text)
    assert response.status_code == HTTPStatus.OK, response.json()

    log.debug("Payment done", url=post_url, json=json, time=elapsed)
def do_fifty_transfer_up_to(capacity_lower_bound: Amount) -> TransferPlan:
    """Generates a plan with 50 transfers of the same value.

    >>> len(do_fifty_transfer_up_to(500))
    ... 50
    >>> do_fifty_transfer_up_to(500)
    ... [10, 10, 10 ..., 10]
    """
    transfer_count = 50
    # Integer division: the plan never exceeds capacity_lower_bound in total.
    per_transfer = Amount(capacity_lower_bound // transfer_count)
    yield from repeat(per_transfer, transfer_count)
def do_transfers(
    transfers: List[Transfer],
    token_address: str,
    identifier_generator: Iterator[int],
    pool_size: Optional[int] = None,
) -> None:
    """Concurrently execute `transfers`.

    Each transfer is a direct/mediated payment from path[INITIATOR] to
    path[TARGET]; failures propagate immediately to the calling greenlet.

    Note:
        To force serial transfers just provide `pool_size=1`.
    """
    pool = Pool(size=pool_size)

    # The usage of `greenlet` and `Greenlet` is not a mistake. `getcurrent` is
    # a `greenlet` interface, whereas `Greenlet` is a `gevent` interface.
    #
    # Note: Capture the parent thread to propagate the exception, this must not
    # be called inside of `propagate_error`.
    current: greenlet = gevent.getcurrent()

    # This can not use `throw`, `propagate_error` is linked with a
    # `FailureSpawnedLink`, which means the code is not executed inside the
    # Hub.
    def propagate_error(result: Greenlet) -> None:
        # Re-raise the failed greenlet's exception in the parent greenlet.
        current.kill(result.exception)

    # TODO: This should return a dictionary, were the key is `(from, to)` and
    # the amount is the sum of all transfer values, this can then be used to
    # assert on the change of capacity from each running node.
    for transfer in transfers:
        task: Greenlet = pool.spawn(
            transfer_and_assert_successful,
            base_url=transfer.path[INITIATOR].url,
            token_address=token_address,
            target_address=transfer.path[TARGET].config.address,
            payment_identifier=next(identifier_generator),
            amount=transfer.amount,
        )

        # Failure detection. Without linking the exception this loop would have
        # to complete before `pool.join` can be called, since the loop can be
        # considerably large (in the tens of thousands) the delay is
        # perceptible, linking the exception will break the loop as soon as
        # possible, this means the only use of the `join` bellow is to wait for
        # all the greenlets to finish before returning.
        #
        # TODO: Consider abstracting by adding to the nursery a Pool
        # implementation. The pool would spawn new greenlets as slots became
        # available (just like the gevent's implementation), but it would stop
        # if any of the spawned grenlets fails with an exception.
        task.link_exception(propagate_error)

    pool.join(raise_error=True)
# TODO: Expand `paths_direct_transfers` to work with graphs. Any sequence of
# paths from a graph that preserve the `capacity_lower_bound` will work.
def paths_direct_transfers(running_nodes: List[RunningNode]) -> List[TransferPath]:
    """Given the list of `running_nodes`, where each adjacent pair has a channel open,
    return a list of `(from, to)` which will do a direct transfer using each
    channel, once in each direction.
    """
    # Adjacent pairs along the chain, each backed by an open channel.
    adjacent_pairs = list(zip(running_nodes, running_nodes[1:]))

    forward = [[payer, payee] for payer, payee in adjacent_pairs]
    backward = [[payee, payer] for payer, payee in adjacent_pairs]

    return forward + backward
# TODO: Expand `paths_for_mediated_transfers` to work with graphs. Any sequence
# of paths from a graph that *do not* overlap will work with the current
# assumptions. Overlapping paths are acceptable, iff the channels that overlap
# have twice the `capacity_lower_bound`
def paths_for_mediated_transfers(running_nodes: List[RunningNode]) -> List[TransferPath]:
    """Return the mediated-transfer paths for a chain of `running_nodes`.

    Given the list of `running_nodes`, where each adjacent pair has a channel
    open, return a list with the pairs `(from, to)` which are the furthest
    apart: the whole chain traversed forward and then backward.
    """
    # Only 3-node chains are supported at the moment.
    assert len(running_nodes) == 3, (
        "This function needs to be improved to generate all mediator paths for "
        "a chain with more than 3 running_nodes"
    )
    forward_path = list(running_nodes)
    backward_path = list(reversed(running_nodes))
    return [forward_path, backward_path]
def scheduler_preserve_order(paths: List[TransferPath], plan: TransferPlan) -> Iterator[Transfer]:
    """Execute the same plan for each path, in order.

    E.g.:

    >>> paths = [[a, b], [b, c]]
    >>> transfer_plan = [1,1]
    >>> scheduler_preserve_order(paths, transfer_plan)
    ... [Transfer([a, b], amount=1),
    ...  Transfer([a, b], amount=1),
    ...  Transfer([b, c], amount=1),
    ...  Transfer([b, c], amount=1)]
    """
    # Both arguments may be generators; materialize them once so the plan can
    # be replayed for every path (this mirrors what `itertools.product` does
    # internally).
    materialized_paths = list(paths)
    materialized_plan = list(plan)

    for path in materialized_paths:
        for transfer_amount in materialized_plan:
            yield Transfer(path, Amount(transfer_amount))
def run_profiler(
    nursery: Nursery, running_nodes: List[RunningNode], profiler_data_directory: str
) -> List[Popen]:
    """Start one `py-spy` profiler process per node in `running_nodes`.

    Profile data is written into `profiler_data_directory` (created if
    missing), one file per node, named after the node's address and the
    current UTC timestamp.

    Returns:
        The list of started profiler processes, so that the caller can stop
        them (e.g. `send_signal(SIGINT)`) once the run is finished.
    """
    os.makedirs(os.path.expanduser(profiler_data_directory), exist_ok=True)

    profiler_processes: List[Popen] = list()
    for node in running_nodes:
        args = [
            "py-spy",
            "record",
            "--pid",
            str(node.process.pid),
            "--output",
            os.path.join(
                profiler_data_directory,
                f"{node.config.address}-{datetime.utcnow().isoformat()}.data",
            ),
        ]
        profiler = Popen(args, stdout=DEVNULL, stderr=DEVNULL)
        nursery.exec_under_watch(profiler)
        # Bug fix: the started process must be collected; previously the list
        # was always returned empty, so the caller could never stop the
        # profilers.
        profiler_processes.append(profiler)

    return profiler_processes
def get_balance_for_node(url: URL) -> Dict[Address, TokenAmount]:
    """Query a node's REST API and map each partner address to the channel balance."""
    response = requests.get(f"{url}/api/v1/channels")

    # Validate the response before using the body; the content-type check runs
    # first so a non-JSON body does not break the status-code assertion below.
    content_type = response.headers["Content-Type"]
    assert content_type == "application/json", content_type
    assert response.status_code == HTTPStatus.OK, response.json()

    channels = response.json()
    balance_by_partner = {}
    for channel in channels:
        balance_by_partner[channel["partner_address"]] = channel["balance"]
    return balance_by_partner
def wait_for_balance(running_nodes: List[RunningNode]) -> None:
    """Wait until every node is back to its `starting_balances`.

    This makes sure that we can run another iteration of the stress test.
    """
    for node in running_nodes:
        balances = get_balance_for_node(node.url)
        # NOTE(review): `get_balance_for_node` returns a Dict, and iterating a
        # dict yields its *keys* (partner addresses), so this `zip` appears to
        # compare addresses against `node.starting_balances` entries —
        # presumably `balances.values()` was intended. TODO: confirm against
        # the type of `RunningNode.starting_balances`.
        while any(bal < start_bal for bal, start_bal in zip(balances, node.starting_balances)):
            # Poll until the expected balances are restored.
            gevent.sleep(0.1)
            balances = get_balance_for_node(node.url)
def wait_for_user_input() -> None:
    """Block until the user presses a key, without blocking the gevent hub."""
    print("All nodes are ready! Press Enter to continue and perform the stress tests.")
    # `gevent.os.tp_read` performs the blocking stdin read on gevent's
    # threadpool, so other greenlets keep running while we wait.
    gevent.os.tp_read(sys.stdin.fileno(), n=1)
def run_stress_test(
    nursery: Nursery, running_nodes: List[RunningNode], config: StressTestConfiguration
) -> None:
    """Run the stress-test iterations described by `config` against `running_nodes`.

    For every iteration: build a `StressTestPlan`, execute each
    `(paths, concurrency, planner, scheduler)` combination to exhaustion, wait
    for the balances to settle, and restart the whole network so the next
    iteration starts from a clean state.
    """
    # Payment identifiers must be unique across the whole run.
    identifier_generator = count(start=FIRST_VALID_PAYMENT_ID)
    profiler_processes: List[Popen] = list()

    # TODO: Add tests with fees. This may require changes to the transfer plan,
    # since ATM it depends only in the `capacity_lower_bound` settings.
    for iteration in config.iteration_counter:
        log.info(f"Starting iteration {iteration}")
        # The configuration has to be re-created on every iteration because the
        # port numbers change
        plan = StressTestPlan(
            transfers=paths_for_mediated_transfers(running_nodes),
            concurrency=[50],
            planners=[do_fifty_transfer_up_to],
            schedulers=[scheduler_preserve_order],
        )

        # TODO: Before running the first plan each node should be queried for
        # their channel status. The script should assert the open channels have
        # at least `capacity_lower_bound` together.
        for concurent_paths, concurrency, transfer_planner, scheduler in zip(
            repeat(plan.transfers), plan.concurrency, plan.planners, plan.schedulers
        ):
            log.info(
                f"Starting run {concurent_paths}, {concurrency}, {transfer_planner}, {scheduler}"
            )

            # The plan MUST be executed successfully until exhaustion,
            # otherwise the next plan may try to use an amount that is not
            # available.
            transfer_plan = transfer_planner(config.capacity_lower_bound)
            transfers = list(scheduler(concurent_paths, transfer_plan))

            if config.profiler_data_directory:
                profiler_processes = run_profiler(
                    nursery, running_nodes, config.profiler_data_directory
                )

            # Make sure the transfer partners can reach each other before
            # dispatching any payment.
            wait_for_reachable(plan.transfers, config.token_address, config.retry_timeout)

            # TODO: `do_transfers` should return the amount of tokens
            # transferred with each `(from, to)` pair, and the total amount
            # must be lower than the `capacity_lower_bound`.
            do_transfers(
                transfers=transfers,
                token_address=config.token_address,
                identifier_generator=identifier_generator,
                pool_size=concurrency,
            )

            wait_for_balance(running_nodes)

            # After each `do_transfers` the state of the system must be
            # reset, otherwise there is a bug in the planner or Raiden.
            restarted_nodes = restart_network(
                nursery, config.port_generator, running_nodes, config.retry_timeout
            )
            # A `None` result means the script is shutting down.
            if restarted_nodes is None:
                return
            else:
                running_nodes = restarted_nodes

        # Stop the profilers started during this iteration, if any.
        for profiler in profiler_processes:
            profiler.send_signal(signal.SIGINT)
def main() -> None:
    """Parse the CLI arguments and INI config, start all nodes, run the test."""
    import argparse
    import configparser
    import re

    # Node sections in the INI file are named `node0`, `node1`, ...
    NODE_SECTION_RE = re.compile("^node[0-9]+")

    parser = argparse.ArgumentParser()
    parser.add_argument("--nodes-data-dir", default=os.getcwd())
    parser.add_argument("--wait-after-first-sync", default=False, action="store_true")
    parser.add_argument("--profiler-data-directory", default=None)
    parser.add_argument("--interface", default="127.0.0.1")
    parser.add_argument("--iterations", default=5, type=int)
    parser.add_argument("config")
    args = parser.parse_args()

    # py-spy needs elevated privileges to attach to another process.
    if args.profiler_data_directory is not None and os.geteuid() != 0:
        raise RuntimeError("To enable profiling the script has to be executed with root.")

    config = configparser.ConfigParser()
    config.read(args.config)

    datadir = args.nodes_data_dir

    interface = Host(args.interface)
    port_generator = get_free_port(5000)
    retry_timeout = 1

    nodes_config: List[NodeConfig] = list()

    token_address = config.defaults()["token-address"]
    if not is_checksum_address(token_address):
        raise ValueError(f"Invalid token address {token_address}, check it is checksummed.")

    # Command line options shared by every node.
    defaults = {
        "--log-config": "raiden:DEBUG",
        "--environment-type": "development",
        "--datadir": datadir,
    }

    for section in config:
        if NODE_SECTION_RE.match(section):
            node_config = config[section]
            address = node_config["address"]

            node = defaults.copy()
            node.update(
                {
                    "--keystore-path": node_config["keystore-path"],
                    "--password-file": node_config["password-file"],
                    "--eth-rpc-endpoint": node_config["eth-rpc-endpoint"],
                    "--network-id": node_config["network-id"],
                    "--address": address,
                }
            )

            pathfinding_url = node_config.get("pathfinding-service-address")
            if pathfinding_url is not None:
                node["--pathfinding-service-address"] = pathfinding_url

            # All fees are disabled so the transfer plans only depend on the
            # channel capacities.
            raiden_args = [
                "raiden",
                "--accept-disclaimer",
                "--log-json",
                "--disable-debug-logfile",
                "--flat-fee",
                token_address,
                "0",
                "--proportional-fee",
                token_address,
                "0",
                "--proportional-imbalance-fee",
                token_address,
                "0",
            ]
            raiden_args.extend(chain.from_iterable(node.items()))

            # The REST interface uses checksummed address. Normalize it here.
            address = to_checksum_address(address)

            nodedir = os.path.join(datadir, f"node_{pex(to_canonical_address(address))}")
            nodes_config.append(NodeConfig(raiden_args, interface, address, nodedir))

    # TODO: Determine the `capacity_lower_bound` by querying the nodes.
    capacity_lower_bound = 1130220

    profiler_data_directory = args.profiler_data_directory

    iterations = args.iterations
    if iterations is None:
        # Run forever.
        iteration_counter = count()
    else:
        iteration_counter = iter(range(iterations))

    # def stop_on_signal(sig=None, _frame=None):
    #     stop.set()
    # gevent.signal(signal.SIGQUIT, stop_on_signal)
    # gevent.signal(signal.SIGTERM, stop_on_signal)
    # gevent.signal(signal.SIGINT, stop_on_signal)

    # TODO: If any of the processes crashes the script should collect and
    # bundle the logs.
    #
    # Cleanup with the Janitor is not strictly necessary for the stress test,
    # since one can assume a bug happened and the state of the node is
    # inconsistent, however it is nice to have.
    with Janitor() as nursery:
        nodes_running = start_and_wait_for_all_servers(
            nursery, port_generator, nodes_config, retry_timeout
        )

        if nodes_running is None:
            return

        if args.wait_after_first_sync:
            nursery.spawn_under_watch(wait_for_user_input).get()

        test_config = StressTestConfiguration(
            port_generator,
            retry_timeout,
            Amount(capacity_lower_bound),
            token_address,
            iteration_counter,
            profiler_data_directory,
        )

        nursery.spawn_under_watch(run_stress_test, nursery, nodes_running, test_config)
        nursery.wait(timeout=None)
if __name__ == "__main__":
    # TODO:
    # - The script should quit if the vpn is closed (and therefore the raiden
    #   process is killed)
    # - With the janitor the database is properly closed (sqlite's lock
    #   goes away), however the filelock's file is not cleared.
    main()
| {
"pile_set_name": "Github"
} |
/*
 * netlink/fib_lookup/request.h		FIB Lookup Request
 *
 *	This library is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU Lesser General Public
 *	License as published by the Free Software Foundation version 2.1
 *	of the License.
 *
 * Copyright (c) 2003-2006 Thomas Graf <[email protected]>
 */

#ifndef NETLINK_FIB_LOOKUP_REQUEST_H_
#define NETLINK_FIB_LOOKUP_REQUEST_H_

#include <netlink/netlink.h>
#include <netlink/addr.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Opaque FIB lookup request object; create with flnl_request_alloc(). */
struct flnl_request;

/* Cast a generic object pointer to a FIB lookup request pointer. */
#define REQUEST_CAST(ptr)	((struct flnl_request *) (ptr))

/* Allocate a new, empty FIB lookup request. */
extern struct flnl_request *	flnl_request_alloc(void);

/* Firewall mark to be matched by the lookup. */
extern void			flnl_request_set_fwmark(struct flnl_request *,
							uint64_t);
extern uint64_t			flnl_request_get_fwmark(struct flnl_request *);

/* Type-of-service value used for the lookup. */
extern void			flnl_request_set_tos(struct flnl_request *,
						     int);
extern int			flnl_request_get_tos(struct flnl_request *);

/* Routing scope of the lookup. */
extern void			flnl_request_set_scope(struct flnl_request *,
						       int);
extern int			flnl_request_get_scope(struct flnl_request *);

/* Routing table to perform the lookup in. */
extern void			flnl_request_set_table(struct flnl_request *,
						       int);
extern int			flnl_request_get_table(struct flnl_request *);

/* Destination address to look up; note the setter returns an int,
 * presumably an error code — verify against the implementation. */
extern int			flnl_request_set_addr(struct flnl_request *,
						      struct nl_addr *);
extern struct nl_addr *		flnl_request_get_addr(struct flnl_request *);

/* Compare two requests for equality/ordering. */
extern int			flnl_request_cmp(struct flnl_request *,
						 struct flnl_request *);

#ifdef __cplusplus
}
#endif

#endif
| {
"pile_set_name": "Github"
} |
// license:BSD-3-Clause
// copyright-holders:Curt Coder
/*********************************************************************
formats/ql_dsk.c
Sinclair QL disk image formats
*********************************************************************/
#include <cassert>
#include "formats/ql_dsk.h"
// Construct the format handler, handing the table of supported disk
// geometries to the generic WD177x format implementation.
ql_format::ql_format() : wd177x_format(formats)
{
}

// Short identifier used to select this format.
const char *ql_format::name() const
{
	return "ql";
}

// Human-readable description shown in format listings.
const char *ql_format::description() const
{
	return "Sinclair QL disk image";
}

// Comma-separated file extensions associated with this format.
const char *ql_format::extensions() const
{
	return "dsk,img";
}
// unverified gaps
//
// NOTE(review): the per-entry comments describe double-sided DD/HD/ED media,
// yet every entry uses the floppy_image::SSSD variant — confirm the intended
// variant constants against the real media geometries.
const ql_format::format ql_format::formats[] = {
	{ // QDOS 800KB
		floppy_image::FF_35, floppy_image::SSSD, floppy_image::MFM,
		2000, 5, 80, 2, 1024, {}, 1, {}, 80, 22, 24
	},
	{ // 720KB DSDD
		floppy_image::FF_35, floppy_image::SSSD, floppy_image::MFM,
		2000, 9, 80, 2, 512, {}, 1, {}, 80, 22, 24
	},
	{ // 1.44MB DSHD
		floppy_image::FF_35, floppy_image::SSSD, floppy_image::MFM,
		2000, 18, 80, 2, 512, {}, 1, {}, 80, 22, 24
	},
	{ // 2.88MB DSED
		floppy_image::FF_35, floppy_image::SSSD, floppy_image::MFM,
		2000, 40, 80, 2, 512, {}, 1, {}, 80, 41, 24
	},
	{}  // terminator entry
};

// Factory entry point registered with the floppy image format list.
const floppy_format_type FLOPPY_QL_FORMAT = &floppy_image_format_creator<ql_format>;
| {
"pile_set_name": "Github"
} |
--- com.fasterxml.jackson.core.read.NonStandardUnquotedNamesTest::testUnquotedIssue510
java.lang.ArrayIndexOutOfBoundsException: 256
at com.fasterxml.jackson.core.json.ReaderBasedJsonParser._handleOddName2(ReaderBasedJsonParser.java:1964)
at com.fasterxml.jackson.core.json.ReaderBasedJsonParser._handleOddName(ReaderBasedJsonParser.java:1807)
at com.fasterxml.jackson.core.json.ReaderBasedJsonParser.nextToken(ReaderBasedJsonParser.java:692)
at com.fasterxml.jackson.core.read.NonStandardUnquotedNamesTest.testUnquotedIssue510(NonStandardUnquotedNamesTest.java:49)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at junit.framework.TestCase.runTest(TestCase.java:176)
at junit.framework.TestCase.runBare(TestCase.java:141)
at junit.framework.TestResult$1.protect(TestResult.java:122)
at junit.framework.TestResult.runProtected(TestResult.java:142)
at junit.framework.TestResult.run(TestResult.java:125)
at junit.framework.TestCase.run(TestCase.java:129)
at junit.framework.TestSuite.runTest(TestSuite.java:255)
at junit.framework.TestSuite.run(TestSuite.java:250)
at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:520)
at org.apache.tools.ant.taskdefs.optional.junit.JUnitTask.executeInVM(JUnitTask.java:1484)
at org.apache.tools.ant.taskdefs.optional.junit.JUnitTask.execute(JUnitTask.java:872)
at org.apache.tools.ant.taskdefs.optional.junit.JUnitTask.executeOrQueue(JUnitTask.java:1972)
at org.apache.tools.ant.taskdefs.optional.junit.JUnitTask.execute1(JUnitTask.java:824)
at org.apache.tools.ant.taskdefs.optional.junit.JUnitTask.execute(JUnitTask.java:2277)
at org.apache.tools.ant.UnknownElement.execute(UnknownElement.java:291)
at sun.reflect.GeneratedMethodAccessor4.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.tools.ant.dispatch.DispatchUtils.execute(DispatchUtils.java:106)
at org.apache.tools.ant.Task.perform(Task.java:348)
at org.apache.tools.ant.Target.execute(Target.java:392)
at org.apache.tools.ant.Target.performTasks(Target.java:413)
at org.apache.tools.ant.Project.executeSortedTargets(Project.java:1399)
at org.apache.tools.ant.Project.executeTarget(Project.java:1368)
at org.apache.tools.ant.helper.DefaultExecutor.executeTargets(DefaultExecutor.java:41)
at org.apache.tools.ant.Project.executeTargets(Project.java:1251)
at org.apache.tools.ant.Main.runBuild(Main.java:811)
at org.apache.tools.ant.Main.startAnt(Main.java:217)
at org.apache.tools.ant.launch.Launcher.run(Launcher.java:280)
at org.apache.tools.ant.launch.Launcher.main(Launcher.java:109)
| {
"pile_set_name": "Github"
} |
approvers:
- pmorie
- saad-ali
- thockin
- matchstick
- SandeepPissay
- divyenpatel
- BaluDontu
- abrarshivani
reviewers:
- abithap
- abrarshivani
- saad-ali
- justinsb
- jsafrane
- rootfs
- jingxu97
- msau42
- SandeepPissay
- divyenpatel
- BaluDontu
| {
"pile_set_name": "Github"
} |
/**
This example demonstrates some possibilities of registerWebInterface in conjunction with AJAX requests.
The tableview.dt uses JavaScript for just replacing the table if filter-data changes if no JavaScript is available
the whole site gets reloaded.
*/
module app;
import vibe.appmain;
import vibe.http.router;
import vibe.http.server;
import vibe.web.web;
import std.array;
import std.algorithm;
import std.conv;
import std.stdio;
import std.string;
/// Structured postal address accepted by `postAddUserStructured`.
struct Address {
	string street;   // street name
	int door;        // door / house number
	int zip_code;    // postal code
}
/**
	This class serves its users array as html table. Also have a look at views/createTable.dt.
	It's intended to be used by AJAX queries to provide HTML snippets to use as replacements for
	DOM parts.
*/
class DataProvider {
	/// Column indices into the rows of `users`; also used as filter keys.
	enum Fields {
		nameid,
		surnameid,
		addressid
	}

	private {
		// Each row is [name, surname, address], indexed by `Fields`.
		string[][] users = [
			["Tina", "Muster", "Wassergasse 12"],
			["Martina", "Maier", "Broadway 6"],
			["John", "Foo", "Church Street 7"]
		];
	}

	// GET /data_provider/data
	/// Renders the complete user table as an HTML snippet.
	void getData()
	{
		auto table = users;
		render!("createTable.dt", table)();
	}

	// GET /data_provider/data_filtered
	/**
		Overload that takes an enumeration for indexing the users array in a secure way and a value to filter on.
		Method code does not have to care about validating user data, no need to check that a field is actually present, no manual conversion, ...
		Say that this is not ingenious, I love D.
	*/
	void getDataFiltered(Fields field, string value)
	{
		// An empty filter value matches every row.
		auto table = users.filter!((a) => value.length==0 || a[field]==value)().array();
		render!("createTable.dt", table)();
	}

	// POST /data_provider/add_user
	/// Add a new user to the array, using this method from JavaScript is left as an exercise.
	void postAddUser(string name, string surname, string address)
	{
		users ~= [name, surname, address];
	}

	// POST /data_provider/add_user_structured
	/// Add user with structured address, flattened into a single string row.
	void postAddUserStructured(string name, string surname, Address address)
	{
		users ~= [name, surname, address.street~" "~to!string(address.door)~"\n"~to!string(address.zip_code)];
	}
}
/// Root web interface; owns a `DataProvider` whose methods are exposed
/// under the `/data_provider/*` URL prefix via `registerWebInterface`.
class App {
	private {
		DataProvider m_provider;
	}

	this()
	{
		m_provider = new DataProvider;
	}

	// the methods of DataProvider will be available at /data_provider/*
	@property DataProvider dataProvider() { return m_provider; }

	// GET /
	/// Redirects the landing page to the table view.
	void get()
	{
		redirect("/table");
	}

	// GET /table
	/// Renders the full, unfiltered user table.
	void getTable()
	{
		auto table = m_provider.users;
		render!("tableview.dt", table)();
	}

	// GET /table?field=...&value=...
	/// Renders the table filtered by `field == value` (empty value matches all).
	void getTable(DataProvider.Fields field, string value)
	{
		auto table = m_provider.users.filter!((a) => value.length==0 || a[field]==value)().array();
		render!("tableview.dt", table, field, value);
	}

	// POST /add_user
	/// Forwards to the provider and redirects back to the landing page.
	void postAddUser(string name, string surname, string address)
	{
		dataProvider.postAddUser(name, surname, address);
		redirect("/");
	}

	// POST /add_user_structured
	/// Forwards to the provider and redirects back to the landing page.
	void postAddUserStructured(string name, string surname, Address address)
	{
		dataProvider.postAddUserStructured(name, surname, address);
		redirect("/");
	}

	// static methods are ignored.
	static void getSomethingStatic()
	{
		return;
	}
}
/// Module constructor: register the web interface and start the HTTP
/// server on port 8080, bound to the IPv4 and IPv6 loopback addresses.
shared static this()
{
	auto router = new URLRouter;
	router.registerWebInterface(new App);

	auto settings = new HTTPServerSettings;
	settings.port = 8080;
	settings.bindAddresses = ["::1", "127.0.0.1"];
	listenHTTP(settings, router);
}
| {
"pile_set_name": "Github"
} |
#include <QtScript/QScriptExtensionPlugin>
#include <QtScript/QScriptValue>
#include <QtScript/QScriptEngine>
void qtscript_initialize_com_trolltech_qt_widgets_bindings(QScriptValue &);
// QtScript extension plugin that installs the Qt Widgets bindings into a
// script engine (via qtscript_initialize_com_trolltech_qt_widgets_bindings).
class com_trolltech_qt_widgets_ScriptPlugin : public QScriptExtensionPlugin
{
    Q_OBJECT
    Q_PLUGIN_METADATA(IID "com_trolltech_qt_widgets_ScriptPlugin")
public:
    // Extension keys this plugin can initialize.
    QStringList keys() const;
    // Called by the engine to install the bindings identified by `key`.
    void initialize(const QString &key, QScriptEngine *engine);
};
| {
"pile_set_name": "Github"
} |
#
# WL#12261 Control (enforce and disable) table encryption
#
# Pre-define user u1, which is used in different tests below.
CREATE USER u1@localhost;
GRANT ALL ON db1.* TO u1@localhost;
GRANT CREATE TABLESPACE, PROCESS, SYSTEM_VARIABLES_ADMIN ON *.* TO u1@localhost;
SET GLOBAL debug= '+d,skip_table_encryption_admin_check_for_set';
# The test cases run ALTER TABLESPACE to check its encryption mode.
# The importance of this test is to check the way ALTER TABLESPACE
# updates the ENCRYPTION clause of tables in it.
# We run this command in various configuration as,
#
# - Setting table_encryption_privilege_check to true/false.
# - Setting per database default encryption to true/false.
# - With and without user holding TABLE_ENCRYPTION_ADMIN privilege.
# - Check for warnings generated.
#
`````````````````````````````````````````````````````````
# Unencrypted TABLESPACE to Unencrypted TABLESPACE (Nop)
# [ALTER TABLESPACE] Case 1 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='n';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 2 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='n';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Encrypted TABLESPACE to Encrypted TABLESPACE (Nop)
# [ALTER TABLESPACE] Case 3 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 4 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 5 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Unencrypted TABLESPACE to encrypted TABLESPACE
# with database encryption default 'n'
# [ALTER TABLESPACE] Case 6 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Reject creating encrypted table in database with default encryption='n'
# [ALTER TABLESPACE] Case 7 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
ERROR HY000: This tablespace can't be encrypted, because one of table's schema has default encryption OFF and user doesn't have enough privilege.
SHOW WARNINGS;
Level Code Message
Error 3829 This tablespace can't be encrypted, because one of table's schema has default encryption OFF and user doesn't have enough privilege.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 8 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# With some tables without ENCRYPTION clause;
# [ALTER TABLESPACE] Case 9 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1;
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Reject creating encrypted table in database with default encryption='n'
# [ALTER TABLESPACE] Case 10 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1;
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
ERROR HY000: This tablespace can't be encrypted, because one of table's schema has default encryption OFF and user doesn't have enough privilege.
SHOW WARNINGS;
Level Code Message
Error 3829 This tablespace can't be encrypted, because one of table's schema has default encryption OFF and user doesn't have enough privilege.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 11 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1;
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# With all tables without ENCRYPTION clause;
# [ALTER TABLESPACE] Case 12 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1;
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1;
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Request to create encrypted tablespace with default_table_encryption='n'
# [ALTER TABLESPACE] Case 13 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1;
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1;
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
ERROR HY000: This tablespace can't be encrypted, because one of table's schema has default encryption OFF and user doesn't have enough privilege.
SHOW WARNINGS;
Level Code Message
Error 3829 This tablespace can't be encrypted, because one of table's schema has default encryption OFF and user doesn't have enough privilege.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 14 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1;
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1;
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Unencrypted TABLESPACE to encrypted TABLESPACE
# with database encryption 'y'
# [ALTER TABLESPACE] Case 15 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='n' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='n' */
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 16 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='n' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='n' */
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 17 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='n';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='n' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='n' */
SET SESSION default_table_encryption=true;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='y';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='Y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# Case 17.1 With some tables without ENCRYPTION clause;
# CREATE table using unencrypted tablespace without
# ENCRYPTION clause would inherit ENCRYPTION from database.
# This makes CREATE TABLE fail because the ENCRYPTION clause
# and the tablespace encryption type mismatches. The test
# encryption.create_table does test this.
# Case 17.2 With all tables without ENCRYPTION clause;
# Behavior would be same as described in Case 17.1
`````````````````````````````````````````````````````````
# Encrypted TABLESPACE to unencrypted TABLESPACE
# with database encryption 'n'
# [ALTER TABLESPACE] Case 18 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='n';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 19 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='n';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 20 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='n';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='n';
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Encrypted TABLESPACE to unencrypted TABLESPACE
# with database encryption 'y'
# [ALTER TABLESPACE] Case 21 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=false;
ALTER TABLESPACE ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW WARNINGS;
Level Code Message
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='N' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='N' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
`````````````````````````````````````````````````````````
# Request to create unencrypted tablespace with default_table_encryption='y'
# [ALTER TABLESPACE] Case 22 )
`````````````````````````````````````````````````````````
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='n';
ERROR HY000: This tablespace can't be decrypted, because one of table's schema has default encryption ON and user doesn't have enough privilege.
SHOW WARNINGS;
Level Code Message
Error 3830 This tablespace can't be decrypted, because one of table's schema has default encryption ON and user doesn't have enough privilege.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# [ALTER TABLESPACE] Case 23 )
`````````````````````````````````````````````````````````
# Grant user with TABLE_ENCRYPTION_ADMIN
GRANT TABLE_ENCRYPTION_ADMIN ON *.* TO u1@localhost;
# Create required schema to run ALTER TABLESPACE.
CREATE TABLESPACE ts1 ADD DATAFILE 'df_u.ibd' ENCRYPTION='y';
CREATE DATABASE db1 DEFAULT ENCRYPTION='y';
CREATE TABLE db1.t1 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
CREATE TABLE db1.t2 (f1 int) TABLESPACE=ts1 ENCRYPTION='y';
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='y' */
SET SESSION default_table_encryption=false;
# Run ALTER TABLESPACE
SET GLOBAL table_encryption_privilege_check=true;
ALTER TABLESPACE ts1 ENCRYPTION='n';
Warnings:
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW WARNINGS;
Level Code Message
Warning 3824 Creating an unencrypted table in a database with default encryption enabled.
SHOW CREATE TABLE db1.t1;
Table Create Table
t1 CREATE TABLE `t1` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='N' */
SHOW CREATE TABLE db1.t2;
Table Create Table
t2 CREATE TABLE `t2` (
`f1` int DEFAULT NULL
) /*!50100 TABLESPACE `ts1` */ ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci /*!80016 ENCRYPTION='N' */
SET GLOBAL table_encryption_privilege_check=false;
SET SESSION default_table_encryption=false;
# clean up
# Revoke TABLE_ENCRYPTION_ADMIN from user
REVOKE TABLE_ENCRYPTION_ADMIN ON *.* FROM u1@localhost;
DROP DATABASE db1;
DROP TABLESPACE ts1;
#
# Clean up.
DROP USER u1@localhost;
SET GLOBAL debug= '-d,skip_table_encryption_admin_check_for_set';
| {
"pile_set_name": "Github"
} |
CMAKE_INSTALL_NAME_DIR
----------------------
macOS directory name for installed targets.
``CMAKE_INSTALL_NAME_DIR`` is used to initialize the
:prop_tgt:`INSTALL_NAME_DIR` property on all targets. See that target
property for more information.
| {
"pile_set_name": "Github"
} |
/*
* Tencent is pleased to support the open source community by making Angel available.
*
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* https://opensource.org/licenses/Apache-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*
*/
package com.tencent.angel.ml.psf.columns;
import com.tencent.angel.PartitionKey;
import com.tencent.angel.ml.math2.utils.ArrayCopy;
import com.tencent.angel.ml.math2.vector.IntIntVector;
import com.tencent.angel.ml.math2.vector.IntLongVector;
import com.tencent.angel.ml.math2.vector.Vector;
import com.tencent.angel.ml.matrix.psf.get.base.GetParam;
import com.tencent.angel.ml.matrix.psf.get.base.PartitionGetParam;
import com.tencent.angel.ps.server.data.request.InitFunc;
import com.tencent.angel.psagent.PSAgentContext;
import java.util.ArrayList;
import java.util.List;
/**
 * Parameters for a parameter-server "get columns" request: fetches the values
 * of the given column indices for the given rows of a matrix.
 */
public class GetColsParam extends GetParam {
  // Row indices to fetch.
  int[] rows;
  // Column indices to fetch. NOTE(review): split() only works if these are
  // sorted in ascending order -- confirm callers guarantee this.
  long[] cols;
  // Optional server-side initializer for missing values; may be null.
  InitFunc func;

  /**
   * @param matId matrix id
   * @param rows  row indices to fetch
   * @param cols  column indices to fetch (assumed ascending)
   * @param func  optional server-side initializer, may be null
   */
  public GetColsParam(int matId, int[] rows, long[] cols, InitFunc func) {
    super(matId);
    this.rows = rows;
    this.cols = cols;
    this.func = func;
  }

  /** Convenience constructor without an initializer. */
  public GetColsParam(int matId, int[] rows, long[] cols) {
    this(matId, rows, cols, null);
  }

  /** Convenience constructor taking the column indices as a dense vector. */
  public GetColsParam(int matId, int[] rows, Vector cols, InitFunc func) {
    this(matId, rows, getCols(cols), func);
  }

  /** Convenience constructor taking the column indices as a dense vector. */
  public GetColsParam(int matId, int[] rows, Vector cols) {
    this(matId, rows, getCols(cols), null);
  }

  // TODO: optimize int key indices
  /**
   * Extracts the raw column indices from a dense int-keyed vector, widening
   * int indices to long when the vector holds ints.
   */
  static long [] getCols(Vector colVec) {
    if (colVec instanceof IntLongVector) {
      return ((IntLongVector) colVec).getStorage().getValues();
    } else {
      // IntIntVector: copy-widen each int index into a long array.
      int[] values = ((IntIntVector) colVec).getStorage().getValues();
      long [] cols = new long[values.length];
      ArrayCopy.copy(values, cols);
      return cols;
    }
  }

  /**
   * Splits this request into one sub-request per matrix partition, assigning
   * each partition the contiguous run of {@code cols} that falls inside its
   * [startCol, endCol) range.
   *
   * NOTE(review): assumes both the partition list and {@code cols} are sorted
   * ascending; a column that falls outside every partition range would stall
   * the scan (start never advances past it) -- verify upstream guarantees.
   */
  @Override public List<PartitionGetParam> split() {
    List<PartitionKey> pkeys = PSAgentContext.get().getMatrixMetaManager().getPartitions(matrixId);
    List<PartitionGetParam> params = new ArrayList<>();
    int start = 0, end = 0;
    for (PartitionKey pkey : pkeys) {
      long startCol = pkey.getStartCol();
      long endCol = pkey.getEndCol();
      if (start < cols.length && cols[start] >= startCol) {
        // Advance end past every column owned by this partition.
        while (end < cols.length && cols[end] < endCol)
          end++;
        long[] part = new long[end - start];
        System.arraycopy(cols, start, part, 0, end - start);
        params.add(new PartitionGetColsParam(matrixId, pkey, rows, part, func));
        start = end;
      }
    }
    return params;
  }
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{09B47150-EEB5-4416-8D9A-7258CB0C717B}</ProjectGuid>
<OutputType>Exe</OutputType>
<AppDesignerFolder>Properties</AppDesignerFolder>
<RootNamespace>XgbFeatureInteractions</RootNamespace>
<AssemblyName>XgbFeatureInteractions</AssemblyName>
<TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
<FileAlignment>512</FileAlignment>
<AutoGenerateBindingRedirects>true</AutoGenerateBindingRedirects>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>x64</PlatformTarget>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<ItemGroup>
<Reference Include="EPPlus, Version=4.0.4.0, Culture=neutral, PublicKeyToken=ea159fdaa78159a1, processorArchitecture=MSIL">
<HintPath>..\packages\EPPlus.4.0.4\lib\net20\EPPlus.dll</HintPath>
<Private>True</Private>
</Reference>
<Reference Include="NGenerics, Version=1.4.1.0, Culture=neutral, PublicKeyToken=e4b41be133ea7faf, processorArchitecture=MSIL">
<HintPath>..\packages\NGenerics.1.4.1.0\lib\net35\NGenerics.dll</HintPath>
<Private>True</Private>
</Reference>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Xml.Linq" />
<Reference Include="System.Data.DataSetExtensions" />
<Reference Include="Microsoft.CSharp" />
<Reference Include="System.Data" />
<Reference Include="System.Net.Http" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="FeatureInteraction.cs" />
<Compile Include="FeatureInteractions.cs" />
<Compile Include="FIScoreComparer.cs" />
<Compile Include="GlobalSettings.cs" />
<Compile Include="Program.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="Properties\Settings.Designer.cs">
<AutoGen>True</AutoGen>
<DesignTimeSharedInput>True</DesignTimeSharedInput>
<DependentUpon>Settings.settings</DependentUpon>
</Compile>
<Compile Include="GlobalStats.cs" />
<Compile Include="SplitValueHistogram.cs" />
<Compile Include="XgbModel.cs" />
<Compile Include="XgbModelParser.cs" />
<Compile Include="XgbTree.cs" />
<Compile Include="XgbTreeNode.cs" />
</ItemGroup>
<ItemGroup>
<None Include="App.config" />
<None Include="packages.config" />
<None Include="Properties\Settings.settings">
<Generator>SettingsSingleFileGenerator</Generator>
<LastGenOutput>Settings.Designer.cs</LastGenOutput>
</None>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<PropertyGroup>
<PreBuildEvent>rd /S /Q md $(TargetDir)lib</PreBuildEvent>
</PropertyGroup>
<PropertyGroup>
<PostBuildEvent>md $(TargetDir)lib
move /Y "$(TargetDir)*.dll" "$(TargetDir)lib"</PostBuildEvent>
</PropertyGroup>
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
Other similar extension points exist, see Microsoft.Common.targets.
<Target Name="BeforeBuild">
</Target>
<Target Name="AfterBuild">
</Target>
-->
</Project> | {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"scale" : "1x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"idiom" : "universal",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
# Show the primary and secondary maintainers responsible for a given path.
#
# Walks up the directory tree from PATH looking for a MAINTAINERS file,
# honouring per-file override lines of the form "filename: maintainer", and
# stops at the repository root (a directory containing .git) or at /.
set -e

if [ $# -ne 1 ]; then
	echo >&2 "Usage: $0 PATH"
	echo >&2 "Show the primary and secondary maintainers for a given path"
	exit 1
fi

DEST=$1
DESTFILE=""
# If the argument is a file, remember its basename so per-file overrides in
# MAINTAINERS can match it, and start the walk from its directory.
# All expansions are quoted so paths containing spaces work.
if [ ! -d "$DEST" ]; then
	DESTFILE=$(basename "$DEST")
	DEST=$(dirname "$DEST")
fi

MAINTAINERS=()
cd "$DEST"

while true; do
	if [ -e ./MAINTAINERS ]; then
		{
			# IFS= and -r preserve leading whitespace and backslashes
			# in maintainer names.
			while IFS= read -r line; do
				re='^([^:]*): *(.*)$'
				file=$(echo "$line" | sed -E -n "s/$re/\1/p")
				if [ -n "$file" ]; then
					# "file: maintainer" override: applies only to the
					# file we were asked about and takes priority
					# (prepended, so it becomes the primary).
					if [ "$file" = "$DESTFILE" ]; then
						echo "Override: $line"
						maintainer=$(echo "$line" | sed -E -n "s/$re/\2/p")
						MAINTAINERS=("$maintainer" "${MAINTAINERS[@]}")
					fi
				else
					MAINTAINERS+=("$line")
				fi
			done
		} < MAINTAINERS
		break
	fi
	# Stop at the repository root or the filesystem root.
	if [ -d .git ]; then
		break
	fi
	if [ "$(pwd)" = "/" ]; then
		break
	fi
	cd ..
done

PRIMARY="${MAINTAINERS[0]}"
LGTM_COUNT=${#MAINTAINERS[@]}
# NOTE(review): n % 2 + 1 yields 1 LGTM for any even maintainer count; a
# majority rule would be n / 2 + 1. Preserved as-is to keep output identical.
LGTM_COUNT=$((LGTM_COUNT % 2 + 1))

echo "A pull request in $1 will need $LGTM_COUNT LGTM's to be merged."
echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1."
for SECONDARY in "${MAINTAINERS[@]:1}"; do
	echo "--- $SECONDARY"
done
| {
"pile_set_name": "Github"
} |
// Copyright (C) 2011 Davis E. King ([email protected])
// License: Boost Software License See LICENSE.txt for the full license.
#undef DLIB_SQLiTE_TOOLS_ABSTRACT_H_
#ifdef DLIB_SQLiTE_TOOLS_ABSTRACT_H_
#include "sqlite_abstract.h"
// ----------------------------------------------------------------------------------------
namespace dlib
{
class transaction : noncopyable
{
/*!
WHAT THIS OBJECT REPRESENTS
This object is a tool for creating exception safe
database transactions.
!*/
public:
transaction (
database& db
);
/*!
ensures
- Begins a database transaction which will be rolled back
if commit() isn't called eventually.
- In particular, performs: db.exec("begin transaction");
!*/
void commit (
);
/*!
ensures
- if (commit() hasn't already been called) then
- Commits all changes made during this database transaction.
- In particular, performs: db.exec("commit");
- else
- does nothing
!*/
~transaction(
);
/*!
ensures
- if (commit() was never called) then
- rolls back any changes made to the database during this transaction.
- In particular, performs: db.exec("rollback");
- else
- does nothing
!*/
};
// ----------------------------------------------------------------------------------------
template <
typename T
>
void query_object (
database& db,
const std::string& query,
T& item
);
/*!
ensures
- executes the given SQL query against db. If the query results in a
single row and column being returned then the data in the column is
interpreted as a binary BLOB and deserialized into item.
throws
- sqlite_error or serialization_error if an error occurs which prevents
this operation from succeeding.
!*/
// ----------------------------------------------------------------------------------------
std::string query_text (
database& db,
const std::string& query
);
/*!
ensures
- executes the given SQL query against db. If the query results in a
single row and column being returned then the data in the column is
converted to text and returned.
throws
- sqlite_error if an error occurs which prevents this operation from
succeeding.
!*/
// ----------------------------------------------------------------------------------------
double query_double (
database& db,
const std::string& query
);
/*!
ensures
- executes the given SQL query against db. If the query results in a
single row and column being returned then the data in the column is
converted to a double and returned.
throws
- sqlite_error if an error occurs which prevents this operation from
succeeding.
!*/
// ----------------------------------------------------------------------------------------
int query_int (
database& db,
const std::string& query
);
/*!
ensures
- executes the given SQL query against db. If the query results in a
single row and column being returned then the data in the column is
converted to an int and returned.
throws
- sqlite_error if an error occurs which prevents this operation from
succeeding.
!*/
// ----------------------------------------------------------------------------------------
int64 query_int64 (
database& db,
const std::string& query
);
/*!
ensures
- executes the given SQL query against db. If the query results in a
single row and column being returned then the data in the column is
converted to an int64 and returned.
throws
- sqlite_error if an error occurs which prevents this operation from
succeeding.
!*/
// ----------------------------------------------------------------------------------------
const std::vector<char> query_blob (
database& db,
const std::string& query
);
/*!
ensures
- executes the given SQL query against db. If the query results in a
single row and column being returned then the data in the column is
returned as a binary BLOB.
throws
- sqlite_error if an error occurs which prevents this operation from
succeeding.
!*/
// ----------------------------------------------------------------------------------------
}
#endif // DLIB_SQLiTE_TOOLS_ABSTRACT_H_
| {
"pile_set_name": "Github"
} |
=pod
=head1 NAME
gendsa - generate a DSA private key from a set of parameters
=head1 SYNOPSIS
B<openssl> B<gendsa>
[B<-out filename>]
[B<-des>]
[B<-des3>]
[B<-idea>]
[B<-rand file(s)>]
[B<-engine id>]
[B<paramfile>]
=head1 DESCRIPTION
The B<gendsa> command generates a DSA private key from a DSA parameter file
(which will be typically generated by the B<openssl dsaparam> command).
=head1 OPTIONS
=over 4
=item B<-des|-des3|-idea>
These options encrypt the private key with the DES, triple DES, or the
IDEA ciphers respectively before outputting it. A pass phrase is prompted for.
If none of these options is specified no encryption is used.
=item B<-rand file(s)>
a file or files containing random data used to seed the random number
generator, or an EGD socket (see L<RAND_egd(3)|RAND_egd(3)>).
Multiple files can be specified separated by an OS-dependent character.
The separator is B<;> for MS-Windows, B<,> for OpenVMS, and B<:> for
all others.
=item B<-engine id>
specifying an engine (by its unique B<id> string) will cause B<gendsa>
to attempt to obtain a functional reference to the specified engine,
thus initialising it if needed. The engine will then be set as the default
for all available algorithms.
=item B<paramfile>
This option specifies the DSA parameter file to use. The parameters in this
file determine the size of the private key. DSA parameters can be generated
and examined using the B<openssl dsaparam> command.
=back
=head1 NOTES
DSA key generation is little more than random number generation so it is
much quicker than RSA key generation, for example.
=head1 SEE ALSO
L<dsaparam(1)|dsaparam(1)>, L<dsa(1)|dsa(1)>, L<genrsa(1)|genrsa(1)>,
L<rsa(1)|rsa(1)>
=cut
| {
"pile_set_name": "Github"
} |
"Filed out from Dolphin Smalltalk 7"!
Magnitude subclass: #TimeStamp
instanceVariableNames: 'date time'
classVariableNames: ''
poolDictionaries: ''
classInstanceVariableNames: ''!
TimeStamp guid: (GUID fromString: '{87b4c656-026e-11d3-9fd7-00a0cc3e4a32}')!
TimeStamp comment: '`TimeStamp` represents a `Time` of day on a particular `Date` in an unspecified time zone.
`TimeStamp` is present for backwards compatibility only. For most purposes the ANSI compliant and time zone offset aware `DateAndTime` class should be preferred.'!
!TimeStamp categoriesForClass!Kernel-Chronology! !
!TimeStamp methodsFor!
< aTimeStamp
"Answer whether the receiver precedes the argument, aTimeStamp."
^self date < aTimeStamp date
or: [self date = aTimeStamp date and: [self time < aTimeStamp time]]!
= aTimeStamp
"Answer whether the receiver represents the same time and date as the argument."
^self species == aTimeStamp species
and: [self date = aTimeStamp date and: [self time = aTimeStamp time]]!
addSeconds: anInteger
"Answer a new TimeStamp, anInteger seconds after the receiver."
^self class fromSeconds: self asSeconds + anInteger!
asDateAndTime
^DateAndTime fromDate: date time: time!
asMilliseconds
"Answer the number of milliseconds between January 1, 1901, and the time and date
represented by the receiver (this will likely be a LargeInteger). Can be used to perform
arithmetic with Dates and Times."
^self date asMilliseconds + self time asMilliseconds!
asParameter
"Answer the receiver in external system representation for passing to an external function call."
| timeStampSt timeSt |
timeStampSt := self date asParameter.
timeSt := self time asParameter.
timeStampSt
wHour: timeSt wHour;
wMinute: timeSt wMinute;
wSecond: timeSt wSecond.
^timeStampSt!
asSeconds
"Answer the number of seconds between January 1, 1901, and the time and date
represented by the receiver (this will likely be a LargeInteger). Can be used to perform
arithmetic with Dates and Times."
^self date asSeconds + self time asSeconds!
asTimeStamp
"Answer the receiver as a <TimeStamp>."
^self!
date
"Answer the <Date> of the receiver."
^date
!
dayOfMonth
"Answer an <integer> between 1 and 31, inclusive, representing the day of the month of the date of the receiver."
^date dayOfMonth!
dayOfWeek
	"Answer an <integer> representing the day of the week of the date of the receiver. NOTE(review): the numbering convention (whether 1 is Sunday or Monday) is defined by Date>>dayOfWeek - confirm there."

	^date dayOfWeek!
hash
"Answer the SmallInteger hash value for the receiver."
^(self date hash bitShift: 2) bitXor: self time hash!
hour
"Answer an <integer> between 0 and 23, inclusive, representing the hour of the day on the 24-hour clock of time of the receiver."
^time hour!
hour12
"Answer an <integer> between 1 and 12, inclusive, representing the hour of the day on the 12-hour clock of time of the receiver."
^time hour12!
hour24
"Answer an <integer> between 0 and 23, inclusive, representing the hour of the day on the 24-hour clock of time of the receiver."
^time hour24!
minute
"Answer an <integer> between 0 and 59, inclusive, representing the minute of the hour of the time of the receiver."
^time minute!
month
"Answer the one-based <integer> index of the month represented by the receiver; 1 for January, 2 for February, and so on."
^date month!
printOn: aStream
"Append a short textual description of the receiver to aStream."
"Maintain the format that Dolphin has historically used for displaying TimeStamps because Ian's Chunk Browser relies on this to identify image saves, e.g. 11:06:39, 03 April 2002"
Locale smalltalk printDateTime: self on: aStream format: 'HH:mm:ss, dd MMMM yyyy'!
printOn: aStream format: aString
"Append a short textual description of the receiver to the <puttableStream> argument, aStream. The format is defined by the <readableString> argument, aString, the format characters of which are as described Locale>>dateFormat and Locale>>timeFormat methods. If the format argument is nil then the receiver is printed to the stream in the currently configured default date and time formats of the host OS."
Locale default printDateTime: self on: aStream format: aString
!
printStringFormat: aString
"Answer a short textual description of the receiver. The format is defined by the <readableString> argument, aString, the format characters of which are as described in the comments of the Locale>>#dateFormat and Locale>>#timeFormat methods. Settings from the default locale are used."
| stream |
stream := String writeStream: aString size.
self printOn: stream format: aString.
^stream contents!
second
	"Answer an <integer> between 0 and 59, inclusive, representing the second of the minute of the time of the receiver."

	^time second!
setDate: aDate time: aTime
date := aDate.
time := aTime.
self isImmutable: true.
^self
!
storeOn: aStream
aStream
display: self class;
nextPutAll: ' fromString: '.
self displayString printOn: aStream!
time
"Answer the <Time> of the receiver."
^time
!
year
	"Answer an <integer> representing the year of the date of the receiver."

	^date year! !
!TimeStamp categoriesFor: #<!comparing!public! !
!TimeStamp categoriesFor: #=!comparing!public! !
!TimeStamp categoriesFor: #addSeconds:!arithmetic!comparing!public! !
!TimeStamp categoriesFor: #asDateAndTime!public! !
!TimeStamp categoriesFor: #asMilliseconds!converting!public! !
!TimeStamp categoriesFor: #asParameter!converting!public! !
!TimeStamp categoriesFor: #asSeconds!converting!public! !
!TimeStamp categoriesFor: #asTimeStamp!converting!public! !
!TimeStamp categoriesFor: #date!accessing!public! !
!TimeStamp categoriesFor: #dayOfMonth!accessing!public! !
!TimeStamp categoriesFor: #dayOfWeek!public! !
!TimeStamp categoriesFor: #hash!comparing!public! !
!TimeStamp categoriesFor: #hour!accessing!public! !
!TimeStamp categoriesFor: #hour12!accessing!public! !
!TimeStamp categoriesFor: #hour24!accessing!public! !
!TimeStamp categoriesFor: #minute!accessing!public! !
!TimeStamp categoriesFor: #month!accessing!public! !
!TimeStamp categoriesFor: #printOn:!printing!public! !
!TimeStamp categoriesFor: #printOn:format:!printing!public! !
!TimeStamp categoriesFor: #printStringFormat:!printing!public! !
!TimeStamp categoriesFor: #second!accessing!public! !
!TimeStamp categoriesFor: #setDate:time:!accessing!initializing!private! !
!TimeStamp categoriesFor: #storeOn:!printing!public! !
!TimeStamp categoriesFor: #time!accessing!public! !
!TimeStamp categoriesFor: #year!accessing!public! !
!TimeStamp class methodsFor!
current
"Answer a new instance of the receiver representing the current date and time."
"Note that the clock precision is currently limited to milliseconds because of the use of SYSTEMTIMEs."
^self fromSYSTEMTIME: SYSTEMTIME now!
currentUTC
"Answer a new instance of the receiver representing the current UTC date and time."
^self fromSYSTEMTIME: SYSTEMTIME nowUTC!
date: aDate
"Answer a new instance of the receiver based on aDate."
^self date: aDate time: (Time fromSeconds: 0)!
date: aDate time: aTime
"Answer a new instance of the receiver based on aDate and aTime"
<primitive: 157>
^super new setDate: aDate time: aTime!
fromDateAndTime: aDateAndTime
"Answer a new instance of the receiver representing the same date and time as the local time of the <DateAndTime> argument."
^self date: aDateAndTime asDate time: aDateAndTime asTime!
fromMilliseconds: anInteger
	"Answer a new instance of the receiver representing the point in time anInteger milliseconds after the epoch."

	| date msInDay |
	date := Date fromSeconds: anInteger // 1000.
	"Milliseconds remaining within the day, used to build the Time component."
	msInDay := anInteger - (date asSeconds * 1000).
	^self date: date time: (Time fromMilliseconds: msInDay)!
fromSeconds: anInteger
"Answer a new instance of the receiver representing the point in time
anInteger seconds after the epoch."
| date |
date := Date fromSeconds: anInteger.
^self
date: date
time: (Time fromSeconds: anInteger - date asSeconds)!
fromString: aString
	"Answer a new instance of the receiver read from aString. The time-first format ('hh:mm:ss, date') is attempted first; if that raises InvalidFormat the stream is reset and the date-first format ('date, hh:mm:ss') is parsed instead."

	| stream time date |
	stream := aString readStream.
	[time := Time readFrom: stream.
	stream next.
	date := Date readFrom: stream] on: InvalidFormat
		do:
			[:x |
			stream reset.
			date := Date readFrom: stream.
			stream next.
			time := Time readFrom: stream].
	^self date: date time: time!
fromSYSTEMTIME: aSYSTEMTIME
"Answer a new instance of the receiver representing the current
date and time"
^self
date: (Date fromSYSTEMTIME: aSYSTEMTIME)
time: (Time fromSYSTEMTIME: aSYSTEMTIME)!
new
"Answer a new instance of the receiver representing the current Time."
^self current!
time: aTime
"Answer a new instance of the receiver based on aTime."
^self date: (Date fromDays: 0) time: aTime! !
!TimeStamp class categoriesFor: #current!instance creation!public! !
!TimeStamp class categoriesFor: #currentUTC!instance creation!public! !
!TimeStamp class categoriesFor: #date:!instance creation!public! !
!TimeStamp class categoriesFor: #date:time:!instance creation!public! !
!TimeStamp class categoriesFor: #fromDateAndTime:!instance creation!public! !
!TimeStamp class categoriesFor: #fromMilliseconds:!public! !
!TimeStamp class categoriesFor: #fromSeconds:!instance creation!public! !
!TimeStamp class categoriesFor: #fromString:!public! !
!TimeStamp class categoriesFor: #fromSYSTEMTIME:!instance creation!public! !
!TimeStamp class categoriesFor: #new!instance creation!public! !
!TimeStamp class categoriesFor: #time:!instance creation!public! !
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>CommandLine</class>
<widget class="QWidget" name="commandLine">
<property name="minimumSize">
<size>
<width>120</width>
<height>400</height>
</size>
</property>
<property name="sizePolicy">
<sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
<horstretch>1</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<widget class="ScriptCanvasEditor::Widget::CommandLineEdit" name="commandText">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>261</width>
<height>20</height>
</rect>
</property>
<property name="sizePolicy">
<sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
<horstretch>1</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>120</width>
<height>20</height>
</size>
</property>
</widget>
</item>
<item>
<widget class="ScriptCanvasEditor::Widget::CommandLineList" name="commandList">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>261</width>
<height>20</height>
</rect>
</property>
<property name="styleSheet">
<string notr="true">
QTableView { background-color: rgba(15,15,15,0.1); }
</string>
</property>
<property name="selectionBehavior">
<enum>QAbstractItemView::SelectRows</enum>
</property>
<property name="showGrid">
<bool>false</bool>
</property>
<property name="cornerButtonEnabled">
<bool>false</bool>
</property>
<attribute name="verticalHeaderVisible">
<bool>false</bool>
</attribute>
<attribute name="horizontalHeaderVisible">
<bool>false</bool>
</attribute>
<property name="frameShape">
<enum>QFrame::NoFrame</enum>
</property>
<property name="lineWidth">
<number>0</number>
</property>
<property name="sizePolicy">
<sizepolicy hsizetype="MinimumExpanding" vsizetype="Minimum">
<horstretch>1</horstretch>
<verstretch>0</verstretch>
</sizepolicy>
</property>
<property name="minimumSize">
<size>
<width>120</width>
<height>220</height>
</size>
</property>
<property name="showGrid">
<bool>false</bool>
</property>
<property name="alternatingRowColors">
<bool>true</bool>
</property>
<property name="selectionMode">
<enum>QAbstractItemView::ExtendedSelection</enum>
</property>
<property name="selectionBehavior">
<enum>QAbstractItemView::SelectRows</enum>
</property>
<property name="sortingEnabled">
<bool>true</bool>
</property>
<attribute name="headerStretchLastSection">
<bool>true</bool>
</attribute>
</widget>
</item>
</layout>
</widget>
<customwidgets>
<customwidget>
<class>ScriptCanvasEditor::Widget::CommandLineEdit</class>
<extends>QLineEdit</extends>
<header>Editor/View/Widgets/CommandLine.h</header>
</customwidget>
<customwidget>
<class>ScriptCanvasEditor::Widget::CommandLineList</class>
<extends>QTableView</extends>
<header>Editor/View/Widgets/CommandLine.h</header>
</customwidget>
</customwidgets>
</ui> | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>BNDL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1</string>
</dict>
</plist>
| {
"pile_set_name": "Github"
} |
// Copyright (c) .NET Foundation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
namespace System.CommandLine.Binding
{
/// <summary>
/// Reflection-backed description of a model type: its public instance
/// constructors and its writable public properties. Descriptors are cached
/// per <see cref="Type"/> and shared process-wide.
/// </summary>
public class ModelDescriptor
{
    private const BindingFlags CommonBindingFlags =
        BindingFlags.IgnoreCase
        | BindingFlags.Public
        | BindingFlags.Instance;

    // Process-wide cache: one descriptor per model type.
    private static readonly ConcurrentDictionary<Type, ModelDescriptor> _modelDescriptors = new ConcurrentDictionary<Type, ModelDescriptor>();

    // Lazily materialized by the corresponding properties below.
    private List<PropertyDescriptor>? _propertyDescriptors;
    private List<ConstructorDescriptor>? _constructorDescriptors;

    /// <summary>Wraps <paramref name="modelType"/>; use <see cref="FromType(Type)"/> to obtain cached instances.</summary>
    protected ModelDescriptor(Type modelType)
    {
        if (modelType is null)
        {
            throw new ArgumentNullException(nameof(modelType));
        }

        ModelType = modelType;
    }

    /// <summary>Descriptors for every public instance constructor, built on first access.</summary>
    public IReadOnlyList<ConstructorDescriptor> ConstructorDescriptors
    {
        get
        {
            if (_constructorDescriptors is null)
            {
                var constructors = new List<ConstructorDescriptor>();
                foreach (var constructorInfo in ModelType.GetConstructors(CommonBindingFlags))
                {
                    constructors.Add(new ConstructorDescriptor(constructorInfo, this));
                }

                _constructorDescriptors = constructors;
            }

            return _constructorDescriptors;
        }
    }

    /// <summary>Descriptors for every public instance property with a public setter, built on first access.</summary>
    public IReadOnlyList<IValueDescriptor> PropertyDescriptors
    {
        get
        {
            if (_propertyDescriptors is null)
            {
                var properties = new List<PropertyDescriptor>();
                foreach (var propertyInfo in ModelType.GetProperties(CommonBindingFlags))
                {
                    // CanWrite guarantees SetMethod is non-null here.
                    if (propertyInfo.CanWrite && propertyInfo.SetMethod.IsPublic)
                    {
                        properties.Add(new PropertyDescriptor(propertyInfo, this));
                    }
                }

                _propertyDescriptors = properties;
            }

            return _propertyDescriptors;
        }
    }

    /// <summary>The described model type.</summary>
    public Type ModelType { get; }

    public override string ToString() => ModelType.Name;

    /// <summary>Gets (or creates and caches) the descriptor for <typeparamref name="T"/>.</summary>
    public static ModelDescriptor FromType<T>() => FromType(typeof(T));

    /// <summary>Gets (or creates and caches) the descriptor for <paramref name="type"/>.</summary>
    public static ModelDescriptor FromType(Type type) =>
        _modelDescriptors.GetOrAdd(
            type,
            modelType => new ModelDescriptor(modelType));
}
} | {
"pile_set_name": "Github"
} |
# coding: utf-8
class Solution:
    """Quick-select solution for the k-th largest element (LintCode:
    'Kth Largest Element')."""

    # @param k & A: an integer and an array
    # @return: the k-th largest element (an integer)
    def kthLargestElement(self, k, A):
        """Return the k-th largest element of A (k is 1-based).

        Repeatedly partitions A in place around a pivot until the pivot's
        rank, counted from the largest element, equals k. Average O(n)
        time: the searched range roughly halves each round, and
        n + n/2 + n/4 + ... = 2n.
        """
        start, end = 0, len(A)
        while True:
            pivot, rank = self.partition(A, start, end)
            if rank == k:
                return pivot
            if rank > k:
                # Pivot ranks below the target: the k-th largest lies among
                # the larger elements, right of the pivot's final position.
                start = len(A) - rank + 1
            else:
                # Pivot ranks above the target: search the smaller elements.
                end = len(A) - rank

    def partition(self, array, start, end):
        """Partition array[start:end] in place around array[start].

        Elements smaller than the pivot are moved before it (Lomuto
        scheme). Returns (pivot, rank) where rank is the pivot's 1-based
        rank counted from the largest element of the whole array.
        """
        ret, ret_index = array[start], start
        # NOTE: was `xrange` (Python 2 only); `range` behaves identically
        # here and keeps the code runnable on Python 3.
        for i in range(start + 1, end):
            if array[i] < ret:
                ret_index += 1
                if ret_index != i:
                    array[ret_index], array[i] = array[i], array[ret_index]
        array[start], array[ret_index] = array[ret_index], array[start]
        # len(array) - ret_index converts the pivot's index into its rank
        # from the largest; without it this would be the (x+1)-th smallest.
        return ret, len(array) - ret_index
# medium: http://lintcode.com/zh-cn/problem/kth-largest-element/
| {
"pile_set_name": "Github"
} |
// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <sys/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "test_tcpbpf.h"
/* Compare two scalar values; on mismatch, print both (formatted with the
 * printf conversion suffix `fmt` appended to '%') and jump to the enclosing
 * function's `err` label — so every caller must define one. */
#define EXPECT_EQ(expected, actual, fmt)			\
	do {							\
		if ((expected) != (actual)) {			\
			printf("  Value of: " #actual "\n"	\
			       "    Actual: %" fmt "\n"		\
			       "  Expected: %" fmt "\n",	\
			       (actual), (expected));		\
			goto err;				\
		}						\
	} while (0)
/* Compare the stats the BPF program accumulated in its global map against
 * the values the scripted TCP session is expected to produce. Returns 0 on
 * success, -1 on the first mismatch (EXPECT_EQ jumps to err). */
int verify_result(const struct tcpbpf_globals *result)
{
	__u32 expected_events;

	/* Every sock_ops callback the program should have observed is
	 * recorded as a bit in event_map. */
	expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
			   (1 << BPF_SOCK_OPS_RWND_INIT) |
			   (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) |
			   (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) |
			   (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) |
			   (1 << BPF_SOCK_OPS_NEEDS_ECN) |
			   (1 << BPF_SOCK_OPS_STATE_CB) |
			   (1 << BPF_SOCK_OPS_TCP_LISTEN_CB));

	/* "#" PRIx32 yields the %#x format (hex with 0x prefix). */
	EXPECT_EQ(expected_events, result->event_map, "#" PRIx32);
	EXPECT_EQ(501ULL, result->bytes_received, "llu");
	EXPECT_EQ(1002ULL, result->bytes_acked, "llu");
	EXPECT_EQ(1, result->data_segs_in, PRIu32);
	EXPECT_EQ(1, result->data_segs_out, PRIu32);
	EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32);
	EXPECT_EQ(0, result->good_cb_test_rv, PRIu32);
	EXPECT_EQ(1, result->num_listen, PRIu32);
	return 0;
err:
	return -1;
}
/* Check the results the BPF program stored in its sockopt map: slot 0
 * holds the SAVE_SYN setsockopt outcome (expected 0), slot 1 the
 * SAVED_SYN getsockopt outcome (expected 1). Returns 0 on success. */
int verify_sockopt_result(int sock_map_fd)
{
	__u32 key = 0;
	int res;
	int rv;

	/* check setsockopt for SAVE_SYN */
	rv = bpf_map_lookup_elem(sock_map_fd, &key, &res);
	EXPECT_EQ(0, rv, "d");
	EXPECT_EQ(0, res, "d");
	key = 1;
	/* check getsockopt for SAVED_SYN */
	rv = bpf_map_lookup_elem(sock_map_fd, &key, &res);
	EXPECT_EQ(0, rv, "d");
	EXPECT_EQ(1, res, "d");
	return 0;
err:
	return -1;
}
/* Resolve the fd of the map called `name` inside `obj`; logs a failure
 * line tagged with `test` and returns -1 when the map is absent. */
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, name);

	if (map)
		return bpf_map__fd(map);

	printf("%s:FAIL:map '%s' not found\n", test, name);
	return -1;
}
/*
 * End-to-end test for test_tcpbpf_kern.o:
 *   1. create a scratch cgroup ("/foo") and join it,
 *   2. load the sock_ops BPF program and attach it to the cgroup,
 *   3. run tcp_server.py to generate the instrumented TCP traffic,
 *   4. verify the counters the program wrote to "global_map" and
 *      "sockopt_results".
 * Exits 0 on success, EXIT_FAILURE otherwise.
 */
int main(int argc, char **argv)
{
	const char *file = "test_tcpbpf_kern.o";
	int prog_fd, map_fd, sock_map_fd;
	struct tcpbpf_globals g = {0};
	const char *cg_path = "/foo";
	int error = EXIT_FAILURE;
	struct bpf_object *obj;
	int cg_fd = -1;
	__u32 key = 0;
	int rv;

	if (setup_cgroup_environment())
		goto err;

	cg_fd = create_and_get_cgroup(cg_path);
	if (cg_fd < 0)
		goto err;

	if (join_cgroup(cg_path))
		goto err;

	if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
		printf("FAILED: load_bpf_file failed for: %s\n", file);
		goto err;
	}

	rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
	if (rv) {
		/* Bug fix: report the attach return code; the old code
		 * printed `error`, which is just the EXIT_FAILURE constant. */
		printf("FAILED: bpf_prog_attach: %d (%s)\n",
		       rv, strerror(errno));
		goto err;
	}

	/* Drive the traffic the BPF program instruments. */
	if (system("./tcp_server.py")) {
		printf("FAILED: TCP server\n");
		goto err;
	}

	map_fd = bpf_find_map(__func__, obj, "global_map");
	if (map_fd < 0)
		goto err;

	sock_map_fd = bpf_find_map(__func__, obj, "sockopt_results");
	if (sock_map_fd < 0)
		goto err;

	rv = bpf_map_lookup_elem(map_fd, &key, &g);
	if (rv != 0) {
		printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
		goto err;
	}

	if (verify_result(&g)) {
		printf("FAILED: Wrong stats\n");
		goto err;
	}

	if (verify_sockopt_result(sock_map_fd)) {
		printf("FAILED: Wrong sockopt stats\n");
		goto err;
	}

	printf("PASSED!\n");
	error = 0;
err:
	/* Best-effort teardown; safe even when attach never happened. */
	bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
	close(cg_fd);
	cleanup_cgroup_environment();
	return error;
}
| {
"pile_set_name": "Github"
} |
# coding: utf-8
import chainer
import chainer.functions as F
class ExpandDims(chainer.Chain):
    """Chain that inserts a new axis at position 1, exercised twice: once
    via the keyword form and once via the positional form of
    F.expand_dims (both produce identical outputs)."""

    def __init__(self):
        super(ExpandDims, self).__init__()

    def forward(self, x):
        expanded_kw = F.expand_dims(x, axis=1)
        expanded_pos = F.expand_dims(x, 1)
        return expanded_kw, expanded_pos
# ======================================
from chainer_compiler.elichika import testtools
def main():
    """Generate an elichika test case for the ExpandDims model."""
    import numpy as np
    # Fixed seed keeps the generated test case deterministic.
    np.random.seed(314)
    model = ExpandDims()
    # Random (6, 4) float32 input shifted to roughly [-0.5, 0.5).
    x = np.random.rand(6, 4).astype(np.float32) - 0.5
    testtools.generate_testcase(model, [x])
if __name__ == '__main__':
    main()
| {
"pile_set_name": "Github"
} |
from __future__ import absolute_import
from .base import * # NOQA
| {
"pile_set_name": "Github"
} |
{% load i18n %}
{% include 'authentication/_access_key_modal.html' %}
<div class="row border-bottom">
<nav class="navbar navbar-static-top white-bg" role="navigation" style="margin-bottom: 0">
<div class="navbar-header">
<a class="navbar-minimalize minimalize-styl-2 btn btn-primary " href="#"><i class="fa fa-bars"></i> </a>
</div>
<ul class="nav navbar-top-links navbar-right">
<li class="dropdown">
<a class="count-info dropdown-toggle" data-toggle="dropdown" href="#" target="_blank">
<i class="fa fa-handshake-o"></i>
<span class="m-r-sm text-muted welcome-message">{% trans 'Help' %} <b class="caret"></b></span>
</a>
<ul class="dropdown-menu animated fadeInRight m-t-xs profile-dropdown">
<li>
<a class="count-info" href="http://docs.jumpserver.org/" target="_blank">
<i class="fa fa-file-text"></i>
<span class="m-r-sm text-muted welcome-message">{% trans 'Docs' %}</span>
</a>
</li>
<li>
<a class="count-info" href="https://market.aliyun.com/products/53690006/cmgj026011.html?spm=5176.730005.0.0.cY2io1" target="_blank">
<i class="fa fa-suitcase"></i>
<span class="m-r-sm text-muted welcome-message">{% trans 'Commercial support' %}</span>
</a>
</li>
</ul>
</li>
<li class="dropdown">
<a class="count-info dropdown-toggle" data-toggle="dropdown" href="#" target="_blank">
<i class="fa fa-globe"></i>
{% ifequal request.COOKIES.django_language 'en' %}
<span class="m-r-sm text-muted welcome-message">English<b class="caret"></b></span>
{% else %}
<span class="m-r-sm text-muted welcome-message">中文<b class="caret"></b></span>
{% endifequal %}
</a>
<ul class="dropdown-menu animated fadeInRight m-t-xs profile-dropdown">
<li>
<a id="switch_cn" href="{% url 'i18n-switch' lang='zh-hans' %}">
<i class="fa fa-flag"></i>
<span> 中文</span>
</a>
</li>
<li>
<a id="switch_en" href="{% url 'i18n-switch' lang='en' %}">
<i class="fa fa-flag-checkered"></i>
<span> English</span>
</a>
</li>
</ul>
</li>
<li class="dropdown">
{% if request.user.is_authenticated %}
<a data-toggle="dropdown" class="dropdown-toggle" href="#">
<span class="m-r-sm text-muted welcome-message">
<img alt="image" class="img-circle" width="30" height="30" src="{{ request.user.avatar_url }}"/>
<span style="font-size: 13px;font-weight: 400"> {{ request.user.name }}
<b class="caret"></b>
</span>
</span>
</a>
<ul class="dropdown-menu animated fadeInRight m-t-xs profile-dropdown">
<li><a href="{% url 'users:user-profile' %}"><i class="fa fa-cogs"> </i><span> {% trans 'Profile' %}</span></a></li>
{% if request.user.can_admin_or_audit_current_org %}
{% if request.COOKIES.IN_ADMIN_PAGE == 'No' %}
<li><a id="switch_admin"><i class="fa fa-exchange"></i><span> {% trans 'Admin page' %}</span></a></li>
{% else %}
<li><a id="switch_user"><i class="fa fa-exchange"></i><span> {% trans 'User page' %}</span></a></li>
{% endif %}
{% endif %}
<li><a href="#" data-toggle="modal" data-target="#access_key_modal" tabindex="0"><i class="fa fa-key"></i> {% trans 'API Key' %}</a></li>
<li><a href="{% url 'authentication:logout' %}"><i class="fa fa-sign-out"></i> {% trans 'Logout' %}</a></li>
</ul>
{% else %}
<a href="{% url 'authentication:login' %}">
<i class="fa fa-sign-in"></i>{% trans 'Login' %}
</a>
{% endif %}
</li>
</ul>
</nav>
</div>
<div class="row wrapper border-bottom white-bg page-heading">
<div class="col-sm-10">
<h2></h2>
<ol class="breadcrumb">
{% if app %}
<li>
<a>{{ app }}</a>
</li>
{% endif %}
{% if action %}
<li class="active">
<strong>{{ action }}</strong>
</li>
{% endif %}
</ol>
</div>
<div class="col-sm-2">
</div>
</div>
<script>
    // Wire up the admin/user page switch links. The backend reads the
    // IN_ADMIN_PAGE cookie to decide which UI to render; delCookie/setCookie
    // are presumably defined globally elsewhere — confirm before refactoring.
    $(document).ready(function () {
    })
    .on('click', '#switch_admin', function () {
        var cookieName = "IN_ADMIN_PAGE";
        // Small delay so the click fully resolves before navigating away.
        setTimeout(function () {
            delCookie(cookieName);
            setCookie(cookieName, "Yes");
            window.location = "/"
        }, 100)
    })
    .on('click', '#switch_user', function () {
        var cookieName = "IN_ADMIN_PAGE";
        setTimeout(function () {
            delCookie(cookieName);
            setCookie(cookieName, "No");
            window.location = "{% url 'assets:user-asset-list' %}"
        }, 100);
    })
</script>
| {
"pile_set_name": "Github"
} |
/**
* global variables
*/
#include "pedometer_defs.h"
| {
"pile_set_name": "Github"
} |
/* See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Esri Inc. licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gc.base.util;
import java.util.UUID;
/**
 * Helpers for generating and normalizing UUID strings.
 */
public class UuidUtil {

  /**
   * Generates a new random uuid.
   * @param lowCaseRemoveDashes when true, the result is lower-cased and the
   *        dashes are stripped (32 hex characters instead of 36)
   * @return the uuid string
   */
  public static String makeUuid(boolean lowCaseRemoveDashes) {
    String uuid = UUID.randomUUID().toString();
    if (!lowCaseRemoveDashes) {
      return uuid;
    }
    return uuid.toLowerCase().replace("-", "");
  }

  /**
   * Normalizes a GPT document uuid by stripping curly braces {} and dashes
   * and lower-casing the remainder.
   * @param docuuid the raw document uuid
   * @return the normalized uuid
   */
  public static String normalizeGptUuid(String docuuid) {
    return docuuid
        .replace("{", "")
        .replace("}", "")
        .replace("-", "")
        .toLowerCase();
  }
}
| {
"pile_set_name": "Github"
} |
# Angr doesn't currently support reading multiple things with scanf (Ex:
# scanf("%u %u).) You will have to tell the simulation engine to begin the
# program after scanf is called and manually inject the symbols into registers.
import angr
import claripy
import sys
# NOTE(review): this is an intentionally incomplete exercise scaffold (angr
# CTF tutorial style). The `???` and `...` placeholders must be filled in by
# the student, so this file is NOT runnable as-is. It also uses the Python 2
# print statement.
def main(argv):
  # argv[1] is the path of the challenge binary to analyze.
  path_to_binary = argv[1]
  project = angr.Project(path_to_binary)
  # Sometimes, you want to specify where the program should start. The variable
  # start_address will specify where the symbolic execution engine should begin.
  # Note that we are using blank_state, not entry_state.
  # (!)
  start_address = ??? # :integer (probably hexadecimal)
  initial_state = project.factory.blank_state(addr=start_address)
  # Create a symbolic bitvector (the datatype Angr uses to inject symbolic
  # values into the binary.) The first parameter is just a name Angr uses
  # to reference it.
  # You will have to construct multiple bitvectors. Copy the two lines below
  # and change the variable names. To figure out how many (and of what size)
  # you need, dissassemble the binary and determine the format parameter passed
  # to scanf.
  # (!)
  password0_size_in_bits = ??? # :integer
  password0 = claripy.BVS('password0', password0_size_in_bits)
  ...
  # Set a register to a symbolic value. This is one way to inject symbols into
  # the program.
  # initial_state.regs stores a number of convenient attributes that reference
  # registers by name. For example, to set eax to password0, use:
  #
  # initial_state.regs.eax = password0
  #
  # You will have to set multiple registers to distinct bitvectors. Copy and
  # paste the line below and change the register. To determine which registers
  # to inject which symbol, dissassemble the binary and look at the instructions
  # immediately following the call to scanf.
  # (!)
  initial_state.regs.??? = password0
  ...
  simulation = project.factory.simgr(initial_state)
  # Predicates for the symbolic exploration: which program output counts as
  # success and which as failure.
  def is_successful(state):
    stdout_output = state.posix.dumps(sys.stdout.fileno())
    return ???
  def should_abort(state):
    stdout_output = state.posix.dumps(sys.stdout.fileno())
    return ???
  simulation.explore(find=is_successful, avoid=should_abort)
  if simulation.found:
    solution_state = simulation.found[0]
    # Solve for the symbolic values. If there are multiple solutions, we only
    # care about one, so we can use eval, which returns any (but only one)
    # solution. Pass eval the bitvector you want to solve for.
    # (!)
    solution0 = solution_state.se.eval(password0)
    ...
    # Aggregate and format the solutions you computed above, and then print
    # the full string. Pay attention to the order of the integers, and the
    # expected base (decimal, octal, hexadecimal, etc).
    solution = ??? # :string
    print solution  # Python 2 print statement (scaffold targets Python 2)
  else:
    raise Exception('Could not find the solution')
if __name__ == '__main__':
  main(sys.argv)
| {
"pile_set_name": "Github"
} |
// Modified by Princeton University on June 9th, 2015
/*
* ========== Copyright Header Begin ==========================================
*
* OpenSPARC T1 Processor File: fp_simple_all0.s
* Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
*
* The above named program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* The above named program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this work; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ========== Copyright Header End ============================================
*/
/***********************************************************************
* Name: fp_simple_all0.s
*
* Description:
* Simple test with no exceptions or state updates
*
**********************************************************************/
#include "boot.s"
.global sam_fast_immu_miss
.global sam_fast_dmmu_miss
.text
.global main
main:
wr %g0, 0x7, %fprs /* make sure fef is 1 */
setx data0, %l0, %l1 ! %l1 -> data0
setx data1, %l0, %l2 ! %l2 -> data1
/*******************************************************
Initialize regs
*******************************************************/
! Preload every even FP register pair with the same doubleword from data0
ldd [%l1+0x50], %f0
ldd [%l1+0x50], %f2
ldd [%l1+0x50], %f4
ldd [%l1+0x50], %f6
ldd [%l1+0x50], %f8
ldd [%l1+0x50], %f10
ldd [%l1+0x50], %f12
ldd [%l1+0x50], %f14
ldd [%l1+0x50], %f16
ldd [%l1+0x50], %f18
ldd [%l1+0x50], %f20
ldd [%l1+0x50], %f22
ldd [%l1+0x50], %f24
ldd [%l1+0x50], %f26
ldd [%l1+0x50], %f28
ldd [%l1+0x50], %f30
ldd [%l1+0x50], %f32
/*******************************************************
* Simple ld/st
*******************************************************/
! Single and double FP loads, stores back, then reloads of the stored data
ld [%l1+0x0], %f0
ld [%l1+0x4], %f1
ld [%l1+0x8], %f2
ld [%l1+0xc], %f3
ldd [%l1+0x0], %f4
ldd [%l1+0x8], %f6
ldd [%l1+0x0], %f8
ldd [%l1+0x8], %f10
st %f8, [%l1+0x10]
st %f9, [%l1+0x14]
st %f10, [%l1+0x18]
st %f11, [%l1+0x1c]
std %f8, [%l1+0x20]
std %f10, [%l1+0x28]
std %f8, [%l1+0x30]
std %f10, [%l1+0x38]
ld [%l1+0x30], %f0
ld [%l1+0x34], %f1
ld [%l1+0x38], %f2
ld [%l1+0x3c], %f3
ldd [%l1+0x10], %f4
ldd [%l1+0x18], %f6
/*******************************************************
* Arithmetic and CTI
*******************************************************/
! Just a few integer inst. to set cc and registers for moves
subcc %g0, 0x1, %g0 ! This should set n and c
set 0x1, %g1
setx 0x80000000, %g2, %g3
fadds %f0, %f1, %f20
faddd %f0, %f2, %f22
fsubs %f20, %f1, %f23
fsubd %f22, %f2, %f22
fsubs %f23, %f1, %f25
fsubd %f22, %f2, %f24
fsubs %f25, %f1, %f27
fsubd %f24, %f2, %f26
! FP compares cycling through all four %fcc fields
fcmps %fcc0, %f0, %f1
fcmpd %fcc1, %f0, %f2
fcmps %fcc2, %f4, %f5
fcmpd %fcc3, %f4, %f6
fcmps %fcc1, %f0, %f1
fcmpd %fcc2, %f0, %f2
fcmps %fcc3, %f4, %f5
fcmpd %fcc0, %f4, %f6
fcmps %fcc2, %f0, %f1
fcmpd %fcc3, %f0, %f2
fcmps %fcc0, %f4, %f5
fcmpd %fcc1, %f4, %f6
fcmps %fcc3, %f0, %f1
fcmpd %fcc0, %f0, %f2
fcmps %fcc1, %f4, %f5
fcmpd %fcc2, %f4, %f6
! FP conditional branches; annulled delay slots must not execute
fba,a %fcc0, target1
fba,a test_fail ! This shouldnt be executed
fcmpd %fcc3, %f4, %f6 ! This shouldnt be executed
target1:
fbn,a %fcc0, target2
fbn,a test_fail ! This shouldnt be executed
target2:
fba,a,pt %fcc1, target3
fcmpd %fcc2, %f4, %f6 ! This shouldnt be executed
target3:
fbn,a,pt %fcc2, test_fail ! This is not causing trap (??)
fbu,pt %fcc3, target4
target4:
fbg,a,pt %fcc0, target5
fcmps %fcc1, %f4, %f5
target5:
fbug,pt %fcc2, target6
fcmps %fcc3, %f2, %f3
target6:
fbl,a,pt %fcc3, target7
fcmps %fcc0, %f1, %f1
target7:
fbul,pt %fcc0, target8
fcmps %fcc3, %f0, %f1
target8:
fblg,a,pt %fcc1, target9
target9:
fbne,pt %fcc2, target10
target10:
fbe,pt %fcc3, target11
nop
target11:
fbue,pt %fcc0, target12
fcmps %fcc1, %f4, %f5
target12:
fbge,pt %fcc1, target13
nop
target13:
fbuge,a,pt %fcc2, target14
fcmps %fcc3, %f4, %f5
target14:
fble,pt %fcc3, target15
target15:
fbule,pt %fcc0, target16
target16:
fbo,pt %fcc1, target17
target17:
! Conditional FP moves on integer condition codes (%icc then %xcc)
fmovsa %icc, %f1, %f2
fmovsn %icc, %f2, %f1
fmovsne %icc, %f22, %f23
fmovsg %icc, %f24, %f23
fmovsle %icc, %f3, %f2
fmovsge %icc, %f4, %f3
fmovsl %icc, %f5, %f4
fmovsgu %icc, %f6, %f5
fmovsleu %icc, %f7, %f6
fmovscc %icc, %f21, %f20
fmovscs %icc, %f22, %f21
fmovspos %icc, %f23, %f22
fmovsneg %icc, %f24, %f23
fmovsvc %icc, %f25, %f24
fmovsvs %icc, %f26, %f25
fmovda %xcc, %f0, %f2
fmovdn %xcc, %f2, %f0
fmovdne %xcc, %f22, %f24
fmovdg %xcc, %f24, %f24
fmovdle %xcc, %f2, %f2
fmovdge %xcc, %f4, %f2
fmovdl %xcc, %f6, %f4
fmovdgu %xcc, %f6, %f6
fmovdleu %xcc, %f8, %f6
fmovdcc %xcc, %f22, %f20
fmovdcs %xcc, %f22, %f22
fmovdpos %xcc, %f24, %f22
fmovdneg %xcc, %f24, %f24
fmovdvc %xcc, %f26, %f24
fmovdvs %xcc, %f26, %f26
! Register-contents conditional FP moves (%g0/%g1/%g3 set above)
fmovrse %g0, %f25, %f26
fmovrde %g0, %f24, %f26
fmovrslez %g1, %f25, %f28
fmovrdlez %g1, %f24, %f28
fmovrslz %g3, %f25, %f30
fmovrdlz %g3, %f24, %f30
! FP <-> integer conversions
fstox %f0, %f10
fdtox %f2, %f10
fstoi %f4, %f12
fdtox %f4, %f12
fxtos %f10, %f0
fxtod %f10, %f0
fitos %f12, %f4
fitod %f12, %f4
/*******************************************************
* Exit code
*******************************************************/
test_pass:
ta T_GOOD_TRAP
test_fail:
ta T_BAD_TRAP
/*******************************************************
* Data section
*******************************************************/
.data
data0:
.word 0x80000010
.word 0x80000011
.word 0x80000012
.word 0x80000013
.word 0x80000014
.word 0x80000015
.word 0x80000016
.word 0x80000017
.word 0x80000018
.word 0x80000019
.word 0x8000001a
.word 0x8000001b
.word 0x8000001c
.word 0x8000001d
.word 0x8000001e
.word 0x8000001f
.word 0x00000000, 0x00000000
.align 256
data1:
.word 0x00000011
.word 0x00000012
.word 0x00000013
.word 0x00000014
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2013 The WebRTC@AnyRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
#define WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
#include <memory>
#include "webrtc/typedefs.h"
namespace webrtc {
class PushSincResampler;
// Wraps PushSincResampler to provide stereo support.
// TODO(ajm): add support for an arbitrary number of channels.
template <typename T>
class PushResampler {
 public:
  PushResampler();
  virtual ~PushResampler();

  // Must be called whenever the parameters change. Free to be called at any
  // time as it is a no-op if parameters have not changed since the last call.
  int InitializeIfNeeded(int src_sample_rate_hz, int dst_sample_rate_hz,
                         size_t num_channels);

  // Returns the total number of samples provided in destination (e.g. 32 kHz,
  // 2 channel audio gives 640 samples).
  int Resample(const T* src, size_t src_length, T* dst, size_t dst_capacity);

 private:
  // Per-channel resamplers; the names suggest the second one handles the
  // right channel in the stereo case (confirm in the .cc).
  std::unique_ptr<PushSincResampler> sinc_resampler_;
  std::unique_ptr<PushSincResampler> sinc_resampler_right_;
  // Parameters recorded by InitializeIfNeeded().
  int src_sample_rate_hz_;
  int dst_sample_rate_hz_;
  size_t num_channels_;
  // Scratch buffers, presumably for split left/right channel data —
  // allocation and use live in the implementation file.
  std::unique_ptr<T[]> src_left_;
  std::unique_ptr<T[]> src_right_;
  std::unique_ptr<T[]> dst_left_;
  std::unique_ptr<T[]> dst_right_;
};
} // namespace webrtc
#endif // WEBRTC_COMMON_AUDIO_RESAMPLER_INCLUDE_PUSH_RESAMPLER_H_
| {
"pile_set_name": "Github"
} |
diff --git a/package.json b/package.json
index 826ad87f..1ac6cee3 100644
--- a/package.json
+++ b/package.json
@@ -22,14 +22,14 @@
"clean:test": "rimraf build && rimraf test/config/node_modules",
"docs": "node scripts/buildDocs.js",
"compile": "npm-run-all -p compile:core compile:test -s compile:scripts",
- "compile:core": "tsc -p src",
- "compile:scripts": "tsc -p scripts",
- "compile:test": "tsc -p test",
+ "compile:core": "sucrase ./src -d ./lib --transforms typescript,imports --enable-legacy-typescript-module-interop",
+ "compile:scripts": "sucrase ./scripts -d ./scripts --transforms typescript,imports --enable-legacy-typescript-module-interop",
+ "compile:test": "mkdir -p build && sucrase ./test -d ./build/test --exclude-dirs files,rules --transforms typescript,imports --enable-legacy-typescript-module-interop && sucrase ./src -d ./build/src --transforms typescript,imports --enable-legacy-typescript-module-interop",
"lint": "npm-run-all -p lint:global lint:from-bin",
"lint:global": "tslint --project test/tsconfig.json --format stylish # test includes 'src' too",
"lint:from-bin": "node bin/tslint --project test/tsconfig.json --format stylish",
"publish:local": "./scripts/npmPublish.sh",
- "test": "npm-run-all test:pre -p test:mocha test:rules",
+ "test": "npm-run-all compile test:pre -p test:mocha test:rules",
"test:pre": "cd ./test/config && npm install --no-save",
"test:mocha": "mocha --reporter spec --colors \"build/test/**/*Tests.js\"",
"test:rules": "node ./build/test/ruleTestRunner.js",
diff --git a/test/executable/executableTests.ts b/test/executable/executableTests.ts
index a5affd2b..c5481e45 100644
--- a/test/executable/executableTests.ts
+++ b/test/executable/executableTests.ts
@@ -141,6 +141,7 @@ describe("Executable", function(this: Mocha.ISuiteCallbackContext) {
cwd: "./test/config",
},
(err, stdout) => {
+ console.log(`err is ${err}`);
assert.isNotNull(err, "process should exit with error");
assert.strictEqual(err.code, 2, "error code should be 2");
assert.include(stdout, "hello from custom formatter", "stdout should contain output of custom formatter");
diff --git a/test/files/custom-rules/alwaysFailRule.js b/test/files/custom-rules/alwaysFailRule.js
index 4f1b0bf7..049d4f31 100644
--- a/test/files/custom-rules/alwaysFailRule.js
+++ b/test/files/custom-rules/alwaysFailRule.js
@@ -1,27 +1,12 @@
-var __extends = (this && this.__extends) || function (d, b) {
- for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p];
- function __() { this.constructor = d; }
- d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
-};
var Lint = require('../../../lib/index');
-var Rule = (function (_super) {
- __extends(Rule, _super);
- function Rule() {
- _super.apply(this, arguments);
- }
- Rule.prototype.apply = function (sourceFile) {
+class Rule extends Lint.Rules.AbstractRule {
+ apply(sourceFile) {
return this.applyWithWalker(new AlwaysFailWalker(sourceFile, this.getOptions()));
- };
- return Rule;
-})(Lint.Rules.AbstractRule);
-exports.Rule = Rule;
-var AlwaysFailWalker = (function (_super) {
- __extends(AlwaysFailWalker, _super);
- function AlwaysFailWalker() {
- _super.apply(this, arguments);
}
- AlwaysFailWalker.prototype.visitSourceFile = function (node) {
+}
+exports.Rule = Rule;
+class AlwaysFailWalker extends Lint.RuleWalker {
+ visitSourceFile(node) {
this.addFailure(this.createFailure(node.getStart(), node.getWidth(), "failure"));
- };
- return AlwaysFailWalker;
-})(Lint.RuleWalker);
+ }
+}
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 00000000..60dc8db4
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,18 @@
+{
+ "compilerOptions": {
+ "module": "commonjs",
+ "noImplicitAny": true,
+ "noImplicitReturns": true,
+ "noImplicitThis": true,
+ "noUnusedParameters": true,
+ "noUnusedLocals": true,
+ "strictNullChecks": true,
+ "strictFunctionTypes": true,
+ "importHelpers": true,
+ "declaration": true,
+ "sourceMap": false,
+ "target": "es2017",
+ "lib": ["es6"],
+ "outDir": "../lib"
+ }
+}
| {
"pile_set_name": "Github"
} |
/* androidfde.c
*
* hashkill - a hash cracking tool
* Copyright (C) 2010 Milen Rangelov <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define _LARGEFILE64_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <alloca.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <fcntl.h>
#include <arpa/inet.h>
#include <openssl/sha.h>
#include "plugin.h"
#include "err.h"
#include "hashinterface.h"
/* On-disk layout of the Android FDE crypto footer header; `myphdr` holds
 * the parsed copy for the currently loaded device. */
struct android_hdr
{
	uint32_t magic;               /* 0xD0B5B1C4 marks a valid footer */
	uint16_t major_version;
	uint16_t minor_version;
	uint32_t ftr_size;            /* total footer size in bytes */
	uint32_t flags;
	uint32_t keysize;             /* master key length in bytes */
	uint32_t spare1;
	uint64_t fs_size;             /* encrypted filesystem size */
	uint32_t failed_count;
	unsigned char cipherName[64]; /* cipher spec; only "aes*" supported here */
} myphdr;
/* The crypto footer sits CRYPT_FOOTER_OFFSET bytes before the end of the
 * partition; the parser scans up to ACCEPTABLE_BACKLOG further for the
 * footer magic. */
#define CRYPT_FOOTER_OFFSET 0x4000
#define ACCEPTABLE_BACKLOG 0x2000
static char myfilename[255];          /* device path cached by parse_hash */
static unsigned char mkey[32];        /* encrypted master key from the footer */
static unsigned char msalt[16];       /* PBKDF2 salt from the footer */
static unsigned char blockbuf[512*3]; /* first three sectors of the device */
/* One-line description shown in the plugin list. */
char * hash_plugin_summary(void)
{
    return ("androidfde \t"
            "Android Full Disk Encryption plugin");
}
/* Long-form help text for the plugin (shown by the framework's help). */
char * hash_plugin_detailed(void)
{
    static const char description[] =
        "androidfde - Android Full Disk Encryption plugin\n"
        "------------------------------------------------\n"
        "Use this module to crack Android encrypted partitions\n"
        "Input should be a encrypted device file specified with -f\n"
        "Known software that uses this password hashing method:\n"
        "Android\n"
        "\nAuthor: Milen Rangelov <[email protected]>\n";

    return (char *)description;
}
// Not reference implementation - this is modified for use by androidfde!
/* CBC-decrypt `size` bytes of `src` into `dst` with the given master key,
 * using an ESSIV-style IV: the 32-bit sector number (host byte order,
 * zero-padded to one block) encrypted under SHA-256(key). The key length
 * is taken from the parsed footer (myphdr.keysize). */
static void decrypt_aes_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int startsector,int size)
{
	AES_KEY aeskey;
	unsigned char essiv[16];
	unsigned char essivhash[32];
	SHA256_CTX ctx;
	unsigned char sectorbuf[16];
	unsigned char zeroiv[16];

	/* ESSIV key: SHA-256 hash of the (decrypted) master key. */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, key, myphdr.keysize);
	SHA256_Final(essivhash, &ctx);

	/* Encrypt the padded sector number (single block, zero IV) to derive
	 * the per-sector IV. */
	memset(sectorbuf,0,16);
	memset(zeroiv,0,16);
	memset(essiv,0,16);
	memcpy(sectorbuf,&startsector,4);
	hash_aes_set_encrypt_key(essivhash, 256, &aeskey);
	hash_aes_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT);

	/* Decrypt the payload with the master key and the derived IV. */
	hash_aes_set_decrypt_key(key, myphdr.keysize*8, &aeskey);
	hash_aes_cbc_encrypt(src, dst, size, &aeskey, essiv, AES_DECRYPT);
}
/* Locate and parse the Android FDE crypto footer on `filename`.
 *
 * The footer lives CRYPT_FOOTER_OFFSET bytes before the end of the device
 * but may be shifted, so up to ACCEPTABLE_BACKLOG bytes are scanned for
 * the footer magic. On success the (still encrypted) master key, the KDF
 * salt and the first three sectors are cached in module state and the
 * target is registered with the framework. `hashline` is unused for this
 * plugin (input is a device, not a hash string).
 *
 * Returns hash_ok on success, hash_err on any I/O or format error.
 */
hash_stat hash_plugin_parse_hash(char *hashline, char *filename)
{
	int myfile;
	int cnt;
	int found = 0;
	off_t pos;

	/* Bug fix: 0 is a valid descriptor; only negative means failure. */
	myfile = open(filename, O_RDONLY|O_LARGEFILE);
	if (myfile < 0)
		return hash_err;

	if (lseek(myfile, -(CRYPT_FOOTER_OFFSET+ACCEPTABLE_BACKLOG), SEEK_END) < 0)
	{
		close(myfile);
		return hash_err;
	}
	pos = lseek(myfile, 0, SEEK_CUR);

	/* Scan byte-by-byte for the footer magic. */
	for (cnt = 0; cnt < ACCEPTABLE_BACKLOG; cnt++)
	{
		lseek(myfile, pos+cnt, SEEK_SET);
		/* Bug fix: compare against a signed size so read()'s -1 error
		 * return is detected (the old unsigned comparison promoted -1
		 * to a huge value and treated it as success), and close the
		 * descriptor on this error path instead of leaking it. */
		if (read(myfile, &myphdr, sizeof(struct android_hdr)) !=
		    (ssize_t)sizeof(struct android_hdr))
		{
			close(myfile);
			return hash_err;
		}
		if (myphdr.magic == 0xD0B5B1C4)
		{
			found = 1;
			break;
		}
	}
	if (!found)
	{
		close(myfile);
		return hash_err;
	}

	/* Only AES-based footers are supported. */
	if (strncmp((char*)myphdr.cipherName, "aes", 3) != 0)
	{
		close(myfile);
		return hash_err;
	}

	/* Security fix: keysize comes from untrusted on-disk data; reject
	 * anything larger than the mkey buffer to avoid an overflow. */
	if (myphdr.keysize > sizeof(mkey))
	{
		close(myfile);
		return hash_err;
	}

	/* Skip the remainder of the footer struct, then read key and salt,
	 * checking each read for short counts or errors. */
	if (lseek(myfile, myphdr.ftr_size - sizeof(myphdr), SEEK_CUR) < 0 ||
	    read(myfile, mkey, myphdr.keysize) != (ssize_t)myphdr.keysize ||
	    lseek(myfile, 32, SEEK_CUR) < 0 ||
	    read(myfile, msalt, 16) != 16)
	{
		close(myfile);
		return hash_err;
	}

	/* Cache the first three sectors for the decryption sanity checks. */
	if (lseek(myfile, 0, SEEK_SET) < 0 ||
	    read(myfile, blockbuf, 512*3) != 512*3)
	{
		close(myfile);
		return hash_err;
	}
	close(myfile);

	strcpy(myfilename, filename);
	(void)hash_add_username(filename);
	(void)hash_add_hash("Android FDE",0);
	(void)hash_add_salt(" ");
	(void)hash_add_salt2(" ");
	return hash_ok;
}
/* Try each candidate password in the vector: derive a key+IV with
 * PBKDF2(password, salt), decrypt the stored master key, decrypt the
 * cached device sectors with it, and look for a plausible FAT boot
 * sector or ext superblock. On a hit, *num receives the index of the
 * matching password and hash_ok is returned; hash_err otherwise. */
hash_stat hash_plugin_check_hash(const char *hash, const char *password[VECTORSIZE], const char *salt, char *salt2[VECTORSIZE], const char *username, int *num, int threadid)
{
	unsigned char keycandidate[255];
	unsigned char keycandidate2[255];
	unsigned char decrypted1[512]; // FAT
	unsigned char decrypted2[512]; // ext3/4
	int a;
	AES_KEY aeskey;

	for (a=0;a<vectorsize;a++)
	{
		// Get pbkdf2 of the password to obtain decryption key
		/* 2000 iterations; the first keysize bytes are the key, the
		 * following 16 bytes the IV for unwrapping the master key. */
		hash_pbkdf2(password[a], msalt, 16, 2000, myphdr.keysize+16, keycandidate);
		hash_aes_set_decrypt_key(keycandidate, myphdr.keysize*8, &aeskey);
		hash_aes_cbc_encrypt(mkey, keycandidate2, 16, &aeskey, keycandidate+16, AES_DECRYPT);
		/* Decrypt the start of sector 0 (FAT check) and of sector 2,
		 * which is 1024 bytes in, where an ext superblock would sit. */
		decrypt_aes_cbc_essiv(blockbuf, decrypted1, keycandidate2,0,32);
		decrypt_aes_cbc_essiv(blockbuf+1024, decrypted2, keycandidate2,2,128);
		// Check for FAT
		if ((memcmp(decrypted1+3,"MSDOS5.0",8)==0))
		{
			*num=a;
			return hash_ok;
		}
		// Check for extfs
		/* Several superblock fields that must be small on a valid
		 * filesystem; all tiny => plausible ext superblock. */
		uint16_t v2,v3,v4;
		uint32_t v1,v5;
		memcpy(&v1,decrypted2+72,4);
		memcpy(&v2,decrypted2+0x3a,2);
		memcpy(&v3,decrypted2+0x3c,2);
		memcpy(&v4,decrypted2+0x4c,2);
		memcpy(&v5,decrypted2+0x48,4);
		if ((v1<5)&&(v2<4)&&(v3<5)&&(v4<2)&&(v5<5))
		{
			*num=a;
			return hash_ok;
		}
	}
	return hash_err;
}
int hash_plugin_hash_length(void)
{
return 16;
}
int hash_plugin_is_raw(void)
{
return 0;
}
int hash_plugin_is_special(void)
{
return 1;
}
void get_vector_size(int size)
{
vectorsize = size;
}
/* Reports the salt size, in bytes, used by this plugin. */
int get_salt_size(void)
{
    const int salt_bytes = 4;
    return salt_bytes;
}
| {
"pile_set_name": "Github"
} |
# Build each example application in its own subdirectory.
add_subdirectory(aes)
add_subdirectory(bench_operations)
add_subdirectory(euclidean_distance)
add_subdirectory(float)
add_subdirectory(innerproduct)
add_subdirectory(lowmc)
add_subdirectory(millionaire_prob)
add_subdirectory(min-euclidean-dist)
add_subdirectory(psi_phasing)
add_subdirectory(psi_scs)
add_subdirectory(sha1)
add_subdirectory(threshold_euclidean_dist_2d_simd)
add_subdirectory(uc_circuit)
| {
"pile_set_name": "Github"
} |
VMware's Cloud Application Platform
===================================
Copyright (c) 2009-2011 VMware, Inc.
What is Cloud Foundry?
----------------------
Cloud Foundry is an open platform-as-a-service (PaaS). The system supports
multiple frameworks, multiple application infrastructure services and
deployment to multiple clouds.
License
-------
Cloud Foundry uses the Apache 2 license. See LICENSE for details.
Installation Notes
------------------
Cloud Foundry is made up of a number of system components (cloud controller,
health manager, dea, router, etc.). These components can run co-located in a
single vm/single os or can be spread across several machines/vm's.
For development purposes, the preferred environment is to run all of the core
components within a single vm and then interact with the system from outside of
the vm via an ssh tunnel. The pre-defined domain `*.vcap.me` maps to local host,
so when you use this setup, the end result is that your development environment
is available at [http://api.vcap.me](http://api.vcap.me).
For large scale or multi-vm deployments, the system is flexible enough to allow
you to place system components on multiple vm's, run multiple nodes of a given
type (e.g., 8 routers, 4 cloud controllers, etc.)
The detailed install instructions below walk you through the install process
for a single vm installation.
Versions of these instructions have been used for production deployments, and
for our own development purposes. Many of us develop on Mac laptops, so some
additional instructions for this environment have been included.
Detailed Install/Run Instructions:
----------------------------------
There are two methods for installing VCAP. One is a manual process, which you
might choose to do if you want to understand the details of what goes into
bringing up a VCAP instance. The other is an automated process contributed
by the community. In both cases, you need to start with a stock Ubuntu
server VM.
### Step 1: create a pristine VM with ssh
* setup a VM with a pristine Ubuntu 10.04.4 server 64bit image,
[download here](http://releases.ubuntu.com/)
* setup your VM with 1G or more of memory
* you may wish to snapshot your VM now in case things go pear shaped
(great snapshot spots are here and after step 4)
* to enable remote access (more fun than using the console), install ssh.
To install ssh:
sudo apt-get install openssh-server
### Step 2: run the automated setup process
Run the install script. It'll ask for your sudo password at the beginning and
towards the end. The entire process takes about half an hour, so just keep a
loose eye on it.
sudo apt-get install curl
bash < <(curl -s -k -B https://raw.github.com/cloudfoundry/vcap/master/dev_setup/bin/vcap_dev_setup)
NOTE: The automated setup does not auto-start the system. Once you are
done with the setup, exit your current shell, restart a new shell and continue
the following steps
### Step 3: start the system
~/cloudfoundry/vcap/dev_setup/bin/vcap_dev start
### Step 4: *Optional, mac/linux users only*, create a local ssh tunnel
From your VM, run `ifconfig` and note your eth0 IP address, which will look something like: `192.168.252.130`
Now go to your mac terminal window and verify that you can connect with SSH:
ssh <your VM user>@<VM IP address>
If this works, create a local port 80 tunnel:
sudo ssh -L <local-port>:<VM IP address>:80 <your VM user>@<VM IP address> -N
If you are not already running a local web server, use port 80 as your local port,
otherwise you may want to use 8080 or another common http port.
Once you do this, from both your mac, and from within the vm, `api.vcap.me` and `*.vcap.me`
will map to localhost which will map to your running Cloud Foundry instance.
Trying your setup
-----------------
### Step 5: validate that you can connect and tests pass
#### From the console of your vm, or from your mac (thanks to local tunnel)
vmc target api.vcap.me
vmc info
Note: If you are using a tunnel and selected a local port other than 80 you
will need to modify the target to include it here, like `api.vcap.me:8080`.
#### This should produce roughly the following:
VMware's Cloud Application Platform
For support visit [email protected]
Target: http://api.vcap.me (v0.999)
Client: v0.3.10
#### Play around as a user, start with:
vmc register --email [email protected] --passwd password
vmc login --email [email protected] --passwd password
#### To see what else you can do try:
vmc help
Testing your setup
------------------
Once the system is installed, you can run the following Yeti cases (Yeti stands
for "Yeti Extraordinary Test Infrastructure") to ensure that major functionality
is working.
You can run the Yeti cases as the following steps:
cd cloudfoundry/vcap/tests
./update ## this is not required for running administrative test cases
bundle exec rake full[1]
During the first time, Yeti will prompt you for information about your environment:
- target
- test user/test password
- admin user/admin password
<br>target should be "api.vcap.me".
<br>This information except password is saved to ~/.bvt/config.yml file.
<br>When run the second time around, Yeti will not prompt for the information again.
### Step 6: you are done, make sure you can run a simple hello world app.
Create an empty directory for your test app (let's call it env), and enter it.
mkdir env && cd env
Cut and paste the following app into a Ruby file (let's say env.rb):
require 'rubygems'
require 'sinatra'
get '/' do
host = ENV['VCAP_APP_HOST']
port = ENV['VCAP_APP_PORT']
"<h1>XXXXX Hello from the Cloud! via: #{host}:#{port}</h1>"
end
get '/env' do
res = ''
ENV.each do |k, v|
res << "#{k}: #{v}<br/>"
end
res
end
#### Create & push a 4 instance version of the test app, like so:
vmc push env --instances 4 --mem 64M --url env.vcap.me -n
#### Test it in the browser:
[http://env.vcap.me](http://env.vcap.me)
Note that hitting refresh will show a different port in each refresh reflecting the different active instances
#### Check the status of your app by running:
vmc apps
#### Which should yield the following output:
+-------------+----+---------+-------------+----------+
| Application | # | Health | URLS | Services |
+-------------+----+---------+-------------+----------+
| env | 1 | RUNNING | env.vcap.me | |
+-------------+----+---------+-------------+----------+
| {
"pile_set_name": "Github"
} |
package terraform
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
"github.com/hashicorp/terraform-plugin-sdk/internal/plans"
"github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange"
"github.com/hashicorp/terraform-plugin-sdk/internal/providers"
"github.com/hashicorp/terraform-plugin-sdk/internal/states"
"github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags"
)
// EvalCheckPlannedChange is an EvalNode implementation that produces errors
// if the _actual_ expected value is not compatible with what was recorded
// in the plan.
//
// Errors here are most often indicative of a bug in the provider, so our
// error messages will report with that in mind. It's also possible that
// there's a bug in Terraform's Core's own "proposed new value" code in
// EvalDiff.
type EvalCheckPlannedChange struct {
	// Addr identifies the resource instance whose change is being checked.
	Addr addrs.ResourceInstance
	// ProviderAddr is the provider configuration responsible for the change,
	// used only to name the provider in error messages.
	ProviderAddr addrs.AbsProviderConfig
	// ProviderSchema supplies the resource type schema used for the
	// compatibility comparison.
	ProviderSchema **ProviderSchema
	// We take ResourceInstanceChange objects here just because that's what's
	// convenient to pass in from the evaltree implementation, but we really
	// only look at the "After" value of each change.
	Planned, Actual **plans.ResourceInstanceChange
}
// Eval compares the change recorded during the plan phase against the
// "actual" change produced while applying, returning error diagnostics if
// the provider changed its planned action or produced an incompatible
// final value. Such inconsistencies usually indicate a provider bug.
func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) {
	providerSchema := *n.ProviderSchema
	plannedChange := *n.Planned
	actualChange := *n.Actual
	schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
	if schema == nil {
		// Should be caught during validation, so we don't bother with a pretty error here
		return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type)
	}
	var diags tfdiags.Diagnostics
	absAddr := n.Addr.Absolute(ctx.Path())
	log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action)
	if plannedChange.Action != actualChange.Action {
		switch {
		case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp:
			// It's okay for an update to become a NoOp once we've filled in
			// all of the unknown values, since the final values might actually
			// match what was there before after all.
			log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr)
		default:
			// Any other action change is treated as a provider bug.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Provider produced inconsistent final plan",
				fmt.Sprintf(
					"When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
					absAddr, n.ProviderAddr.ProviderConfig.Type,
					plannedChange.Action, actualChange.Action,
				),
			))
		}
	}
	// Even when the action matches, the final value must remain compatible
	// with what was planned (unknowns may only be refined, not contradicted).
	errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After)
	for _, err := range errs {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Provider produced inconsistent final plan",
			fmt.Sprintf(
				"When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
				absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err),
			),
		))
	}
	return nil, diags.Err()
}
// EvalDiff is an EvalNode implementation that detects changes for a given
// resource instance.
type EvalDiff struct {
	// Addr is the resource instance being diffed.
	Addr addrs.ResourceInstance
	// Config is the resource's configuration block.
	Config *configs.Resource
	// Provider is the already-configured provider instance used for planning.
	Provider *providers.Interface
	ProviderAddr addrs.AbsProviderConfig
	ProviderSchema **ProviderSchema
	// State is the prior state object, or nil if the object doesn't exist yet.
	State **states.ResourceInstanceObject
	// PreviousDiff, if set, is the change produced during the plan phase,
	// used to keep the apply-time action consistent with what was planned.
	PreviousDiff **plans.ResourceInstanceChange
	// CreateBeforeDestroy is set if either the resource's own config sets
	// create_before_destroy explicitly or if dependencies have forced the
	// resource to be handled as create_before_destroy in order to avoid
	// a dependency cycle.
	CreateBeforeDestroy bool
	// OutputChange, OutputValue and OutputState are optional result
	// pointers written during Eval when non-nil.
	OutputChange **plans.ResourceInstanceChange
	OutputValue *cty.Value
	OutputState **states.ResourceInstanceObject
	// Stub suppresses the pre/post-diff hooks (used for stub evaluations).
	Stub bool
}
// TODO: test
// Eval computes the planned change for this resource instance by evaluating
// its configuration, asking the provider to plan the change, validating the
// provider's response, applying ignore_changes, and classifying the result
// into a plans.Action. Results are written through the Output* pointers.
func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
	state := *n.State
	config := *n.Config
	provider := *n.Provider
	providerSchema := *n.ProviderSchema
	if providerSchema == nil {
		return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr)
	}
	if n.ProviderAddr.ProviderConfig.Type == "" {
		panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path())))
	}
	var diags tfdiags.Diagnostics
	// Evaluate the configuration
	schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
	if schema == nil {
		// Should be caught during validation, so we don't bother with a pretty error here
		return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
	}
	forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
	keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
	configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
	diags = diags.Append(configDiags)
	if configDiags.HasErrors() {
		return nil, diags.Err()
	}
	absAddr := n.Addr.Absolute(ctx.Path())
	var priorVal cty.Value
	var priorValTainted cty.Value
	var priorPrivate []byte
	if state != nil {
		if state.Status != states.ObjectTainted {
			priorVal = state.Value
			priorPrivate = state.Private
		} else {
			// If the prior state is tainted then we'll proceed below like
			// we're creating an entirely new object, but then turn it into
			// a synthetic "Replace" change at the end, creating the same
			// result as if the provider had marked at least one argument
			// change as "requires replacement".
			priorValTainted = state.Value
			priorVal = cty.NullVal(schema.ImpliedType())
		}
	} else {
		// No existing object: treat the prior as null (a Create).
		priorVal = cty.NullVal(schema.ImpliedType())
	}
	proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal)
	// Call pre-diff hook
	if !n.Stub {
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
		})
		if err != nil {
			return nil, err
		}
	}
	log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path()))
	// Allow the provider to validate the final set of values.
	// The config was statically validated early on, but there may have been
	// unknown values which the provider could not validate at the time.
	validateResp := provider.ValidateResourceTypeConfig(
		providers.ValidateResourceTypeConfigRequest{
			TypeName: n.Addr.Resource.Type,
			Config:   configVal,
		},
	)
	if validateResp.Diagnostics.HasErrors() {
		return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err()
	}
	// The provider gets an opportunity to customize the proposed new value,
	// which in turn produces the _planned_ new value. But before
	// we send back this information, we need to process ignore_changes
	// so that CustomizeDiff will not act on them
	var ignoreChangeDiags tfdiags.Diagnostics
	proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal)
	diags = diags.Append(ignoreChangeDiags)
	if ignoreChangeDiags.HasErrors() {
		return nil, diags.Err()
	}
	resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
		TypeName:         n.Addr.Resource.Type,
		Config:           configVal,
		PriorState:       priorVal,
		ProposedNewState: proposedNewVal,
		PriorPrivate:     priorPrivate,
	})
	diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
	if diags.HasErrors() {
		return nil, diags.Err()
	}
	plannedNewVal := resp.PlannedState
	plannedPrivate := resp.PlannedPrivate
	if plannedNewVal == cty.NilVal {
		// Should never happen. Since real-world providers return via RPC a nil
		// is always a bug in the client-side stub. This is more likely caused
		// by an incompletely-configured mock provider in tests, though.
		panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String()))
	}
	// We allow the planned new value to disagree with configuration _values_
	// here, since that allows the provider to do special logic like a
	// DiffSuppressFunc, but we still require that the provider produces
	// a value whose type conforms to the schema.
	for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Provider produced invalid plan",
			fmt.Sprintf(
				"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
				n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
			),
		))
	}
	if diags.HasErrors() {
		return nil, diags.Err()
	}
	if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 {
		if resp.LegacyTypeSystem {
			// The shimming of the old type system in the legacy SDK is not precise
			// enough to pass this consistency check, so we'll give it a pass here,
			// but we will generate a warning about it so that we are more likely
			// to notice in the logs if an inconsistency beyond the type system
			// leads to a downstream provider failure.
			var buf strings.Builder
			fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
			for _, err := range errs {
				fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
			}
			log.Print(buf.String())
		} else {
			for _, err := range errs {
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Provider produced invalid plan",
					fmt.Sprintf(
						"Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
						n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
					),
				))
			}
			return nil, diags.Err()
		}
	}
	// TODO: We should be able to remove this repeat of processing ignored changes
	// after the plan, which helps providers relying on old behavior "just work"
	// in the next major version, such that we can be stricter about ignore_changes
	// values
	plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
	diags = diags.Append(ignoreChangeDiags)
	if ignoreChangeDiags.HasErrors() {
		return nil, diags.Err()
	}
	// The provider produces a list of paths to attributes whose changes mean
	// that we must replace rather than update an existing remote object.
	// However, we only need to do that if the identified attributes _have_
	// actually changed -- particularly after we may have undone some of the
	// changes in processIgnoreChanges -- so now we'll filter that list to
	// include only where changes are detected.
	reqRep := cty.NewPathSet()
	if len(resp.RequiresReplace) > 0 {
		for _, path := range resp.RequiresReplace {
			if priorVal.IsNull() {
				// If prior is null then we don't expect any RequiresReplace at all,
				// because this is a Create action.
				continue
			}
			priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
			plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
			if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
				// This means the path was invalid in both the prior and new
				// values, which is an error with the provider itself.
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Provider produced invalid plan",
					fmt.Sprintf(
						"Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
						n.ProviderAddr.ProviderConfig.Type, absAddr, path,
					),
				))
				continue
			}
			// Make sure we have valid Values for both values.
			// Note: if the opposing value was of the type
			// cty.DynamicPseudoType, the type assigned here may not exactly
			// match the schema. This is fine here, since we're only going to
			// check for equality, but if the NullVal is to be used, we need to
			// check the schema for the true type.
			switch {
			case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal:
				// this should never happen without ApplyPath errors above
				panic("requires replace path returned 2 nil values")
			case priorChangedVal == cty.NilVal:
				priorChangedVal = cty.NullVal(plannedChangedVal.Type())
			case plannedChangedVal == cty.NilVal:
				plannedChangedVal = cty.NullVal(priorChangedVal.Type())
			}
			eqV := plannedChangedVal.Equals(priorChangedVal)
			if !eqV.IsKnown() || eqV.False() {
				reqRep.Add(path)
			}
		}
		if diags.HasErrors() {
			return nil, diags.Err()
		}
	}
	// Classify the overall action from the filtered results above.
	eqV := plannedNewVal.Equals(priorVal)
	eq := eqV.IsKnown() && eqV.True()
	var action plans.Action
	switch {
	case priorVal.IsNull():
		action = plans.Create
	case eq:
		action = plans.NoOp
	case !reqRep.Empty():
		// If there are any "requires replace" paths left _after our filtering
		// above_ then this is a replace action.
		if n.CreateBeforeDestroy {
			action = plans.CreateThenDelete
		} else {
			action = plans.DeleteThenCreate
		}
	default:
		action = plans.Update
		// "Delete" is never chosen here, because deletion plans are always
		// created more directly elsewhere, such as in "orphan" handling.
	}
	if action.IsReplace() {
		// In this strange situation we want to produce a change object that
		// shows our real prior object but has a _new_ object that is built
		// from a null prior object, since we're going to delete the one
		// that has all the computed values on it.
		//
		// Therefore we'll ask the provider to plan again here, giving it
		// a null object for the prior, and then we'll meld that with the
		// _actual_ prior state to produce a correctly-shaped replace change.
		// The resulting change should show any computed attributes changing
		// from known prior values to unknown values, unless the provider is
		// able to predict new values for any of these computed attributes.
		nullPriorVal := cty.NullVal(schema.ImpliedType())
		// create a new proposed value from the null state and the config
		proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal)
		resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{
			TypeName:         n.Addr.Resource.Type,
			Config:           configVal,
			PriorState:       nullPriorVal,
			ProposedNewState: proposedNewVal,
			PriorPrivate:     plannedPrivate,
		})
		// We need to tread carefully here, since if there are any warnings
		// in here they probably also came out of our previous call to
		// PlanResourceChange above, and so we don't want to repeat them.
		// Consequently, we break from the usual pattern here and only
		// append these new diagnostics if there's at least one error inside.
		if resp.Diagnostics.HasErrors() {
			diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
			return nil, diags.Err()
		}
		plannedNewVal = resp.PlannedState
		plannedPrivate = resp.PlannedPrivate
		for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Provider produced invalid plan",
				fmt.Sprintf(
					"Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
					n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err),
				),
			))
		}
		if diags.HasErrors() {
			return nil, diags.Err()
		}
	}
	// If our prior value was tainted then we actually want this to appear
	// as a replace change, even though so far we've been treating it as a
	// create.
	if action == plans.Create && priorValTainted != cty.NilVal {
		if n.CreateBeforeDestroy {
			action = plans.CreateThenDelete
		} else {
			action = plans.DeleteThenCreate
		}
		priorVal = priorValTainted
	}
	// As a special case, if we have a previous diff (presumably from the plan
	// phases, whereas we're now in the apply phase) and it was for a replace,
	// we've already deleted the original object from state by the time we
	// get here and so we would've ended up with a _create_ action this time,
	// which we now need to paper over to get a result consistent with what
	// we originally intended.
	if n.PreviousDiff != nil {
		prevChange := *n.PreviousDiff
		if prevChange.Action.IsReplace() && action == plans.Create {
			log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action)
			action = prevChange.Action
			priorVal = prevChange.Before
		}
	}
	// Call post-refresh hook
	if !n.Stub {
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal)
		})
		if err != nil {
			return nil, err
		}
	}
	// Update our output if we care
	if n.OutputChange != nil {
		*n.OutputChange = &plans.ResourceInstanceChange{
			Addr:         absAddr,
			Private:      plannedPrivate,
			ProviderAddr: n.ProviderAddr,
			Change: plans.Change{
				Action: action,
				Before: priorVal,
				After:  plannedNewVal,
			},
			RequiredReplace: reqRep,
		}
	}
	if n.OutputValue != nil {
		*n.OutputValue = configVal
	}
	// Update the state if we care
	if n.OutputState != nil {
		*n.OutputState = &states.ResourceInstanceObject{
			// We use the special "planned" status here to note that this
			// object's value is not yet complete. Objects with this status
			// cannot be used during expression evaluation, so the caller
			// must _also_ record the returned change in the active plan,
			// which the expression evaluator will use in preference to this
			// incomplete value recorded in the state.
			Status:  states.ObjectPlanned,
			Value:   plannedNewVal,
			Private: plannedPrivate,
		}
	}
	return nil, nil
}
// processIgnoreChanges undoes planned changes to any attribute paths covered
// by the configuration's ignore_changes setting, restoring the corresponding
// values from the prior state. It returns the (possibly adjusted) proposed
// value plus any diagnostics from interpreting the ignore_changes traversals.
func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) {
	// ignore_changes only applies when an object already exists, since we
	// can't ignore changes to a thing we've not created yet.
	if prior.IsNull() {
		return proposed, nil
	}
	ignoreChanges := n.Config.Managed.IgnoreChanges
	ignoreAll := n.Config.Managed.IgnoreAllChanges
	if len(ignoreChanges) == 0 && !ignoreAll {
		// Nothing is ignored, so the proposed value stands as-is.
		return proposed, nil
	}
	if ignoreAll {
		// Ignoring everything means keeping the prior object wholesale.
		return prior, nil
	}
	// prior is known non-null at this point (checked above), so we only need
	// to guard against a null proposed value, which shouldn't normally occur.
	if proposed.IsNull() {
		return proposed, nil
	}
	return processIgnoreChangesIndividual(prior, proposed, ignoreChanges)
}
// processIgnoreChangesIndividual restores, within the proposed value, each
// attribute path named in ignoreChanges to its value from the prior object,
// effectively undoing any planned change at those paths. Paths that cannot
// be followed through the prior value are silently left as proposed.
func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) {
	// When we walk below we will be using cty.Path values for comparison, so
	// we'll convert our traversals here so we can compare more easily.
	ignoreChangesPath := make([]cty.Path, len(ignoreChanges))
	for i, traversal := range ignoreChanges {
		path := make(cty.Path, len(traversal))
		for si, step := range traversal {
			switch ts := step.(type) {
			case hcl.TraverseRoot:
				path[si] = cty.GetAttrStep{
					Name: ts.Name,
				}
			case hcl.TraverseAttr:
				path[si] = cty.GetAttrStep{
					Name: ts.Name,
				}
			case hcl.TraverseIndex:
				path[si] = cty.IndexStep{
					Key: ts.Key,
				}
			default:
				panic(fmt.Sprintf("unsupported traversal step %#v", step))
			}
		}
		ignoreChangesPath[i] = path
	}
	var diags tfdiags.Diagnostics
	ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) {
		// First we must see if this is a path that's being ignored at all.
		// We're looking for an exact match here because this walk will visit
		// leaf values first and then their containers, and we want to do
		// the "ignore" transform once we reach the point indicated, throwing
		// away any deeper values we already produced at that point.
		var ignoreTraversal hcl.Traversal
		for i, candidate := range ignoreChangesPath {
			if path.Equals(candidate) {
				ignoreTraversal = ignoreChanges[i]
				// Equal candidate paths are equivalent matches, so the
				// first one suffices.
				break
			}
		}
		if ignoreTraversal == nil {
			return v, nil
		}
		// If we're able to follow the same path through the prior value,
		// we'll take the value there instead, effectively undoing the
		// change that was planned.
		// (Named applyDiags rather than diags to avoid shadowing the
		// function-level tfdiags.Diagnostics with an hcl.Diagnostics.)
		priorV, applyDiags := hcl.ApplyPath(prior, path, nil)
		if applyDiags.HasErrors() {
			// We just ignore the errors and move on here, since we assume it's
			// just because the prior value was a slightly-different shape.
			// It could potentially also be that the traversal doesn't match
			// the schema, but we should've caught that during the validate
			// walk if so.
			return v, nil
		}
		return priorV, nil
	})
	return ret, diags
}
// EvalDiffDestroy is an EvalNode implementation that returns a plain
// destroy diff.
type EvalDiffDestroy struct {
	// Addr is the resource instance being destroyed.
	Addr addrs.ResourceInstance
	// DeposedKey identifies a deposed object, or states.NotDeposed for the
	// current object.
	DeposedKey states.DeposedKey
	// State is the object being destroyed; nil or null means already gone.
	State **states.ResourceInstanceObject
	ProviderAddr addrs.AbsProviderConfig
	// Output receives the produced destroy change; OutputState, when set,
	// is cleared to nil to record the proposed post-destroy state.
	Output **plans.ResourceInstanceChange
	OutputState **states.ResourceInstanceObject
}
// TODO: test
// Eval produces a plain Delete change for this instance object, invoking the
// pre- and post-diff hooks around it. No provider involvement is needed.
func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
	absAddr := n.Addr.Absolute(ctx.Path())
	state := *n.State
	if n.ProviderAddr.ProviderConfig.Type == "" {
		if n.DeposedKey == "" {
			panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr))
		} else {
			panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey))
		}
	}
	// If there is no state or our attributes object is null then we're already
	// destroyed.
	if state == nil || state.Value.IsNull() {
		return nil, nil
	}
	// Call pre-diff hook
	err := ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PreDiff(
			absAddr, n.DeposedKey.Generation(),
			state.Value,
			cty.NullVal(cty.DynamicPseudoType),
		)
	})
	if err != nil {
		return nil, err
	}
	// Change is always the same for a destroy. We don't need the provider's
	// help for this one.
	// TODO: Should we give the provider an opportunity to veto this?
	change := &plans.ResourceInstanceChange{
		Addr:       absAddr,
		DeposedKey: n.DeposedKey,
		Change: plans.Change{
			Action: plans.Delete,
			Before: state.Value,
			After:  cty.NullVal(cty.DynamicPseudoType),
		},
		Private:      state.Private,
		ProviderAddr: n.ProviderAddr,
	}
	// Call post-diff hook
	err = ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PostDiff(
			absAddr,
			n.DeposedKey.Generation(),
			change.Action,
			change.Before,
			change.After,
		)
	})
	if err != nil {
		return nil, err
	}
	// Update our output
	*n.Output = change
	if n.OutputState != nil {
		// Record our proposed new state, which is nil because we're destroying.
		*n.OutputState = nil
	}
	return nil, nil
}
// EvalReduceDiff is an EvalNode implementation that takes a planned resource
// instance change as might be produced by EvalDiff or EvalDiffDestroy and
// "simplifies" it to a single atomic action to be performed by a specific
// graph node.
//
// Callers must specify whether they are a destroy node or a regular apply
// node. If the result is NoOp then the given change requires no action for
// the specific graph node calling this and so evaluation of the that graph
// node should exit early and take no action.
//
// The object written to OutChange may either be identical to InChange or
// a new change object derived from InChange. Because of the former case, the
// caller must not mutate the object returned in OutChange.
type EvalReduceDiff struct {
	// Addr is the resource instance whose change is being simplified,
	// used only for trace logging.
	Addr addrs.ResourceInstance
	// InChange is the full planned change to simplify.
	InChange **plans.ResourceInstanceChange
	// Destroy is true when the calling graph node is a destroy node.
	Destroy bool
	// OutChange, when non-nil, receives the simplified change. It may
	// alias *InChange, so callers must not mutate it.
	OutChange **plans.ResourceInstanceChange
}
// TODO: test
// Eval simplifies the incoming planned change down to the single action
// relevant for this node's role (destroy vs. apply), records the result,
// and logs whenever the simplification altered the planned action.
func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
	original := *n.InChange
	reduced := original.Simplify(n.Destroy)
	if n.OutChange != nil {
		*n.OutChange = reduced
	}
	if reduced.Action == original.Action {
		// Nothing changed; no need to log.
		return nil, nil
	}
	if n.Destroy {
		log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, original.Action, reduced.Action)
	} else {
		log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, original.Action, reduced.Action)
	}
	return nil, nil
}
// EvalReadDiff is an EvalNode implementation that retrieves the planned
// change for a particular resource instance object.
type EvalReadDiff struct {
	// Addr is the resource instance whose planned change should be read.
	Addr addrs.ResourceInstance
	// DeposedKey selects a deposed object's change, or states.NotDeposed
	// for the current generation.
	DeposedKey states.DeposedKey
	// ProviderSchema supplies the schema used to decode the stored change.
	ProviderSchema **ProviderSchema
	// Change, when non-nil, receives the decoded change (nil result if no
	// change is recorded in the plan).
	Change **plans.ResourceInstanceChange
}
// Eval looks up the planned change for this instance object in the active
// plan and, when one is recorded, decodes it into *n.Change.
func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
	addr := n.Addr.Absolute(ctx.Path())
	providerSchema := *n.ProviderSchema

	schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
	if schema == nil {
		// Should be caught during validation, so we don't bother with a pretty error here
		return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
	}

	// Deposed objects are tracked under their deposed key rather than
	// the current generation.
	gen := states.CurrentGen
	if n.DeposedKey != states.NotDeposed {
		gen = n.DeposedKey
	}

	csrc := ctx.Changes().GetResourceInstanceChange(addr, gen)
	if csrc == nil {
		log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr)
		return nil, nil
	}

	change, err := csrc.Decode(schema.ImpliedType())
	if err != nil {
		return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err)
	}
	if n.Change != nil {
		*n.Change = change
	}

	log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr)
	return nil, nil
}
// EvalWriteDiff is an EvalNode implementation that saves a planned change
// for an instance object into the set of global planned changes.
type EvalWriteDiff struct {
	// Addr is the resource instance whose change is being recorded.
	Addr addrs.ResourceInstance
	// DeposedKey selects a deposed object's slot, or states.NotDeposed for
	// the current generation.
	DeposedKey states.DeposedKey
	// ProviderSchema supplies the schema used to encode the change.
	ProviderSchema **ProviderSchema
	// Change is the change to record; a nil pointer (or nil target) means
	// remove any existing recorded change instead.
	Change **plans.ResourceInstanceChange
}
// TODO: test
// Eval records the given planned change into the set of global planned
// changes, or removes any previously-recorded change when n.Change is nil.
func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
	changes := ctx.Changes()
	addr := n.Addr.Absolute(ctx.Path())
	if n.Change == nil || *n.Change == nil {
		// Caller sets nil to indicate that we need to remove a change from
		// the set of changes.
		gen := states.CurrentGen
		if n.DeposedKey != states.NotDeposed {
			gen = n.DeposedKey
		}
		changes.RemoveResourceInstanceChange(addr, gen)
		return nil, nil
	}
	providerSchema := *n.ProviderSchema
	change := *n.Change
	if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey {
		// Should never happen, and indicates a bug in the caller.
		panic("inconsistent address and/or deposed key in EvalWriteDiff")
	}
	schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
	if schema == nil {
		// Should be caught during validation, so we don't bother with a pretty error here
		return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
	}
	// Changes are stored in their wire (encoded) form in the plan.
	csrc, err := change.Encode(schema.ImpliedType())
	if err != nil {
		return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err)
	}
	changes.AppendResourceInstanceChange(csrc)
	if n.DeposedKey == states.NotDeposed {
		log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr)
	} else {
		log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey)
	}
	return nil, nil
}
| {
"pile_set_name": "Github"
} |
/// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
* @path ch15/15.2/15.2.3/15.2.3.7/15.2.3.7-6-a-227.js
* @description Object.defineProperties - 'O' is an Array, 'P' is an array index property, TypeError is thrown if the [[Configurable]] attribute value of 'P' is false and the [[Configurable]] field of 'desc' is true (15.4.5.1 step 4.c)
*/
function testcase() {
        // Start with a non-configurable data property at array index "1".
        var arr = [];
        Object.defineProperty(arr, "1", {
            value: 3,
            configurable: false
        });

        try {
            // Attempting to flip [[Configurable]] from false to true via
            // defineProperties must throw a TypeError (15.4.5.1 step 4.c).
            Object.defineProperties(arr, {
                "1": {
                    value: 13,
                    configurable: true
                }
            });
            // No exception means the invalid redefinition was accepted.
            return false;
        } catch (ex) {
            if (!(ex instanceof TypeError)) {
                return false;
            }
            // The original attributes must remain untouched.
            return dataPropertyAttributesAreCorrect(arr, "1", 3, false, false, false);
        }
    }
runTestCase(testcase);
| {
"pile_set_name": "Github"
} |
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runenames
import (
"strings"
"testing"
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/internal/ucd"
)
// TestName checks runenames.Name against the UnicodeData.txt source of
// truth: it parses every record's name field and compares Name(r) for
// every rune up to unicode.MaxRune. Skipped unless long tests are
// enabled, and bails out after 100 mismatches.
func TestName(t *testing.T) {
	testtext.SkipIfNotLong(t)

	// wants[r] holds the expected name for rune r ("" if unnamed).
	wants := make([]string, 1+unicode.MaxRune)
	ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
		wants[p.Rune(0)] = getName(p)
	})

	nErrors := 0
	for r, want := range wants {
		got := Name(rune(r))
		if got != want {
			t.Errorf("r=%#08x: got %q, want %q", r, got, want)
			nErrors++
			// Cap the error output so a systematic failure doesn't flood logs.
			if nErrors == 100 {
				t.Fatal("too many errors")
			}
		}
	}
}
// Copied from gen.go.
//
// getName returns the name field from a UnicodeData.txt record,
// collapsing "<..., First>" range markers down to "<...>".
func getName(p *ucd.Parser) string {
	name := p.String(ucd.Name)
	switch {
	case name == "":
		return ""
	case name[0] == '<':
		// Range records look like "<CJK Ideograph, First>"; keep only
		// the bracketed label.
		if i := strings.Index(name, ", First>"); i >= 0 {
			name = name[:i] + ">"
		}
	}
	return name
}
| {
"pile_set_name": "Github"
} |
#!/bin/bash

# Script used to verify the xlat funcs vs documentation

# main()

#
#  Which API functions are used to register xlats
#
xlat_api_funcs="xlat_register|xlat_async_register"

src_dir="src/"
doc_xlat="doc/"

#
#  Where our output goes.  (Quoted everywhere below so paths containing
#  spaces don't get word-split.)
#
OUTPUT=$1
shift

if [ -z "$OUTPUT" ]; then
    echo "Usage: $0 /path/output"
    exit 1
fi

#
#  Where the correct output is located: same path as this script, with
#  the ".sh" suffix replaced by ".txt".
#
CORRECT=$(echo "$0" | sed 's/\.sh/.txt/')

rm -f "$OUTPUT"
mkdir -p "$(dirname "$OUTPUT")"
touch "$OUTPUT"

#
#  Search through all of the code for references to xlat API
#  registration functions.  Then, pull out the names of the xlats
#  which are registered.
#
grep --include "*.c" -E "($xlat_api_funcs).*\"" -r "$src_dir" 2>&1 | \
	perl -lpe 's/^.*"(.*)".*$/\1/' | sort | uniq | \
#
#  Search through the documentation for references to the names of the
#  registered xlat functions.  Any xlat with no documentation hit is
#  appended to $OUTPUT.
#
while read _d; do
	echo "CHECKING for %{$_d: ... }"
	if ! grep -q "%{$_d:" --include "*.adoc" -r "$doc_xlat" 2>&1; then
		echo "%{$_d:...}" >> "$OUTPUT"
	fi
done

#
#  Files should be identical.  If not, panic.
#
if ! diff "$OUTPUT" "$CORRECT" 2>/dev/null ; then
	echo "FAILED: $@"
	echo
	echo "ERROR: Some registered xlats are not documented."
	echo "Please compare the following two files:"
	echo "   expected - $CORRECT"
	echo "   found    - $OUTPUT"
	echo
	echo "If the found output is correct, then just copy 'found' to 'expected'."
	echo
	echo "If the xlat is built-in, please document it in"
	echo "   doc/antora/modules/unlang/pages/xlat/builtin.adoc"
	echo
	echo "If the xlat is in a module, please document it in"
	echo "   raddb/mods-available/NAME"
	exit 1
fi

exit 0
| {
"pile_set_name": "Github"
} |
// +build !windows
package ole
import "unsafe"
// GetConnectionInterface is a stub for non-Windows builds; it always
// returns 0 and never writes through piid.
func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
	return int32(0)
}
// Advise is a stub for non-Windows builds; it always returns a zero
// cookie and an E_NOTIMPL error.
func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) {
	return uint32(0), NewError(E_NOTIMPL)
}
// Unadvise is a stub for non-Windows builds; it always returns an
// E_NOTIMPL error.
func (v *IConnectionPoint) Unadvise(cookie uint32) error {
	return NewError(E_NOTIMPL)
}
// EnumConnections is a stub for non-Windows builds; it always returns an
// E_NOTIMPL error and never writes through p.
func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) {
	return NewError(E_NOTIMPL)
}
| {
"pile_set_name": "Github"
} |
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2005 Blender Foundation.
* All rights reserved.
*/
#include "../node_shader_util.h"
/* **************** Wavelength ******************** */
/* Input sockets: a single wavelength value in nanometers, defaulting to
 * 500nm and clamped to the visible range 380-780nm.  The {-1, ""} entry
 * terminates the template list. */
static bNodeSocketTemplate sh_node_wavelength_in[] = {
    {SOCK_FLOAT, N_("Wavelength"), 500.0f, 0.0f, 0.0f, 0.0f, 380.0f, 780.0f},
    {-1, ""},
};
/* Output sockets: the RGBA color corresponding to the input wavelength.
 * The {-1, ""} entry terminates the template list. */
static bNodeSocketTemplate sh_node_wavelength_out[] = {
    {SOCK_RGBA, N_("Color")},
    {-1, ""},
};
/* node type definition */

/* Registers the Wavelength shader node (converter class) with the node
 * system.  The bNodeType must be static because nodeRegisterType() keeps
 * a reference to it.  This node needs no init callback or custom
 * storage, hence the NULL/empty arguments. */
void register_node_type_sh_wavelength(void)
{
  static bNodeType ntype;

  sh_node_type_base(&ntype, SH_NODE_WAVELENGTH, "Wavelength", NODE_CLASS_CONVERTOR, 0);
  node_type_size_preset(&ntype, NODE_SIZE_MIDDLE);
  node_type_socket_templates(&ntype, sh_node_wavelength_in, sh_node_wavelength_out);
  node_type_init(&ntype, NULL);
  node_type_storage(&ntype, "", NULL, NULL);

  nodeRegisterType(&ntype);
}
| {
"pile_set_name": "Github"
} |
#ifndef _IPXE_EFI_FILE_H
#define _IPXE_EFI_FILE_H
/** @file
*
* EFI file protocols
*
*/
/* Install the EFI file protocols on the given handle; returns an iPXE
 * status code (0 on success). */
extern int efi_file_install ( EFI_HANDLE handle );
/* Remove the EFI file protocols previously installed on the handle. */
extern void efi_file_uninstall ( EFI_HANDLE handle );
#endif /* _IPXE_EFI_FILE_H */
| {
"pile_set_name": "Github"
} |
use rustc_ast as ast;
use rustc_ast::visit::{self, AssocCtxt, FnCtxt, FnKind, Visitor};
use rustc_ast::{AssocTyConstraint, AssocTyConstraintKind, NodeId};
use rustc_ast::{GenericParam, GenericParamKind, PatKind, RangeEnd, VariantData};
use rustc_errors::struct_span_err;
use rustc_feature::{AttributeGate, BUILTIN_ATTRIBUTE_MAP};
use rustc_feature::{Features, GateIssue};
use rustc_session::parse::{feature_err, feature_err_issue};
use rustc_session::Session;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::{sym, Symbol};
use rustc_span::Span;
use tracing::debug;
/// Emits a feature-gate error at `$span` unless `$has_feature` (a
/// closure over `&Features`) reports the feature as enabled, or the span
/// itself is marked as allowing the unstable feature.
macro_rules! gate_feature_fn {
    ($visitor: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
        let (visitor, has_feature, span, name, explain) =
            (&*$visitor, $has_feature, $span, $name, $explain);
        let has_feature: bool = has_feature(visitor.features);
        debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
        if !has_feature && !span.allows_unstable($name) {
            feature_err_issue(&visitor.sess.parse_sess, name, span, GateIssue::Language, explain)
                .emit();
        }
    }};
}
/// Convenience wrapper over `gate_feature_fn!` for the common case where
/// the gate is a plain boolean field `$feature` on `Features` and the
/// feature symbol has the same name.
macro_rules! gate_feature_post {
    ($visitor: expr, $feature: ident, $span: expr, $explain: expr) => {
        gate_feature_fn!($visitor, |x: &Features| x.$feature, $span, sym::$feature, $explain)
    };
}
/// Runs the post-expansion feature-gate checks over a single attribute.
pub fn check_attribute(attr: &ast::Attribute, sess: &Session, features: &Features) {
    PostExpansionVisitor { sess, features }.visit_attribute(attr)
}
/// AST visitor that reports uses of gated language features after macro
/// expansion has completed.
struct PostExpansionVisitor<'a> {
    sess: &'a Session,
    // `sess` contains a `Features`, but this might not be that one.
    features: &'a Features,
}
impl<'a> PostExpansionVisitor<'a> {
    /// Feature-gates unstable ABI strings appearing in `extern "..."`.
    /// Stable ABIs pass through; an unrecognized ABI is recorded as a
    /// delayed bug since lowering should have rejected it already.
    fn check_abi(&self, abi: ast::StrLit) {
        let ast::StrLit { symbol_unescaped, span, .. } = abi;
        match &*symbol_unescaped.as_str() {
            // Stable
            "Rust" | "C" | "cdecl" | "stdcall" | "fastcall" | "aapcs" | "win64" | "sysv64"
            | "system" => {}
            "rust-intrinsic" => {
                gate_feature_post!(&self, intrinsics, span, "intrinsics are subject to change");
            }
            "platform-intrinsic" => {
                gate_feature_post!(
                    &self,
                    platform_intrinsics,
                    span,
                    "platform intrinsics are experimental and possibly buggy"
                );
            }
            "vectorcall" => {
                gate_feature_post!(
                    &self,
                    abi_vectorcall,
                    span,
                    "vectorcall is experimental and subject to change"
                );
            }
            "thiscall" => {
                gate_feature_post!(
                    &self,
                    abi_thiscall,
                    span,
                    "thiscall is experimental and subject to change"
                );
            }
            "rust-call" => {
                gate_feature_post!(
                    &self,
                    unboxed_closures,
                    span,
                    "rust-call ABI is subject to change"
                );
            }
            "ptx-kernel" => {
                gate_feature_post!(
                    &self,
                    abi_ptx,
                    span,
                    "PTX ABIs are experimental and subject to change"
                );
            }
            "unadjusted" => {
                gate_feature_post!(
                    &self,
                    abi_unadjusted,
                    span,
                    "unadjusted ABI is an implementation detail and perma-unstable"
                );
            }
            "msp430-interrupt" => {
                gate_feature_post!(
                    &self,
                    abi_msp430_interrupt,
                    span,
                    "msp430-interrupt ABI is experimental and subject to change"
                );
            }
            "x86-interrupt" => {
                gate_feature_post!(
                    &self,
                    abi_x86_interrupt,
                    span,
                    "x86-interrupt ABI is experimental and subject to change"
                );
            }
            "amdgpu-kernel" => {
                gate_feature_post!(
                    &self,
                    abi_amdgpu_kernel,
                    span,
                    "amdgpu-kernel ABI is experimental and subject to change"
                );
            }
            "avr-interrupt" | "avr-non-blocking-interrupt" => {
                gate_feature_post!(
                    &self,
                    abi_avr_interrupt,
                    span,
                    "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change"
                );
            }
            "efiapi" => {
                gate_feature_post!(
                    &self,
                    abi_efiapi,
                    span,
                    "efiapi ABI is experimental and subject to change"
                );
            }
            abi => self
                .sess
                .parse_sess
                .span_diagnostic
                .delay_span_bug(span, &format!("unrecognized ABI not caught in lowering: {}", abi)),
        }
    }
    /// Gates the ABI string of an explicit `extern "..."`, if any.
    fn check_extern(&self, ext: ast::Extern) {
        if let ast::Extern::Explicit(abi) = ext {
            self.check_abi(abi);
        }
    }
    /// Reports custom discriminants used alongside tuple/struct variants,
    /// which requires the `arbitrary_enum_discriminant` feature.  Labels
    /// each offending discriminant and each non-unit variant.
    fn maybe_report_invalid_custom_discriminants(&self, variants: &[ast::Variant]) {
        let has_fields = variants.iter().any(|variant| match variant.data {
            VariantData::Tuple(..) | VariantData::Struct(..) => true,
            VariantData::Unit(..) => false,
        });
        // Spans of discriminant expressions on *unit* variants only.
        let discriminant_spans = variants
            .iter()
            .filter(|variant| match variant.data {
                VariantData::Tuple(..) | VariantData::Struct(..) => false,
                VariantData::Unit(..) => true,
            })
            .filter_map(|variant| variant.disr_expr.as_ref().map(|c| c.value.span))
            .collect::<Vec<_>>();
        if !discriminant_spans.is_empty() && has_fields {
            let mut err = feature_err(
                &self.sess.parse_sess,
                sym::arbitrary_enum_discriminant,
                discriminant_spans.clone(),
                "custom discriminant values are not allowed in enums with tuple or struct variants",
            );
            for sp in discriminant_spans {
                err.span_label(sp, "disallowed custom discriminant");
            }
            for variant in variants.iter() {
                match &variant.data {
                    VariantData::Struct(..) => {
                        err.span_label(variant.span, "struct variant defined here");
                    }
                    VariantData::Tuple(..) => {
                        err.span_label(variant.span, "tuple variant defined here");
                    }
                    VariantData::Unit(..) => {}
                }
            }
            err.emit();
        }
    }
    /// Gates generic associated types: both generic parameters and
    /// `where` clauses on associated types are unstable.
    fn check_gat(&self, generics: &ast::Generics, span: Span) {
        if !generics.params.is_empty() {
            gate_feature_post!(
                &self,
                generic_associated_types,
                span,
                "generic associated types are unstable"
            );
        }
        if !generics.where_clause.predicates.is_empty() {
            gate_feature_post!(
                &self,
                generic_associated_types,
                span,
                "where clauses on associated types are unstable"
            );
        }
    }
    /// Feature gate `impl Trait` inside `type Alias = $type_expr;`.
    fn check_impl_trait(&self, ty: &ast::Ty) {
        // Inner visitor so nested `impl Trait` occurrences are found too.
        struct ImplTraitVisitor<'a> {
            vis: &'a PostExpansionVisitor<'a>,
        }
        impl Visitor<'_> for ImplTraitVisitor<'_> {
            fn visit_ty(&mut self, ty: &ast::Ty) {
                if let ast::TyKind::ImplTrait(..) = ty.kind {
                    gate_feature_post!(
                        &self.vis,
                        type_alias_impl_trait,
                        ty.span,
                        "`impl Trait` in type aliases is unstable"
                    );
                }
                visit::walk_ty(self, ty);
            }
        }
        ImplTraitVisitor { vis: self }.visit_ty(ty);
    }
}
impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
    /// Gates built-in attributes that are themselves feature-gated, plus
    /// the unstable flavors of `#[doc(...)]`.
    fn visit_attribute(&mut self, attr: &ast::Attribute) {
        let attr_info =
            attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name)).map(|a| **a);
        // Check feature gates for built-in attributes.
        if let Some((.., AttributeGate::Gated(_, name, descr, has_feature))) = attr_info {
            gate_feature_fn!(self, has_feature, attr.span, name, descr);
        }
        // Check unstable flavors of the `#[doc]` attribute.
        if self.sess.check_name(attr, sym::doc) {
            for nested_meta in attr.meta_item_list().unwrap_or_default() {
                macro_rules! gate_doc { ($($name:ident => $feature:ident)*) => {
                    $(if nested_meta.has_name(sym::$name) {
                        let msg = concat!("`#[doc(", stringify!($name), ")]` is experimental");
                        gate_feature_post!(self, $feature, attr.span, msg);
                    })*
                }}
                gate_doc!(
                    include => external_doc
                    cfg => doc_cfg
                    masked => doc_masked
                    spotlight => doc_spotlight
                    keyword => doc_keyword
                );
            }
        }
    }
    /// Gates identifiers containing non-ASCII characters.
    fn visit_name(&mut self, sp: Span, name: Symbol) {
        if !name.as_str().is_ascii() {
            gate_feature_post!(
                &self,
                non_ascii_idents,
                self.sess.parse_sess.source_map().guess_head_span(sp),
                "non-ascii idents are not fully supported"
            );
        }
    }
    /// Gates item-level features: foreign-module ABIs, `#[plugin_registrar]`,
    /// `#[start]`, `#[main]`, `#[repr(simd)]`, custom enum discriminants,
    /// negative/default impls, auto traits, trait aliases, `macro`, and
    /// `impl Trait` in type aliases.
    fn visit_item(&mut self, i: &'a ast::Item) {
        match i.kind {
            ast::ItemKind::ForeignMod(ref foreign_module) => {
                if let Some(abi) = foreign_module.abi {
                    self.check_abi(abi);
                }
            }
            ast::ItemKind::Fn(..) => {
                if self.sess.contains_name(&i.attrs[..], sym::plugin_registrar) {
                    gate_feature_post!(
                        &self,
                        plugin_registrar,
                        i.span,
                        "compiler plugins are experimental and possibly buggy"
                    );
                }
                if self.sess.contains_name(&i.attrs[..], sym::start) {
                    gate_feature_post!(
                        &self,
                        start,
                        i.span,
                        "`#[start]` functions are experimental \
                         and their signature may change \
                         over time"
                    );
                }
                if self.sess.contains_name(&i.attrs[..], sym::main) {
                    gate_feature_post!(
                        &self,
                        main,
                        i.span,
                        "declaration of a non-standard `#[main]` \
                         function may change over time, for now \
                         a top-level `fn main()` is required"
                    );
                }
            }
            ast::ItemKind::Struct(..) => {
                for attr in self.sess.filter_by_name(&i.attrs[..], sym::repr) {
                    for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
                        if item.has_name(sym::simd) {
                            gate_feature_post!(
                                &self,
                                repr_simd,
                                attr.span,
                                "SIMD types are experimental and possibly buggy"
                            );
                        }
                    }
                }
            }
            ast::ItemKind::Enum(ast::EnumDef { ref variants, .. }, ..) => {
                for variant in variants {
                    match (&variant.data, &variant.disr_expr) {
                        (ast::VariantData::Unit(..), _) => {}
                        (_, Some(disr_expr)) => gate_feature_post!(
                            &self,
                            arbitrary_enum_discriminant,
                            disr_expr.value.span,
                            "discriminants on non-unit variants are experimental"
                        ),
                        _ => {}
                    }
                }
                let has_feature = self.features.arbitrary_enum_discriminant;
                if !has_feature && !i.span.allows_unstable(sym::arbitrary_enum_discriminant) {
                    self.maybe_report_invalid_custom_discriminants(&variants);
                }
            }
            ast::ItemKind::Impl { polarity, defaultness, ref of_trait, .. } => {
                if let ast::ImplPolarity::Negative(span) = polarity {
                    gate_feature_post!(
                        &self,
                        negative_impls,
                        span.to(of_trait.as_ref().map(|t| t.path.span).unwrap_or(span)),
                        "negative trait bounds are not yet fully implemented; \
                         use marker types for now"
                    );
                }
                if let ast::Defaultness::Default(_) = defaultness {
                    gate_feature_post!(&self, specialization, i.span, "specialization is unstable");
                }
            }
            ast::ItemKind::Trait(ast::IsAuto::Yes, ..) => {
                gate_feature_post!(
                    &self,
                    optin_builtin_traits,
                    i.span,
                    "auto traits are experimental and possibly buggy"
                );
            }
            ast::ItemKind::TraitAlias(..) => {
                gate_feature_post!(&self, trait_alias, i.span, "trait aliases are experimental");
            }
            ast::ItemKind::MacroDef(ast::MacroDef { macro_rules: false, .. }) => {
                let msg = "`macro` is experimental";
                gate_feature_post!(&self, decl_macro, i.span, msg);
            }
            ast::ItemKind::TyAlias(_, _, _, Some(ref ty)) => self.check_impl_trait(&ty),
            _ => {}
        }
        visit::walk_item(self, i);
    }
    /// Gates foreign items: `#[link_name = "llvm.*"]` and extern types.
    fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
        match i.kind {
            ast::ForeignItemKind::Fn(..) | ast::ForeignItemKind::Static(..) => {
                let link_name = self.sess.first_attr_value_str_by_name(&i.attrs, sym::link_name);
                let links_to_llvm = match link_name {
                    Some(val) => val.as_str().starts_with("llvm."),
                    _ => false,
                };
                if links_to_llvm {
                    gate_feature_post!(
                        &self,
                        link_llvm_intrinsics,
                        i.span,
                        "linking to LLVM intrinsics is experimental"
                    );
                }
            }
            ast::ForeignItemKind::TyAlias(..) => {
                gate_feature_post!(&self, extern_types, i.span, "extern types are experimental")
            }
            ast::ForeignItemKind::MacCall(..) => {}
        }
        visit::walk_foreign_item(self, i)
    }
    /// Gates type-level features: unstable bare-fn ABIs and the `!` type.
    fn visit_ty(&mut self, ty: &'a ast::Ty) {
        match ty.kind {
            ast::TyKind::BareFn(ref bare_fn_ty) => {
                self.check_extern(bare_fn_ty.ext);
            }
            ast::TyKind::Never => {
                gate_feature_post!(&self, never_type, ty.span, "the `!` type is experimental");
            }
            _ => {}
        }
        visit::walk_ty(self, ty)
    }
    /// Skips gating `-> !` return types (deliberately allowed) while
    /// still visiting any other return type normally.
    fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FnRetTy) {
        if let ast::FnRetTy::Ty(ref output_ty) = *ret_ty {
            if let ast::TyKind::Never = output_ty.kind {
                // Do nothing.
            } else {
                self.visit_ty(output_ty)
            }
        }
    }
    /// Gates expression-level features: `box`, type ascription, `try`
    /// blocks, and labeled blocks.
    fn visit_expr(&mut self, e: &'a ast::Expr) {
        match e.kind {
            ast::ExprKind::Box(_) => {
                gate_feature_post!(
                    &self,
                    box_syntax,
                    e.span,
                    "box expression syntax is experimental; you can call `Box::new` instead"
                );
            }
            ast::ExprKind::Type(..) => {
                // To avoid noise about type ascription in common syntax errors, only emit if it
                // is the *only* error.
                if self.sess.parse_sess.span_diagnostic.err_count() == 0 {
                    gate_feature_post!(
                        &self,
                        type_ascription,
                        e.span,
                        "type ascription is experimental"
                    );
                }
            }
            ast::ExprKind::TryBlock(_) => {
                gate_feature_post!(&self, try_blocks, e.span, "`try` expression is experimental");
            }
            ast::ExprKind::Block(_, opt_label) => {
                if let Some(label) = opt_label {
                    gate_feature_post!(
                        &self,
                        label_break_value,
                        label.ident.span,
                        "labels on blocks are unstable"
                    );
                }
            }
            _ => {}
        }
        visit::walk_expr(self, e)
    }
    /// Gates pattern-level features: box patterns and exclusive ranges.
    fn visit_pat(&mut self, pattern: &'a ast::Pat) {
        match &pattern.kind {
            PatKind::Box(..) => {
                gate_feature_post!(
                    &self,
                    box_patterns,
                    pattern.span,
                    "box pattern syntax is experimental"
                );
            }
            PatKind::Range(_, _, Spanned { node: RangeEnd::Excluded, .. }) => {
                gate_feature_post!(
                    &self,
                    exclusive_range_pattern,
                    pattern.span,
                    "exclusive range pattern syntax is experimental"
                );
            }
            _ => {}
        }
        visit::walk_pat(self, pattern)
    }
    /// Gates function-level features: unstable ABIs, `const extern fn`,
    /// and C-variadic signatures outside `extern` blocks.
    fn visit_fn(&mut self, fn_kind: FnKind<'a>, span: Span, _: NodeId) {
        if let Some(header) = fn_kind.header() {
            // Stability of const fn methods are covered in `visit_assoc_item` below.
            self.check_extern(header.ext);
            if let (ast::Const::Yes(_), ast::Extern::Implicit)
            | (ast::Const::Yes(_), ast::Extern::Explicit(_)) = (header.constness, header.ext)
            {
                gate_feature_post!(
                    &self,
                    const_extern_fn,
                    span,
                    "`const extern fn` definitions are unstable"
                );
            }
        }
        if fn_kind.ctxt() != Some(FnCtxt::Foreign) && fn_kind.decl().c_variadic() {
            gate_feature_post!(&self, c_variadic, span, "C-variadic functions are unstable")
        }
        visit::walk_fn(self, fn_kind, span)
    }
    /// Gates const generic parameters (either `const_generics` or
    /// `min_const_generics` suffices).
    fn visit_generic_param(&mut self, param: &'a GenericParam) {
        if let GenericParamKind::Const { .. } = param.kind {
            gate_feature_fn!(
                &self,
                |x: &Features| x.const_generics || x.min_const_generics,
                param.ident.span,
                sym::min_const_generics,
                "const generics are unstable"
            );
        }
        visit::walk_generic_param(self, param)
    }
    /// Gates associated type bounds (`T: Iterator<Item: Clone>`).
    fn visit_assoc_ty_constraint(&mut self, constraint: &'a AssocTyConstraint) {
        if let AssocTyConstraintKind::Bound { .. } = constraint.kind {
            gate_feature_post!(
                &self,
                associated_type_bounds,
                constraint.span,
                "associated type bounds are unstable"
            )
        }
        visit::walk_assoc_ty_constraint(self, constraint)
    }
    /// Gates associated-item features: `const fn` in traits, associated
    /// type defaults, GATs, `impl Trait` in assoc type aliases, and
    /// `default` items (specialization / min_specialization).
    fn visit_assoc_item(&mut self, i: &'a ast::AssocItem, ctxt: AssocCtxt) {
        let is_fn = match i.kind {
            ast::AssocItemKind::Fn(_, ref sig, _, _) => {
                if let (ast::Const::Yes(_), AssocCtxt::Trait) = (sig.header.constness, ctxt) {
                    gate_feature_post!(&self, const_fn, i.span, "const fn is unstable");
                }
                true
            }
            ast::AssocItemKind::TyAlias(_, ref generics, _, ref ty) => {
                if let (Some(_), AssocCtxt::Trait) = (ty, ctxt) {
                    gate_feature_post!(
                        &self,
                        associated_type_defaults,
                        i.span,
                        "associated type defaults are unstable"
                    );
                }
                if let Some(ty) = ty {
                    self.check_impl_trait(ty);
                }
                self.check_gat(generics, i.span);
                false
            }
            _ => false,
        };
        if let ast::Defaultness::Default(_) = i.kind.defaultness() {
            // Limit `min_specialization` to only specializing functions.
            gate_feature_fn!(
                &self,
                |x: &Features| x.specialization || (is_fn && x.min_specialization),
                i.span,
                sym::specialization,
                "specialization is unstable"
            );
        }
        visit::walk_assoc_item(self, i, ctxt)
    }
    /// Gates the bare `crate` visibility modifier.
    fn visit_vis(&mut self, vis: &'a ast::Visibility) {
        if let ast::VisibilityKind::Crate(ast::CrateSugar::JustCrate) = vis.kind {
            gate_feature_post!(
                &self,
                crate_visibility_modifier,
                vis.span,
                "`crate` visibility modifier is experimental"
            );
        }
        visit::walk_vis(self, vis)
    }
}
/// Walks the whole crate running the post-expansion feature-gate checks,
/// after first rejecting `#![feature]` on non-nightly channels and
/// mutually incompatible feature pairs.  Some gates are emitted directly
/// here from spans collected eagerly during parsing (`gated_spans`).
pub fn check_crate(krate: &ast::Crate, sess: &Session) {
    maybe_stage_features(sess, krate);
    check_incompatible_features(sess);
    let mut visitor = PostExpansionVisitor { sess, features: &sess.features_untracked() };
    let spans = sess.parse_sess.gated_spans.spans.borrow();
    // Emits a gate error for every span parsing recorded under $gate.
    macro_rules! gate_all {
        ($gate:ident, $msg:literal) => {
            if let Some(spans) = spans.get(&sym::$gate) {
                for span in spans {
                    gate_feature_post!(&visitor, $gate, *span, $msg);
                }
            }
        };
    }
    gate_all!(if_let_guard, "`if let` guard is not implemented");
    gate_all!(let_chains, "`let` expressions in this position are experimental");
    gate_all!(async_closure, "async closures are unstable");
    gate_all!(generators, "yield syntax is experimental");
    gate_all!(or_patterns, "or-patterns syntax is experimental");
    gate_all!(raw_ref_op, "raw address of syntax is experimental");
    gate_all!(const_trait_bound_opt_out, "`?const` on trait bounds is experimental");
    gate_all!(const_trait_impl, "const trait impls are experimental");
    gate_all!(half_open_range_patterns, "half-open range patterns are unstable");
    // All uses of `gate_all!` below this point were added in #65742,
    // and subsequently disabled (with the non-early gating readded).
    macro_rules! gate_all {
        ($gate:ident, $msg:literal) => {
            // FIXME(eddyb) do something more useful than always
            // disabling these uses of early feature-gatings.
            if false {
                for span in spans.get(&sym::$gate).unwrap_or(&vec![]) {
                    gate_feature_post!(&visitor, $gate, *span, $msg);
                }
            }
        };
    }
    gate_all!(trait_alias, "trait aliases are experimental");
    gate_all!(associated_type_bounds, "associated type bounds are unstable");
    gate_all!(crate_visibility_modifier, "`crate` visibility modifier is experimental");
    gate_all!(const_generics, "const generics are unstable");
    gate_all!(decl_macro, "`macro` is experimental");
    gate_all!(box_patterns, "box pattern syntax is experimental");
    gate_all!(exclusive_range_pattern, "exclusive range pattern syntax is experimental");
    gate_all!(try_blocks, "`try` blocks are unstable");
    gate_all!(label_break_value, "labels on blocks are unstable");
    gate_all!(box_syntax, "box expression syntax is experimental; you can call `Box::new` instead");
    // To avoid noise about type ascription in common syntax errors,
    // only emit if it is the *only* error. (Also check it last.)
    if sess.parse_sess.span_diagnostic.err_count() == 0 {
        gate_all!(type_ascription, "type ascription is experimental");
    }
    visit::walk_crate(&mut visitor, krate);
}
/// Emits E0554 for every crate-level `#![feature(...)]` attribute when
/// the compiler is not a nightly build (feature gates are
/// nightly-only).
fn maybe_stage_features(sess: &Session, krate: &ast::Crate) {
    if !sess.opts.unstable_features.is_nightly_build() {
        for attr in krate.attrs.iter().filter(|attr| sess.check_name(attr, sym::feature)) {
            struct_span_err!(
                sess.parse_sess.span_diagnostic,
                attr.span,
                E0554,
                "`#![feature]` may not be used on the {} release channel",
                option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)")
            )
            .emit();
        }
    }
}
/// Reports an error (with both declaration spans) for every pair of
/// enabled features listed in `rustc_feature::INCOMPATIBLE_FEATURES`.
fn check_incompatible_features(sess: &Session) {
    let features = sess.features_untracked();
    // All declared features (lang + lib) with their declaration spans.
    let declared_features = features
        .declared_lang_features
        .iter()
        .copied()
        .map(|(name, span, _)| (name, span))
        .chain(features.declared_lib_features.iter().copied());
    for (f1, f2) in rustc_feature::INCOMPATIBLE_FEATURES
        .iter()
        .filter(|&&(f1, f2)| features.enabled(f1) && features.enabled(f2))
    {
        if let Some((f1_name, f1_span)) = declared_features.clone().find(|(name, _)| name == f1) {
            if let Some((f2_name, f2_span)) = declared_features.clone().find(|(name, _)| name == f2)
            {
                let spans = vec![f1_span, f2_span];
                sess.struct_span_err(
                    spans.clone(),
                    &format!(
                        "features `{}` and `{}` are incompatible, using them at the same time \
                        is not allowed",
                        f1_name, f2_name
                    ),
                )
                .help("remove one of these features")
                .emit();
            }
        }
    }
}
| {
"pile_set_name": "Github"
} |
Model:
predict "Class = +1" if score > -1
score = 2 * x4
-2 * x2
-2 * x33
Accuracy: 0.62855
Confusion Matrix:
1337 672
4110 6755
Predictions:
-1
-1
-1
1
1
1
-1
1
1
-1
-1
-1
-1
-1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
-1
1
-1
-1
-1
1
-1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
-1
-1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
1
-1
-1
1
-1
1
1
1
-1
-1
-1
1
1
1
1
-1
1
1
1
-1
1
1
-1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
-1
1
1
1
-1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
-1
1
-1
1
-1
-1
-1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
-1
-1
-1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
-1
-1
-1
-1
1
1
-1
-1
-1
1
1
-1
1
-1
1
-1
-1
-1
-1
1
1
-1
-1
-1
-1
1
-1
-1
-1
1
-1
1
1
-1
-1
-1
-1
-1
1
1
1
1
-1
1
1
1
-1
1
-1
1
1
-1
1
-1
1
-1
1
1
1
-1
1
1
-1
1
-1
-1
-1
1
-1
-1
-1
1
-1
-1
1
-1
-1
1
-1
1
1
1
1
-1
-1
1
1
-1
1
-1
1
-1
-1
1
-1
1
-1
1
-1
1
-1
-1
1
-1
-1
-1
-1
-1
1
-1
1
-1
-1
1
-1
1
-1
1
1
-1
-1
-1
-1
1
1
-1
-1
-1
-1
-1
-1
-1
1
-1
1
-1
1
-1
1
1
1
-1
1
1
-1
-1
-1
1
1
1
1
-1
1
-1
-1
1
-1
-1
-1
-1
-1
1
-1
-1
1
-1
1
1
-1
-1
1
1
-1
1
1
1
1
1
-1
1
-1
-1
1
-1
-1
1
1
1
1
-1
-1
-1
1
-1
1
-1
1
1
-1
1
1
-1
1
-1
-1
1
-1
1
-1
1
1
-1
1
1
-1
-1
-1
1
-1
1
1
-1
1
1
1
-1
1
-1
-1
1
-1
1
1
-1
-1
-1
-1
-1
1
1
-1
1
-1
1
1
-1
-1
-1
1
1
1
1
-1
-1
1
-1
-1
1
1
-1
-1
-1
1
1
-1
1
1
-1
1
-1
1
1
1
-1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
-1
-1
-1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
-1
-1
-1
1
1
-1
1
1
-1
1
1
-1
1
-1
-1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
-1
-1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
-1
-1
1
1
1
1
1
-1
-1
1
1
1
-1
-1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
-1
1
-1
1
1
1
1
1
-1
1
1
-1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
-1
1
1
1
-1
-1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
-1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
-1
1
-1
1
1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
-1
1
-1
1
1
1
-1
1
1
-1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
-1
1
1
1
1
1
-1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
-1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
1
-1
-1
1
1
-1
1
-1
-1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
-1
-1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
1
-1
1
1
-1
-1
1
-1
-1
-1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
-1
-1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
-1
-1
1
-1
-1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
-1
-1
-1
-1
-1
-1
-1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
1
-1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
1
-1
-1
-1
-1
-1
1
-1
-1
1
-1
-1
-1
1
1
-1
-1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
-1
-1
1
1
-1
-1
-1
1
-1
-1
-1
-1
-1
1
1
1
-1
1
1
1
-1
1
-1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
-1
1
-1
1
-1
1
1
1
-1
1
-1
1
-1
-1
1
1
1
-1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
-1
1
1
-1
-1
1
1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
-1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
-1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
-1
1
1
1
-1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
-1
1
1
-1
-1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
-1
-1
-1
-1
1
-1
-1
1
1
1
-1
-1
-1
-1
1
1
1
-1
-1
-1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
-1
1
-1
-1
-1
1
-1
1
-1
1
-1
1
1
1
1
1
1
1
-1
-1
-1
-1
-1
1
-1
1
-1
1
1
1
1
-1
1
-1
-1
1
1
1
-1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
-1
-1
-1
1
1
1
-1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
-1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
-1
-1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
-1
1
1
1
1
-1
-1
1
1
1
-1
1
-1
1
-1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
-1
-1
1
-1
1
1
1
1
-1
-1
-1
1
-1
-1
-1
-1
1
1
1
1
-1
1
-1
1
1
-1
-1
-1
1
1
1
1
-1
1
1
1
1
-1
1
-1
-1
1
-1
-1
1
-1
1
1
1
1
-1
-1
1
-1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
-1
-1
-1
1
1
-1
1
-1
-1
1
-1
1
-1
1
1
-1
1
-1
-1
-1
-1
-1
1
1
1
1
-1
1
1
-1
-1
1
-1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
-1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
1
-1
-1
1
1
1
-1
-1
-1
1
-1
1
-1
1
1
-1
-1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
-1
1
1
1
1
-1
-1
-1
1
1
-1
-1
1
-1
1
1
1
1
1
1
-1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
-1
-1
-1
-1
-1
-1
1
-1
1
-1
1
1
1
-1
-1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
-1
1
1
1
-1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
-1
-1
1
1
1
-1
-1
-1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
-1
-1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
-1
-1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
-1
-1
-1
1
-1
-1
1
-1
1
-1
-1
-1
1
-1
1
-1
1
-1
-1
-1
-1
-1
-1
1
-1
-1
1
-1
1
-1
-1
-1
1
-1
1
-1
-1
-1
1
1
1
-1
-1
1
1
-1
-1
-1
1
-1
-1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
-1
1
1
-1
-1
-1
1
-1
1
1
1
1
1
-1
-1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
-1
1
-1
-1
1
1
1
1
-1
1
-1
1
-1
-1
1
-1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
-1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
-1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
-1
1
-1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
-1
1
1
-1
1
-1
-1
-1
1
1
1
1
-1
-1
-1
1
-1
1
1
1
-1
-1
-1
1
-1
-1
1
1
-1
-1
-1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
-1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
-1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
1
-1
1
-1
1
-1
1
1
1
1
1
-1
-1
-1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
-1
-1
1
1
-1
1
-1
-1
-1
-1
-1
1
-1
1
1
-1
1
-1
1
1
1
1
1
1
1
-1
-1
1
-1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
-1
-1
1
-1
-1
1
1
-1
1
1
-1
1
1
1
1
1
-1
-1
-1
1
1
1
-1
-1
1
-1
-1
1
-1
1
1
-1
-1
1
-1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
1
-1
1
1
1
1
1
-1
1
-1
-1
1
1
1
1
1
-1
-1
1
-1
-1
1
1
1
1
-1
1
-1
1
-1
-1
1
-1
1
1
1
-1
1
-1
1
1
-1
1
1
-1
1
1
-1
-1
-1
1
-1
-1
1
1
-1
1
-1
1
-1
1
-1
1
1
-1
-1
-1
1
-1
-1
1
1
1
-1
-1
1
-1
1
-1
1
-1
-1
-1
1
-1
-1
-1
1
1
-1
1
1
-1
-1
1
-1
1
-1
-1
1
-1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
-1
-1
1
1
1
-1
-1
-1
1
-1
1
-1
1
-1
-1
1
-1
-1
-1
1
1
-1
1
-1
1
1
-1
-1
-1
1
1
1
1
1
1
1
-1
1
1
-1
-1
-1
1
1
1
1
-1
-1
-1
-1
-1
-1
-1
1
-1
1
-1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
-1
-1
1
-1
-1
-1
-1
1
-1
-1
1
1
1
-1
-1
1
-1
-1
-1
1
-1
-1
-1
1
-1
1
1
-1
-1
-1
-1
1
-1
-1
-1
1
-1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
-1
1
-1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
-1
-1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
-1
1
-1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
-1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
-1
1
1
-1
1
-1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
-1
-1
1
1
1
1
-1
-1
-1
-1
1
-1
1
1
1
-1
1
-1
1
1
-1
1
1
1
1
1
1
-1
1
-1
1
1
-1
1
1
1
-1
1
-1
1
1
-1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
-1
-1
1
1
1
1
1
1
-1
1
-1
1
-1
-1
-1
-1
-1
-1
1
-1
1
1
1
-1
-1
-1
1
-1
1
1
1
-1
-1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
-1
1
1
1
1
-1
1
1
1
-1
1
-1
1
-1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
-1
-1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
-1
1
1
-1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
1
1
-1
1
1
1
1
1
1
1
1
1
1
-1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
1
1
-1
-1
1
1
1
1
1
1
1
-1
1
-1
-1
1
1
1
-1
1
1
1
1
1
1
1
-1
1
1
1
1
-1
-1
-1
-1
1
1
1
1
-1
-1
1
1
-1
1
-1
1
1
1
-1
1
1
-1
1
1
-1
-1
1
1
1
1
-1
1
1
-1
-1
-1
1
-1
-1
1
1
-1
1
1
1
-1
-1
-1
1
1
-1
1
1
-1
1
-1
-1
1
1
1
-1
1
1
-1
1
1
-1
-1
1
1
-1
1
1
-1
1
1
1
-1
-1
1
1
1
-1
-1
-1
1
-1
-1
-1
-1
1
1
| {
"pile_set_name": "Github"
} |
//this code excerpt also demonstrates try/catch exception handling
#include <afxinet.h>

// Fetches the resource pszFileName from HTTP server pszServerName (port 80)
// with a GET request and, on HTTP 200, prints the response body to stdout.
// WinInet failures are reported via their CInternetException message.
void DisplayHttpPage(LPCTSTR pszServerName, LPCTSTR pszFileName)
{
    CInternetSession session(_T("My Session"));
    CHttpConnection *pServer = NULL;
    CHttpFile *pFile = NULL;
    try
    {
        INTERNET_PORT nPort = 80;
        DWORD dwRet = 0;
        pServer = session.GetHttpConnection(pszServerName, nPort);
        pFile = pServer->OpenRequest(CHttpConnection::HTTP_VERB_GET, pszFileName);
        pFile->SendRequest();
        pFile->QueryInfoStatusCode(dwRet);
        if (dwRet == HTTP_STATUS_OK)
        {
            CHAR szBuff[1024];
            UINT nRead;
            // Read() returns the byte count actually read and does NOT
            // null-terminate the buffer, so reserve one byte and terminate
            // explicitly before handing the buffer to printf.
            while ((nRead = pFile->Read(szBuff, sizeof(szBuff) - 1)) > 0)
            {
                szBuff[nRead] = '\0';
                printf_s("%s", szBuff);
            }
        }
    }
    catch (CInternetException *pEx)
    {
        //catch errors from WinInet
        TCHAR pszError[64];
        pEx->GetErrorMessage(pszError, 64);
        _tprintf_s(_T("%63s"), pszError);
        // MFC exceptions are heap-allocated; free them via Delete(),
        // otherwise every failed request leaks the exception object.
        pEx->Delete();
    }
    // Release the request/connection objects on both the success and the
    // exception path (the original leaked them when an exception fired).
    delete pFile;
    delete pServer;
    session.Close();
}
"pile_set_name": "Github"
} |
---
name: Jérôme Petazzoni
organization: Docker
github: jpetazzo
---
| {
"pile_set_name": "Github"
} |
# Contiki webserver example build file.
CONTIKI_PROJECT = webserver-example
all: $(CONTIKI_PROJECT)
APPS = webserver
# The webserver application normally contains a built-in file system and support
# for server-side includes.
#
# This webserver example supports building the alternative webserver application
# which serves files from an cfs file system. To build the alternative webserver
# run make with the parameter HTTPD-CFS=1.
ifeq ($(HTTPD-CFS),1)
# Swap in the CFS-backed httpd sources in place of the built-in one.
override webserver_src = webserver-nogui.c http-strings.c psock.c memb.c \
    httpd-cfs.c urlconv.c
endif
CONTIKI = ../..
# This example uses the IPv4 network stack.
CONTIKI_WITH_IPV4 = 1
include $(CONTIKI)/Makefile.include
# Intentionally httpd.c and httpd-cfs.c implement the same interface. When
# switching from one webserver alternative to the other with an existent
# Contiki library then both files end up in the library making the linker
# use whichever it finds first :-(
#
# The most straightforward way to make sure this doesn't happen is to delete
# the Contiki library. But it would be undesirable to do that on every build
# so the existence of the "wrong" object file is used to detect a switch and
# trigger deletion of the Contiki library - and the trigger file of course.
ifeq ($(HTTPD-CFS),1)
ifneq (${wildcard $(OBJECTDIR)/httpd.o},)
DUMMY := ${shell rm -f contiki-$(TARGET).a $(OBJECTDIR)/httpd.o}
endif
else
ifneq (${wildcard $(OBJECTDIR)/httpd-cfs.o},)
DUMMY := ${shell rm -f contiki-$(TARGET).a $(OBJECTDIR)/httpd-cfs.o}
endif
endif
"pile_set_name": "Github"
} |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*/
#ifndef BTRFS_TREE_LOG_H
#define BTRFS_TREE_LOG_H
#include "ctree.h"
#include "transaction.h"
/* return value for btrfs_log_dentry_safe that means we don't need to log it at all */
#define BTRFS_NO_LOG_SYNC 256
/* Per-sync context passed through the tree-log code. */
struct btrfs_log_ctx {
	int log_ret;			/* result of the logging attempt */
	int log_transid;		/* transid of the log transaction used */
	int io_err;			/* recorded I/O error, if any */
	bool log_new_dentries;		/* whether new dentries still need logging */
	struct inode *inode;		/* inode being logged */
	struct list_head list;		/* linkage into the root's log ctx list */
};
/*
 * Initialize a log context for @inode: zero all result/state fields and
 * prepare the list head so the ctx can be linked onto a root's ctx list.
 */
static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
				       struct inode *inode)
{
	ctx->log_ret = 0;
	ctx->log_transid = 0;
	ctx->io_err = 0;
	ctx->log_new_dentries = false;
	ctx->inode = inode;
	INIT_LIST_HEAD(&ctx->list);
}
/*
 * Mark that the current transaction requires a full transaction commit
 * instead of a tree-log sync. Uses WRITE_ONCE to pair with the lockless
 * READ_ONCE in btrfs_need_log_full_commit() below.
 */
static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
					     struct btrfs_trans_handle *trans)
{
	WRITE_ONCE(fs_info->last_trans_log_full_commit, trans->transid);
}

/*
 * Return whether @trans was flagged (via btrfs_set_log_full_commit) as
 * needing a full commit rather than a log sync.
 */
static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
					     struct btrfs_trans_handle *trans)
{
	return READ_ONCE(fs_info->last_trans_log_full_commit) ==
		trans->transid;
}
int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_log_ctx *ctx);
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_recover_log_trees(struct btrfs_root *tree_root);
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct dentry *dentry,
const loff_t start,
const loff_t end,
struct btrfs_log_ctx *ctx);
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
struct btrfs_inode *dir, u64 index);
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
struct btrfs_inode *inode, u64 dirid);
void btrfs_end_log_trans(struct btrfs_root *root);
int btrfs_pin_log_trans(struct btrfs_root *root);
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
int for_rename);
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir);
/* Return values for btrfs_log_new_name() */
enum {
BTRFS_DONT_NEED_TRANS_COMMIT,
BTRFS_NEED_TRANS_COMMIT,
BTRFS_DONT_NEED_LOG_SYNC,
BTRFS_NEED_LOG_SYNC,
};
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_inode *old_dir,
struct dentry *parent,
bool sync_log, struct btrfs_log_ctx *ctx);
#endif
| {
"pile_set_name": "Github"
} |
# Include guard: this config file may be pulled in from several places.
if(DEFINED INCLUDED_POTHOS_LIBRARY_CONFIG_CMAKE)
    return()
endif()
set(INCLUDED_POTHOS_LIBRARY_CONFIG_CMAKE TRUE)

########################################################################
# Automatic LIB_SUFFIX detection + configuration option
########################################################################
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    set(LINUX TRUE)
endif()
if(LINUX AND EXISTS "/etc/debian_version")
    set(DEBIAN TRUE)
endif()
if(LINUX AND EXISTS "/etc/redhat-release")
    set(REDHAT TRUE)
endif()
if(LINUX AND EXISTS "/etc/SuSE-release")
    set(SUSE TRUE)
endif()
if(LINUX AND EXISTS "/etc/slackware-version")
    set(SLACKWARE TRUE)
endif()
# RedHat/SuSE/Slackware 64-bit systems install libraries into lib64.
if(NOT DEFINED LIB_SUFFIX AND (REDHAT OR SUSE OR SLACKWARE) AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$")
    SET(LIB_SUFFIX 64)
endif()
set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix")

########################################################################
# rpath setup - http://www.cmake.org/Wiki/CMake_RPATH_handling
########################################################################
# use, i.e. don't skip the full RPATH for the build tree
option(CMAKE_SKIP_BUILD_RPATH "skip rpath build" FALSE)
# when building, don't use the install RPATH already
# (but later on when installing)
option(CMAKE_BUILD_WITH_INSTALL_RPATH "build with install rpath" FALSE)
# the RPATH to be used when installing, but only if it's not a system directory
# NOTE: option() requires a help string as the second argument; the original
# option(CMAKE_AUTOSET_INSTALL_RPATH TRUE) made "TRUE" the help text and left
# the default value OFF, silently disabling the install rpath auto-setup.
option(CMAKE_AUTOSET_INSTALL_RPATH "automatically set the install rpath" TRUE)
if(CMAKE_AUTOSET_INSTALL_RPATH)
LIST(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}" isSystemDir)
IF("${isSystemDir}" STREQUAL "-1")
    SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}")
ENDIF("${isSystemDir}" STREQUAL "-1")
endif(CMAKE_AUTOSET_INSTALL_RPATH)

# add the automatically determined parts of the RPATH
# which point to directories outside the build tree to the install RPATH
option(CMAKE_INSTALL_RPATH_USE_LINK_PATH "build with automatic rpath" TRUE)

if(APPLE)
    set(CMAKE_MACOSX_RPATH ON)
endif()
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<selector xmlns:android="http://schemas.android.com/apk/res/android" >
    <!-- Color used in the default (unchecked) state -->
    <item android:color ="#6DCAEC" android:state_checked="false"/>
    <!-- Color used in the checked (selected) state -->
    <item android:color="#fffffb" android:state_checked="true"/>
</selector>
| {
"pile_set_name": "Github"
} |
{
"compilerOptions": {
"target": "es5",
"module": "commonjs",
"declaration": false,
"noImplicitAny": false,
"sourceMap": true,
"suppressImplicitAnyIndexErrors": true,
"lib": [
"dom",
"es5",
"scripthost",
"es2015"
],
"experimentalDecorators": true,
"types": [
"webpack-env"
]
},
"compileOnSave": false,
"exclude": [
"node_modules"
]
}
| {
"pile_set_name": "Github"
} |
// SetUpGame.cpp : implementation file
//
#include "stdafx.h"
#include "duallistdemo.h"
#include "SetUpGame.h"
#ifdef _DEBUG
#define new DEBUG_NEW
#undef THIS_FILE
static char THIS_FILE[] = __FILE__;
#endif
/////////////////////////////////////////////////////////////////////////////
// CSetUpGame dialog
// Dialog constructor: chains to CDialog with this dialog's template ID and
// defaults both option flags to FALSE.
CSetUpGame::CSetUpGame(CWnd* pParent /*=NULL*/)
    : CDialog(CSetUpGame::IDD, pParent)
{
    //{{AFX_DATA_INIT(CSetUpGame)
    // NOTE: the ClassWizard will add member initialization here
    m_bCloseSound=FALSE;
    m_bHackerActivity=FALSE;
    //}}AFX_DATA_INIT
}

// Framework callback that exchanges data between the dialog controls and the
// member variables; pDX carries the transfer direction. Each checkbox is
// bound to its flag via DDX_Check.
void CSetUpGame::DoDataExchange(CDataExchange* pDX)
{
    CDialog::DoDataExchange(pDX);
    //{{AFX_DATA_MAP(CSetUpGame)
    // NOTE: the ClassWizard will add DDX and DDV calls here
    DDX_Check(pDX, IDC_CLOSE_SOUND, m_bCloseSound);
    DDX_Check(pDX, IDC_HACKER, m_bHackerActivity);
    //}}AFX_DATA_MAP
}

// Message map: currently empty; ClassWizard inserts handler macros between
// the AFX_MSG_MAP markers.
BEGIN_MESSAGE_MAP(CSetUpGame, CDialog)
    //{{AFX_MSG_MAP(CSetUpGame)
    // NOTE: the ClassWizard will add message map macros here
    //}}AFX_MSG_MAP
END_MESSAGE_MAP()
/////////////////////////////////////////////////////////////////////////////
// CSetUpGame message handlers
| {
"pile_set_name": "Github"
} |
{
"name": "jsprim",
"version": "1.4.1",
"description": "utilities for primitive JavaScript types",
"main": "./lib/jsprim.js",
"repository": {
"type": "git",
"url": "git://github.com/joyent/node-jsprim.git"
},
"dependencies": {
"assert-plus": "1.0.0",
"extsprintf": "1.3.0",
"json-schema": "0.2.3",
"verror": "1.10.0"
},
"engines": [
"node >=0.6.0"
],
"license": "MIT"
}
| {
"pile_set_name": "Github"
} |
/*
***************************************************************************************
* Copyright (C) 2006 EsperTech, Inc. All rights reserved. *
* http://www.espertech.com/esper *
* http://www.espertech.com *
* ---------------------------------------------------------------------------------- *
* The software in this package is published under the terms of the GPL license *
* a copy of which has been included with this distribution in the license.txt file. *
***************************************************************************************
*/
package com.espertech.esper.common.internal.compile.stage1.spec;
import java.io.Serializable;
import java.util.List;
/**
 * Immutable specification of a single operator in a data-flow graph:
 * its name, input/output descriptions, optional detail, and annotations.
 * Serializable; field names must stay stable to preserve the serialized form.
 */
public class GraphOperatorSpec implements Serializable {
    private static final long serialVersionUID = 7606589198404851791L;

    private final String operatorName;
    private final GraphOperatorInput input;
    private final GraphOperatorOutput output;
    private final GraphOperatorDetail detail;
    private final List<AnnotationDesc> annotations;

    /**
     * Ctor.
     * @param operatorName name of the operator
     * @param input input definition
     * @param output output definition
     * @param detail optional operator detail
     * @param annotations annotations attached to the operator
     */
    public GraphOperatorSpec(String operatorName, GraphOperatorInput input, GraphOperatorOutput output, GraphOperatorDetail detail, List<AnnotationDesc> annotations) {
        this.operatorName = operatorName;
        this.input = input;
        this.output = output;
        this.detail = detail;
        this.annotations = annotations;
    }

    /** @return the operator name */
    public String getOperatorName() {
        return operatorName;
    }

    /** @return the input definition */
    public GraphOperatorInput getInput() {
        return input;
    }

    /** @return the output definition */
    public GraphOperatorOutput getOutput() {
        return output;
    }

    /** @return the operator detail */
    public GraphOperatorDetail getDetail() {
        return detail;
    }

    /** @return the annotations */
    public List<AnnotationDesc> getAnnotations() {
        return annotations;
    }
}
| {
"pile_set_name": "Github"
} |
# Event 20 - task_0
###### Version: 0
## Description
None
## Data Dictionary
|Standard Name|Field Name|Type|Description|Sample Value|
|---|---|---|---|---|
|TBD|LoggingChannelName|UnicodeString|None|`None`|
|TBD|ActivityName|UnicodeString|None|`None`|
## Tags
* etw_level_Verbose
* etw_keywords_K_ActivityLogging
* etw_opcode_Stop
* etw_task_task_0 | {
"pile_set_name": "Github"
} |
#ifndef BOOST_METAPARSE_UTIL_DIGIT_TO_INT_C_HPP
#define BOOST_METAPARSE_UTIL_DIGIT_TO_INT_C_HPP

// Copyright Abel Sinkovics ([email protected])  2013.
// Distributed under the Boost Software License, Version 1.0.
//    (See accompanying file LICENSE_1_0.txt or copy at
//   http://www.boost.org/LICENSE_1_0.txt)

#include <boost/metaparse/v1/util/digit_to_int_c.hpp>

// Version-independent facade: re-export the v1 implementation of
// digit_to_int_c into the stable boost::metaparse::util namespace so
// client code does not depend on the versioned namespace directly.
namespace boost
{
  namespace metaparse
  {
    namespace util
    {
      using v1::util::digit_to_int_c;
    }
  }
}

#endif
| {
"pile_set_name": "Github"
} |
这是一个独立部署的服务:它与主项目相互独立,却又与项目紧密协作;
双方通过 Java 端口通讯来提供服务和消费服务。
"pile_set_name": "Github"
} |
# RUN: llvm-mc --disassemble %s -triple=thumb-apple-darwin9 |& grep {invalid instruction encoding}
# Opcode=2128 Name=t2STREXD Format=ARM_FORMAT_THUMBFRM(25)
# 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
# -------------------------------------------------------------------------------------------------
# | 1: 1: 1: 0| 1: 0: 0: 0| 1: 1: 0: 0| 0: 0: 1: 0| 0: 1: 1: 1| 1: 0: 0: 0| 0: 1: 1: 1| 1: 0: 0: 0|
# -------------------------------------------------------------------------------------------------
#
# if d == n || d == t || d == t2 then UNPREDICTABLE
mc-input.txt:1:1: warning: invalid instruction encoding
| {
"pile_set_name": "Github"
} |
/*
* wm5100.c -- WM5100 ALSA SoC Audio driver
*
* Copyright 2011-2 Wolfson Microelectronics plc
*
* Author: Mark Brown <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/gcd.h>
#include <linux/gpio/driver.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/fixed.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <sound/wm5100.h>
#include "wm5100.h"
/* Supplies that must always be present; passed to the regulator bulk API. */
#define WM5100_NUM_CORE_SUPPLIES 2
static const char *wm5100_core_supply_names[WM5100_NUM_CORE_SUPPLIES] = {
	"DBVDD1",
	"LDOVDD", /* If DCVDD is supplied externally specify as LDOVDD */
};

/* Number of audio interfaces (AIF1..AIF3) on the device. */
#define WM5100_AIFS     3
/* Number of shared sample-rate slots (see wm5100_alloc_sr()/wm5100_free_sr()). */
#define WM5100_SYNC_SRS 3
/*
 * Per-FLL state.
 * NOTE(review): the members are not referenced within this chunk;
 * fref/fout presumably hold the configured reference/output
 * frequencies, src the clock source, and 'lock' signals FLL lock —
 * confirm against the FLL configuration code elsewhere in the file.
 */
struct wm5100_fll {
	int fref;
	int fout;
	int src;
	struct completion lock;
};
/* codec private data */
struct wm5100_priv {
	struct device *dev;
	struct regmap *regmap;			/* register access; used e.g. by wm5100_reset() */
	struct snd_soc_component *component;
	struct regulator_bulk_data core_supplies[WM5100_NUM_CORE_SUPPLIES];

	int rev;				/* silicon revision */

	int sysclk;				/* SYSCLK rate in Hz; checked in wm5100_alloc_sr() */
	int asyncclk;				/* ASYNCCLK rate in Hz */

	bool aif_async[WM5100_AIFS];		/* AIF clocked from ASYNCCLK? */
	bool aif_symmetric[WM5100_AIFS];
	int sr_ref[WM5100_SYNC_SRS];		/* refcount per shared sample-rate slot */

	/* Set by wm5100_out_ev() when an output enable register is
	 * written; cleared by wm5100_seq_notifier() once the matching
	 * status register reports the outputs as up. */
	bool out_ena[2];

	/* Jack detection state.  NOTE(review): not exercised in this
	 * chunk — semantics inferred from names; confirm against the
	 * jack detect handlers. */
	struct snd_soc_jack *jack;
	bool jack_detecting;
	bool jack_mic;
	int jack_mode;
	int jack_flips;

	struct wm5100_fll fll[2];

	struct wm5100_pdata pdata;		/* platform data (reset GPIO etc.) */

#ifdef CONFIG_GPIOLIB
	struct gpio_chip gpio_chip;
#endif
};
/*
 * Sample rates indexed by the SAMPLE_RATE_* register field value:
 * entries 0-7 are the 12kHz family, 8-15 the 11.025kHz family,
 * 16-23 the 4kHz family.  Zero entries appear to be reserved codes.
 * The register stores the *index* into this table, not the rate.
 */
static int wm5100_sr_code[] = {
	0,
	12000,
	24000,
	48000,
	96000,
	192000,
	384000,
	768000,
	0,
	11025,
	22050,
	44100,
	88200,
	176400,
	352800,
	705600,
	4000,
	8000,
	16000,
	32000,
	64000,
	128000,
	256000,
	512000,
};
/* Registers holding the three shared sample-rate slots, by slot index. */
static int wm5100_sr_regs[WM5100_SYNC_SRS] = {
	WM5100_CLOCKING_4,
	WM5100_CLOCKING_5,
	WM5100_CLOCKING_6,
};
/*
 * Allocate, or take an additional reference on, one of the shared
 * sample-rate slots for @rate.
 *
 * The slot register fields store the index into wm5100_sr_code[]
 * (not the rate in Hz), so 'sr_code' below is the table index.
 *
 * Returns the slot number on success, -EINVAL if the rate is
 * unsupported or does not divide SYSCLK, -EBUSY if no slot is free.
 */
static int wm5100_alloc_sr(struct snd_soc_component *component, int rate)
{
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	int sr_code, sr_free, i;

	/* Map the rate to its table index. */
	for (i = 0; i < ARRAY_SIZE(wm5100_sr_code); i++)
		if (wm5100_sr_code[i] == rate)
			break;
	if (i == ARRAY_SIZE(wm5100_sr_code)) {
		dev_err(component->dev, "Unsupported sample rate: %dHz\n", rate);
		return -EINVAL;
	}
	sr_code = i;

	if ((wm5100->sysclk % rate) == 0) {
		/* Is this rate already in use? */
		sr_free = -1;
		for (i = 0; i < ARRAY_SIZE(wm5100_sr_regs); i++) {
			/* Remember the first unreferenced slot in case
			 * no slot already carries this rate. */
			if (!wm5100->sr_ref[i] && sr_free == -1) {
				sr_free = i;
				continue;
			}
			if ((snd_soc_component_read32(component, wm5100_sr_regs[i]) &
			     WM5100_SAMPLE_RATE_1_MASK) == sr_code)
				break;
		}

		if (i < ARRAY_SIZE(wm5100_sr_regs)) {
			/* Found an existing slot; bump its refcount. */
			wm5100->sr_ref[i]++;
			dev_dbg(component->dev, "SR %dHz, slot %d, ref %d\n",
				rate, i, wm5100->sr_ref[i]);
			return i;
		}

		if (sr_free == -1) {
			dev_err(component->dev, "All SR slots already in use\n");
			return -EBUSY;
		}

		/* Program a fresh slot with the table index. */
		dev_dbg(component->dev, "Allocating SR slot %d for %dHz\n",
			sr_free, rate);
		wm5100->sr_ref[sr_free]++;
		snd_soc_component_update_bits(component, wm5100_sr_regs[sr_free],
				    WM5100_SAMPLE_RATE_1_MASK,
				    sr_code);

		return sr_free;

	} else {
		dev_err(component->dev,
			"SR %dHz incompatible with %dHz SYSCLK and %dHz ASYNCCLK\n",
			rate, wm5100->sysclk, wm5100->asyncclk);
		return -EINVAL;
	}
}
/*
 * Drop one reference to the shared sample-rate slot programmed for
 * @rate, warning if no referenced slot carries that rate.
 */
static void wm5100_free_sr(struct snd_soc_component *component, int rate)
{
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	int i, sr_code;

	/* Map the rate to its table index. */
	for (i = 0; i < ARRAY_SIZE(wm5100_sr_code); i++)
		if (wm5100_sr_code[i] == rate)
			break;
	if (i == ARRAY_SIZE(wm5100_sr_code)) {
		dev_err(component->dev, "Unsupported sample rate: %dHz\n", rate);
		return;
	}
	/*
	 * Fix: the SAMPLE_RATE register fields hold the *index* into
	 * wm5100_sr_code[] (that is what wm5100_alloc_sr() writes), not
	 * the rate in Hz.  The original assigned wm5100_sr_code[i] here,
	 * so the comparison below could never match a programmed slot
	 * and references were never released.
	 */
	sr_code = i;

	/* Find the referenced slot carrying this code. */
	for (i = 0; i < ARRAY_SIZE(wm5100_sr_regs); i++) {
		if (!wm5100->sr_ref[i])
			continue;

		if ((snd_soc_component_read32(component, wm5100_sr_regs[i]) &
		     WM5100_SAMPLE_RATE_1_MASK) == sr_code)
			break;
	}
	if (i < ARRAY_SIZE(wm5100_sr_regs)) {
		wm5100->sr_ref[i]--;
		dev_dbg(component->dev, "Dereference SR %dHz, count now %d\n",
			rate, wm5100->sr_ref[i]);
	} else {
		dev_warn(component->dev, "Freeing unreferenced sample rate %dHz\n",
			 rate);
	}
}
/*
 * Reset the device.  Prefers a hardware reset by pulsing the reset
 * GPIO when the platform data provides one; otherwise falls back to
 * writing the software reset register.  Returns 0 or the regmap
 * write's error code.
 */
static int wm5100_reset(struct wm5100_priv *wm5100)
{
	if (!wm5100->pdata.reset)
		return regmap_write(wm5100->regmap, WM5100_SOFTWARE_RESET, 0);

	/* Pulse the reset line: assert (low) then release (high). */
	gpio_set_value_cansleep(wm5100->pdata.reset, 0);
	gpio_set_value_cansleep(wm5100->pdata.reset, 1);

	return 0;
}
/* Gain scales; DECLARE_TLV_DB_SCALE args are (name, min in 0.01dB,
 * step in 0.01dB, mute-at-min flag). */
static DECLARE_TLV_DB_SCALE(in_tlv, -6300, 100, 0);
static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static DECLARE_TLV_DB_SCALE(mixer_tlv, -3200, 100, 0);
static DECLARE_TLV_DB_SCALE(out_tlv, -6400, 100, 0);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
/*
 * Display names for the mixer/mux input sources.  Parallel to
 * wm5100_mixer_values[]: entry N here labels register value
 * wm5100_mixer_values[N] — keep the two arrays in sync.
 */
static const char *wm5100_mixer_texts[] = {
	"None",
	"Tone Generator 1",
	"Tone Generator 2",
	"AEC loopback",
	"IN1L",
	"IN1R",
	"IN2L",
	"IN2R",
	"IN3L",
	"IN3R",
	"IN4L",
	"IN4R",
	"AIF1RX1",
	"AIF1RX2",
	"AIF1RX3",
	"AIF1RX4",
	"AIF1RX5",
	"AIF1RX6",
	"AIF1RX7",
	"AIF1RX8",
	"AIF2RX1",
	"AIF2RX2",
	"AIF3RX1",
	"AIF3RX2",
	"EQ1",
	"EQ2",
	"EQ3",
	"EQ4",
	"DRC1L",
	"DRC1R",
	"LHPF1",
	"LHPF2",
	"LHPF3",
	"LHPF4",
	"DSP1.1",
	"DSP1.2",
	"DSP1.3",
	"DSP1.4",
	"DSP1.5",
	"DSP1.6",
	"DSP2.1",
	"DSP2.2",
	"DSP2.3",
	"DSP2.4",
	"DSP2.5",
	"DSP2.6",
	"DSP3.1",
	"DSP3.2",
	"DSP3.3",
	"DSP3.4",
	"DSP3.5",
	"DSP3.6",
	"ASRC1L",
	"ASRC1R",
	"ASRC2L",
	"ASRC2R",
	"ISRC1INT1",
	"ISRC1INT2",
	"ISRC1INT3",
	"ISRC1INT4",
	"ISRC2INT1",
	"ISRC2INT2",
	"ISRC2INT3",
	"ISRC2INT4",
	"ISRC1DEC1",
	"ISRC1DEC2",
	"ISRC1DEC3",
	"ISRC1DEC4",
	"ISRC2DEC1",
	"ISRC2DEC2",
	"ISRC2DEC3",
	"ISRC2DEC4",
};
/*
 * Register values for the mixer/mux input sources; parallel to
 * wm5100_mixer_texts[] above — keep the two arrays in sync.
 */
static int wm5100_mixer_values[] = {
	0x00,
	0x04,   /* Tone */
	0x05,
	0x08,   /* AEC */
	0x10,   /* Input */
	0x11,
	0x12,
	0x13,
	0x14,
	0x15,
	0x16,
	0x17,
	0x20,   /* AIF */
	0x21,
	0x22,
	0x23,
	0x24,
	0x25,
	0x26,
	0x27,
	0x28,
	0x29,
	0x30,   /* AIF3 - check */
	0x31,
	0x50,   /* EQ */
	0x51,
	0x52,
	0x53,
	0x54,
	0x58,   /* DRC */
	0x59,
	0x60,   /* LHPF1 */
	0x61,   /* LHPF2 */
	0x62,   /* LHPF3 */
	0x63,   /* LHPF4 */
	0x68,   /* DSP1 */
	0x69,
	0x6a,
	0x6b,
	0x6c,
	0x6d,
	0x70,   /* DSP2 */
	0x71,
	0x72,
	0x73,
	0x74,
	0x75,
	0x78,   /* DSP3 */
	0x79,
	0x7a,
	0x7b,
	0x7c,
	0x7d,
	0x90,   /* ASRC1 */
	0x91,
	0x92,   /* ASRC2 */
	0x93,
	0xa0,   /* ISRC1DEC1 */
	0xa1,
	0xa2,
	0xa3,
	0xa4,   /* ISRC1INT1 */
	0xa5,
	0xa6,
	0xa7,
	0xa8,   /* ISRC2DEC1 */
	0xa9,
	0xaa,
	0xab,
	0xac,   /* ISRC2INT1 */
	0xad,
	0xae,
	0xaf,
};
/*
 * Build the four input-volume controls for one mixer.  'base' is the
 * mixer's INPUT_1_SOURCE register; the matching volume registers sit
 * at odd offsets (base+1, +3, +5, +7).
 */
#define WM5100_MIXER_CONTROLS(name, base) \
	SOC_SINGLE_TLV(name " Input 1 Volume", base + 1 , \
		       WM5100_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
	SOC_SINGLE_TLV(name " Input 2 Volume", base + 3 , \
		       WM5100_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
	SOC_SINGLE_TLV(name " Input 3 Volume", base + 5 , \
		       WM5100_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
	SOC_SINGLE_TLV(name " Input 4 Volume", base + 7 , \
		       WM5100_MIXER_VOL_SHIFT, 80, 0, mixer_tlv)

/* Declare one value-enum over the shared source text/value tables. */
#define WM5100_MUX_ENUM_DECL(name, reg) \
	SOC_VALUE_ENUM_SINGLE_DECL(name, reg, 0, 0xff, \
				   wm5100_mixer_texts, wm5100_mixer_values)

/* Declare the kcontrol wrapping a previously declared enum. */
#define WM5100_MUX_CTL_DECL(name) \
	const struct snd_kcontrol_new name##_mux =	\
		SOC_DAPM_ENUM("Route", name##_enum)

/*
 * Declare the four input-select enums and mux controls for one mixer;
 * the input source registers sit at even offsets (base_reg, +2, +4, +6).
 */
#define WM5100_MIXER_ENUMS(name, base_reg) \
	static WM5100_MUX_ENUM_DECL(name##_in1_enum, base_reg);	     \
	static WM5100_MUX_ENUM_DECL(name##_in2_enum, base_reg + 2);  \
	static WM5100_MUX_ENUM_DECL(name##_in3_enum, base_reg + 4);  \
	static WM5100_MUX_ENUM_DECL(name##_in4_enum, base_reg + 6);  \
	static WM5100_MUX_CTL_DECL(name##_in1); \
	static WM5100_MUX_CTL_DECL(name##_in2); \
	static WM5100_MUX_CTL_DECL(name##_in3); \
	static WM5100_MUX_CTL_DECL(name##_in4)
/* Instantiate input-select enums and mux controls for every mixer. */
WM5100_MIXER_ENUMS(HPOUT1L, WM5100_OUT1LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(HPOUT1R, WM5100_OUT1RMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(HPOUT2L, WM5100_OUT2LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(HPOUT2R, WM5100_OUT2RMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(HPOUT3L, WM5100_OUT3LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(HPOUT3R, WM5100_OUT3RMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(SPKOUTL, WM5100_OUT4LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(SPKOUTR, WM5100_OUT4RMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(SPKDAT1L, WM5100_OUT5LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(SPKDAT1R, WM5100_OUT5RMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(SPKDAT2L, WM5100_OUT6LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(SPKDAT2R, WM5100_OUT6RMIX_INPUT_1_SOURCE);

WM5100_MIXER_ENUMS(PWM1, WM5100_PWM1MIX_INPUT_1_SOURCE);
/* Fix: PWM2 must address its own mix source register.  The original
 * passed WM5100_PWM1MIX_INPUT_1_SOURCE here, so the PWM2 muxes
 * read/wrote PWM1's routing; WM5100_MIXER_CONTROLS("PWM2", ...)
 * below already (correctly) uses the PWM2MIX register. */
WM5100_MIXER_ENUMS(PWM2, WM5100_PWM2MIX_INPUT_1_SOURCE);

WM5100_MIXER_ENUMS(AIF1TX1, WM5100_AIF1TX1MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX2, WM5100_AIF1TX2MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX3, WM5100_AIF1TX3MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX4, WM5100_AIF1TX4MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX5, WM5100_AIF1TX5MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX6, WM5100_AIF1TX6MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX7, WM5100_AIF1TX7MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF1TX8, WM5100_AIF1TX8MIX_INPUT_1_SOURCE);

WM5100_MIXER_ENUMS(AIF2TX1, WM5100_AIF2TX1MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF2TX2, WM5100_AIF2TX2MIX_INPUT_1_SOURCE);

/* NOTE(review): AIF3TX reusing the AIF1TX mix source registers is
 * preserved from the original driver — confirm against the register
 * map before changing. */
WM5100_MIXER_ENUMS(AIF3TX1, WM5100_AIF1TX1MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(AIF3TX2, WM5100_AIF1TX2MIX_INPUT_1_SOURCE);

WM5100_MIXER_ENUMS(EQ1, WM5100_EQ1MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(EQ2, WM5100_EQ2MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(EQ3, WM5100_EQ3MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(EQ4, WM5100_EQ4MIX_INPUT_1_SOURCE);

WM5100_MIXER_ENUMS(DRC1L, WM5100_DRC1LMIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(DRC1R, WM5100_DRC1RMIX_INPUT_1_SOURCE);

WM5100_MIXER_ENUMS(LHPF1, WM5100_HPLP1MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(LHPF2, WM5100_HPLP2MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(LHPF3, WM5100_HPLP3MIX_INPUT_1_SOURCE);
WM5100_MIXER_ENUMS(LHPF4, WM5100_HPLP4MIX_INPUT_1_SOURCE);
/* Declare a DAPM mux widget that has no power-control register. */
#define WM5100_MUX(name, ctrl) \
	SND_SOC_DAPM_MUX(name, SND_SOC_NOPM, 0, 0, ctrl)

/* DAPM widgets for one mixer: four input muxes plus a virtual mixer. */
#define WM5100_MIXER_WIDGETS(name, name_str)	\
	WM5100_MUX(name_str " Input 1", &name##_in1_mux), \
	WM5100_MUX(name_str " Input 2", &name##_in2_mux), \
	WM5100_MUX(name_str " Input 3", &name##_in3_mux), \
	WM5100_MUX(name_str " Input 4", &name##_in4_mux), \
	SND_SOC_DAPM_MIXER(name_str " Mixer", SND_SOC_NOPM, 0, 0, NULL, 0)

/* Routes from each selectable source widget into one mux input. */
#define WM5100_MIXER_INPUT_ROUTES(name)	\
	{ name, "Tone Generator 1", "Tone Generator 1" }, \
	{ name, "Tone Generator 2", "Tone Generator 2" }, \
	{ name, "IN1L", "IN1L PGA" }, \
	{ name, "IN1R", "IN1R PGA" }, \
	{ name, "IN2L", "IN2L PGA" }, \
	{ name, "IN2R", "IN2R PGA" }, \
	{ name, "IN3L", "IN3L PGA" }, \
	{ name, "IN3R", "IN3R PGA" }, \
	{ name, "IN4L", "IN4L PGA" }, \
	{ name, "IN4R", "IN4R PGA" }, \
	{ name, "AIF1RX1", "AIF1RX1" }, \
	{ name, "AIF1RX2", "AIF1RX2" }, \
	{ name, "AIF1RX3", "AIF1RX3" }, \
	{ name, "AIF1RX4", "AIF1RX4" }, \
	{ name, "AIF1RX5", "AIF1RX5" }, \
	{ name, "AIF1RX6", "AIF1RX6" }, \
	{ name, "AIF1RX7", "AIF1RX7" }, \
	{ name, "AIF1RX8", "AIF1RX8" }, \
	{ name, "AIF2RX1", "AIF2RX1" }, \
	{ name, "AIF2RX2", "AIF2RX2" }, \
	{ name, "AIF3RX1", "AIF3RX1" }, \
	{ name, "AIF3RX2", "AIF3RX2" }, \
	{ name, "EQ1", "EQ1" }, \
	{ name, "EQ2", "EQ2" }, \
	{ name, "EQ3", "EQ3" }, \
	{ name, "EQ4", "EQ4" }, \
	{ name, "DRC1L", "DRC1L" }, \
	{ name, "DRC1R", "DRC1R" }, \
	{ name, "LHPF1", "LHPF1" }, \
	{ name, "LHPF2", "LHPF2" }, \
	{ name, "LHPF3", "LHPF3" }, \
	{ name, "LHPF4", "LHPF4" }

/* Routes tying one mixer's widget graph together: the output widget
 * depends on the mixer, the mixer on its four inputs, and each input
 * on every selectable source. */
#define WM5100_MIXER_ROUTES(widget, name) \
	{ widget, NULL, name " Mixer" },         \
	{ name " Mixer", NULL, name " Input 1" }, \
	{ name " Mixer", NULL, name " Input 2" }, \
	{ name " Mixer", NULL, name " Input 3" }, \
	{ name " Mixer", NULL, name " Input 4" }, \
	WM5100_MIXER_INPUT_ROUTES(name " Input 1"), \
	WM5100_MIXER_INPUT_ROUTES(name " Input 2"), \
	WM5100_MIXER_INPUT_ROUTES(name " Input 3"), \
	WM5100_MIXER_INPUT_ROUTES(name " Input 4")
/* Mode selector (low-pass vs high-pass) for each of the four LHPF blocks. */
static const char *wm5100_lhpf_mode_text[] = {
	"Low-pass", "High-pass"
};

static SOC_ENUM_SINGLE_DECL(wm5100_lhpf1_mode,
			    WM5100_HPLPF1_1, WM5100_LHPF1_MODE_SHIFT,
			    wm5100_lhpf_mode_text);

static SOC_ENUM_SINGLE_DECL(wm5100_lhpf2_mode,
			    WM5100_HPLPF2_1, WM5100_LHPF2_MODE_SHIFT,
			    wm5100_lhpf_mode_text);

static SOC_ENUM_SINGLE_DECL(wm5100_lhpf3_mode,
			    WM5100_HPLPF3_1, WM5100_LHPF3_MODE_SHIFT,
			    wm5100_lhpf_mode_text);

static SOC_ENUM_SINGLE_DECL(wm5100_lhpf4_mode,
			    WM5100_HPLPF4_1, WM5100_LHPF4_MODE_SHIFT,
			    wm5100_lhpf_mode_text);
/*
 * ALSA mixer controls.  Control name strings are userspace ABI — do
 * not rename them (including the "Coefficeints" typos below).
 */
static const struct snd_kcontrol_new wm5100_snd_controls[] = {
SOC_SINGLE("IN1 High Performance Switch", WM5100_IN1L_CONTROL,
	   WM5100_IN1_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN2 High Performance Switch", WM5100_IN2L_CONTROL,
	   WM5100_IN2_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN3 High Performance Switch", WM5100_IN3L_CONTROL,
	   WM5100_IN3_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN4 High Performance Switch", WM5100_IN4L_CONTROL,
	   WM5100_IN4_OSR_SHIFT, 1, 0),

/* Only applicable for analogue inputs */
SOC_DOUBLE_R_TLV("IN1 Volume", WM5100_IN1L_CONTROL, WM5100_IN1R_CONTROL,
		 WM5100_IN1L_PGA_VOL_SHIFT, 94, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN2 Volume", WM5100_IN2L_CONTROL, WM5100_IN2R_CONTROL,
		 WM5100_IN2L_PGA_VOL_SHIFT, 94, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN3 Volume", WM5100_IN3L_CONTROL, WM5100_IN3R_CONTROL,
		 WM5100_IN3L_PGA_VOL_SHIFT, 94, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN4 Volume", WM5100_IN4L_CONTROL, WM5100_IN4R_CONTROL,
		 WM5100_IN4L_PGA_VOL_SHIFT, 94, 0, in_tlv),

SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM5100_ADC_DIGITAL_VOLUME_1L,
		 WM5100_ADC_DIGITAL_VOLUME_1R, WM5100_IN1L_VOL_SHIFT, 191,
		 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN2 Digital Volume", WM5100_ADC_DIGITAL_VOLUME_2L,
		 WM5100_ADC_DIGITAL_VOLUME_2R, WM5100_IN2L_VOL_SHIFT, 191,
		 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN3 Digital Volume", WM5100_ADC_DIGITAL_VOLUME_3L,
		 WM5100_ADC_DIGITAL_VOLUME_3R, WM5100_IN3L_VOL_SHIFT, 191,
		 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN4 Digital Volume", WM5100_ADC_DIGITAL_VOLUME_4L,
		 WM5100_ADC_DIGITAL_VOLUME_4R, WM5100_IN4L_VOL_SHIFT, 191,
		 0, digital_tlv),

SOC_DOUBLE_R("IN1 Switch", WM5100_ADC_DIGITAL_VOLUME_1L,
	     WM5100_ADC_DIGITAL_VOLUME_1R, WM5100_IN1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN2 Switch", WM5100_ADC_DIGITAL_VOLUME_2L,
	     WM5100_ADC_DIGITAL_VOLUME_2R, WM5100_IN2L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN3 Switch", WM5100_ADC_DIGITAL_VOLUME_3L,
	     WM5100_ADC_DIGITAL_VOLUME_3R, WM5100_IN3L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN4 Switch", WM5100_ADC_DIGITAL_VOLUME_4L,
	     WM5100_ADC_DIGITAL_VOLUME_4R, WM5100_IN4L_MUTE_SHIFT, 1, 1),

SND_SOC_BYTES_MASK("EQ1 Coefficients", WM5100_EQ1_1, 20, WM5100_EQ1_ENA),
SND_SOC_BYTES_MASK("EQ2 Coefficients", WM5100_EQ2_1, 20, WM5100_EQ2_ENA),
SND_SOC_BYTES_MASK("EQ3 Coefficients", WM5100_EQ3_1, 20, WM5100_EQ3_ENA),
SND_SOC_BYTES_MASK("EQ4 Coefficients", WM5100_EQ4_1, 20, WM5100_EQ4_ENA),

SND_SOC_BYTES_MASK("DRC Coefficients", WM5100_DRC1_CTRL1, 5,
		   WM5100_DRCL_ENA | WM5100_DRCR_ENA),

/* "Coefficeints" typo preserved: control names are userspace ABI. */
SND_SOC_BYTES("LHPF1 Coefficeints", WM5100_HPLPF1_2, 1),
SND_SOC_BYTES("LHPF2 Coefficeints", WM5100_HPLPF2_2, 1),
SND_SOC_BYTES("LHPF3 Coefficeints", WM5100_HPLPF3_2, 1),
SND_SOC_BYTES("LHPF4 Coefficeints", WM5100_HPLPF4_2, 1),

SOC_SINGLE("HPOUT1 High Performance Switch", WM5100_OUT_VOLUME_1L,
	   WM5100_OUT1_OSR_SHIFT, 1, 0),
SOC_SINGLE("HPOUT2 High Performance Switch", WM5100_OUT_VOLUME_2L,
	   WM5100_OUT2_OSR_SHIFT, 1, 0),
SOC_SINGLE("HPOUT3 High Performance Switch", WM5100_OUT_VOLUME_3L,
	   WM5100_OUT3_OSR_SHIFT, 1, 0),
SOC_SINGLE("SPKOUT High Performance Switch", WM5100_OUT_VOLUME_4L,
	   WM5100_OUT4_OSR_SHIFT, 1, 0),
SOC_SINGLE("SPKDAT1 High Performance Switch", WM5100_DAC_VOLUME_LIMIT_5L,
	   WM5100_OUT5_OSR_SHIFT, 1, 0),
SOC_SINGLE("SPKDAT2 High Performance Switch", WM5100_DAC_VOLUME_LIMIT_6L,
	   WM5100_OUT6_OSR_SHIFT, 1, 0),

SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", WM5100_DAC_DIGITAL_VOLUME_1L,
		 WM5100_DAC_DIGITAL_VOLUME_1R, WM5100_OUT1L_VOL_SHIFT, 159, 0,
		 digital_tlv),
SOC_DOUBLE_R_TLV("HPOUT2 Digital Volume", WM5100_DAC_DIGITAL_VOLUME_2L,
		 WM5100_DAC_DIGITAL_VOLUME_2R, WM5100_OUT2L_VOL_SHIFT, 159, 0,
		 digital_tlv),
SOC_DOUBLE_R_TLV("HPOUT3 Digital Volume", WM5100_DAC_DIGITAL_VOLUME_3L,
		 WM5100_DAC_DIGITAL_VOLUME_3R, WM5100_OUT3L_VOL_SHIFT, 159, 0,
		 digital_tlv),
SOC_DOUBLE_R_TLV("SPKOUT Digital Volume", WM5100_DAC_DIGITAL_VOLUME_4L,
		 WM5100_DAC_DIGITAL_VOLUME_4R, WM5100_OUT4L_VOL_SHIFT, 159, 0,
		 digital_tlv),
SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", WM5100_DAC_DIGITAL_VOLUME_5L,
		 WM5100_DAC_DIGITAL_VOLUME_5R, WM5100_OUT5L_VOL_SHIFT, 159, 0,
		 digital_tlv),
SOC_DOUBLE_R_TLV("SPKDAT2 Digital Volume", WM5100_DAC_DIGITAL_VOLUME_6L,
		 WM5100_DAC_DIGITAL_VOLUME_6R, WM5100_OUT6L_VOL_SHIFT, 159, 0,
		 digital_tlv),

SOC_DOUBLE_R("HPOUT1 Digital Switch", WM5100_DAC_DIGITAL_VOLUME_1L,
	     WM5100_DAC_DIGITAL_VOLUME_1R, WM5100_OUT1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("HPOUT2 Digital Switch", WM5100_DAC_DIGITAL_VOLUME_2L,
	     WM5100_DAC_DIGITAL_VOLUME_2R, WM5100_OUT2L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("HPOUT3 Digital Switch", WM5100_DAC_DIGITAL_VOLUME_3L,
	     WM5100_DAC_DIGITAL_VOLUME_3R, WM5100_OUT3L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("SPKOUT Digital Switch", WM5100_DAC_DIGITAL_VOLUME_4L,
	     WM5100_DAC_DIGITAL_VOLUME_4R, WM5100_OUT4L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("SPKDAT1 Digital Switch", WM5100_DAC_DIGITAL_VOLUME_5L,
	     WM5100_DAC_DIGITAL_VOLUME_5R, WM5100_OUT5L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("SPKDAT2 Digital Switch", WM5100_DAC_DIGITAL_VOLUME_6L,
	     WM5100_DAC_DIGITAL_VOLUME_6R, WM5100_OUT6L_MUTE_SHIFT, 1, 1),

/* FIXME: Only valid from -12dB to 0dB (52-64) */
SOC_DOUBLE_R_TLV("HPOUT1 Volume", WM5100_OUT_VOLUME_1L, WM5100_OUT_VOLUME_1R,
		 WM5100_OUT1L_PGA_VOL_SHIFT, 64, 0, out_tlv),
SOC_DOUBLE_R_TLV("HPOUT2 Volume", WM5100_OUT_VOLUME_2L, WM5100_OUT_VOLUME_2R,
		 WM5100_OUT2L_PGA_VOL_SHIFT, 64, 0, out_tlv),
/* NOTE(review): uses the OUT2L PGA volume shift — harmless if the
 * shift values are identical across outputs; confirm vs register map. */
SOC_DOUBLE_R_TLV("HPOUT3 Volume", WM5100_OUT_VOLUME_3L, WM5100_OUT_VOLUME_3R,
		 WM5100_OUT2L_PGA_VOL_SHIFT, 64, 0, out_tlv),

SOC_DOUBLE("SPKDAT1 Switch", WM5100_PDM_SPK1_CTRL_1, WM5100_SPK1L_MUTE_SHIFT,
	   WM5100_SPK1R_MUTE_SHIFT, 1, 1),
SOC_DOUBLE("SPKDAT2 Switch", WM5100_PDM_SPK2_CTRL_1, WM5100_SPK2L_MUTE_SHIFT,
	   WM5100_SPK2R_MUTE_SHIFT, 1, 1),

/* EQn bands 1-3 live in register EQn_1, bands 4-5 in EQn_2. */
SOC_SINGLE_TLV("EQ1 Band 1 Volume", WM5100_EQ1_1, WM5100_EQ1_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 Band 2 Volume", WM5100_EQ1_1, WM5100_EQ1_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 Band 3 Volume", WM5100_EQ1_1, WM5100_EQ1_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 Band 4 Volume", WM5100_EQ1_2, WM5100_EQ1_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ1 Band 5 Volume", WM5100_EQ1_2, WM5100_EQ1_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 Band 1 Volume", WM5100_EQ2_1, WM5100_EQ2_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 Band 2 Volume", WM5100_EQ2_1, WM5100_EQ2_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 Band 3 Volume", WM5100_EQ2_1, WM5100_EQ2_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 Band 4 Volume", WM5100_EQ2_2, WM5100_EQ2_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ2 Band 5 Volume", WM5100_EQ2_2, WM5100_EQ2_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),
/* Fix: EQ3 Band 1 must use the EQ3_1 register; the original used
 * WM5100_EQ1_1, applying the EQ3_B1 gain field to EQ1's register. */
SOC_SINGLE_TLV("EQ3 Band 1 Volume", WM5100_EQ3_1, WM5100_EQ3_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 Band 2 Volume", WM5100_EQ3_1, WM5100_EQ3_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 Band 3 Volume", WM5100_EQ3_1, WM5100_EQ3_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 Band 4 Volume", WM5100_EQ3_2, WM5100_EQ3_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ3 Band 5 Volume", WM5100_EQ3_2, WM5100_EQ3_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 Band 1 Volume", WM5100_EQ4_1, WM5100_EQ4_B1_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 Band 2 Volume", WM5100_EQ4_1, WM5100_EQ4_B2_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 Band 3 Volume", WM5100_EQ4_1, WM5100_EQ4_B3_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 Band 4 Volume", WM5100_EQ4_2, WM5100_EQ4_B4_GAIN_SHIFT,
	       24, 0, eq_tlv),
SOC_SINGLE_TLV("EQ4 Band 5 Volume", WM5100_EQ4_2, WM5100_EQ4_B5_GAIN_SHIFT,
	       24, 0, eq_tlv),

SOC_ENUM("LHPF1 Mode", wm5100_lhpf1_mode),
SOC_ENUM("LHPF2 Mode", wm5100_lhpf2_mode),
SOC_ENUM("LHPF3 Mode", wm5100_lhpf3_mode),
SOC_ENUM("LHPF4 Mode", wm5100_lhpf4_mode),

WM5100_MIXER_CONTROLS("HPOUT1L", WM5100_OUT1LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("HPOUT1R", WM5100_OUT1RMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("HPOUT2L", WM5100_OUT2LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("HPOUT2R", WM5100_OUT2RMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("HPOUT3L", WM5100_OUT3LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("HPOUT3R", WM5100_OUT3RMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("SPKOUTL", WM5100_OUT4LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("SPKOUTR", WM5100_OUT4RMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("SPKDAT1L", WM5100_OUT5LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("SPKDAT1R", WM5100_OUT5RMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("SPKDAT2L", WM5100_OUT6LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("SPKDAT2R", WM5100_OUT6RMIX_INPUT_1_SOURCE),

WM5100_MIXER_CONTROLS("PWM1", WM5100_PWM1MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("PWM2", WM5100_PWM2MIX_INPUT_1_SOURCE),

WM5100_MIXER_CONTROLS("AIF1TX1", WM5100_AIF1TX1MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX2", WM5100_AIF1TX2MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX3", WM5100_AIF1TX3MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX4", WM5100_AIF1TX4MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX5", WM5100_AIF1TX5MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX6", WM5100_AIF1TX6MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX7", WM5100_AIF1TX7MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF1TX8", WM5100_AIF1TX8MIX_INPUT_1_SOURCE),

WM5100_MIXER_CONTROLS("AIF2TX1", WM5100_AIF2TX1MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF2TX2", WM5100_AIF2TX2MIX_INPUT_1_SOURCE),

WM5100_MIXER_CONTROLS("AIF3TX1", WM5100_AIF3TX1MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("AIF3TX2", WM5100_AIF3TX2MIX_INPUT_1_SOURCE),

WM5100_MIXER_CONTROLS("EQ1", WM5100_EQ1MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("EQ2", WM5100_EQ2MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("EQ3", WM5100_EQ3MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("EQ4", WM5100_EQ4MIX_INPUT_1_SOURCE),

WM5100_MIXER_CONTROLS("DRC1L", WM5100_DRC1LMIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("DRC1R", WM5100_DRC1RMIX_INPUT_1_SOURCE),
SND_SOC_BYTES_MASK("DRC", WM5100_DRC1_CTRL1, 5,
		   WM5100_DRCL_ENA | WM5100_DRCR_ENA),

WM5100_MIXER_CONTROLS("LHPF1", WM5100_HPLP1MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("LHPF2", WM5100_HPLP2MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("LHPF3", WM5100_HPLP3MIX_INPUT_1_SOURCE),
WM5100_MIXER_CONTROLS("LHPF4", WM5100_HPLP4MIX_INPUT_1_SOURCE),
};
/*
 * DAPM sequence notifier: after a widget power sequence has run,
 * busy-poll (up to 200 reads, no delay between them) the output
 * status registers until every output bank flagged by
 * wm5100_out_ev() reports its outputs as enabled.
 */
static void wm5100_seq_notifier(struct snd_soc_component *component,
				enum snd_soc_dapm_type event, int subseq)
{
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	u16 val, expect, i;

	/* Wait for the outputs to flag themselves as enabled */
	if (wm5100->out_ena[0]) {
		/* The status register mirrors the enable register once
		 * the outputs are up. */
		expect = snd_soc_component_read32(component, WM5100_CHANNEL_ENABLES_1);
		for (i = 0; i < 200; i++) {
			val = snd_soc_component_read32(component, WM5100_OUTPUT_STATUS_1);
			if (val == expect) {
				wm5100->out_ena[0] = false;
				break;
			}
		}
		if (i == 200) {
			dev_err(component->dev, "Timeout waiting for OUTPUT1 %x\n",
				expect);
		}
	}

	if (wm5100->out_ena[1]) {
		expect = snd_soc_component_read32(component, WM5100_OUTPUT_ENABLES_2);
		for (i = 0; i < 200; i++) {
			val = snd_soc_component_read32(component, WM5100_OUTPUT_STATUS_2);
			if (val == expect) {
				wm5100->out_ena[1] = false;
				break;
			}
		}
		if (i == 200) {
			dev_err(component->dev, "Timeout waiting for OUTPUT2 %x\n",
				expect);
		}
	}
}
/*
 * DAPM event callback for output widgets: record which enable bank
 * was written so wm5100_seq_notifier() knows which status register
 * to poll.  Always returns 0.
 */
static int wm5100_out_ev(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol,
			 int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);

	switch (w->reg) {
	case WM5100_CHANNEL_ENABLES_1:
		wm5100->out_ena[0] = true;
		break;
	case WM5100_OUTPUT_ENABLES_2:
		/* Fix: this register pairs with slot 1 in
		 * wm5100_seq_notifier(); the original set out_ena[0]
		 * here, so OUTPUT2 enables were never waited for. */
		wm5100->out_ena[1] = true;
		break;
	default:
		break;
	}

	return 0;
}
/* Log critical speaker/clocking fault bits from raw IRQ status 3. */
static void wm5100_log_status3(struct wm5100_priv *wm5100, int val)
{
	if (val & WM5100_SPK_SHUTDOWN_WARN_EINT)
		dev_crit(wm5100->dev, "Speaker shutdown warning\n");
	if (val & WM5100_SPK_SHUTDOWN_EINT)
		dev_crit(wm5100->dev, "Speaker shutdown\n");
	if (val & WM5100_CLKGEN_ERR_EINT)
		dev_crit(wm5100->dev, "SYSCLK underclocked\n");
	if (val & WM5100_CLKGEN_ERR_ASYNC_EINT)
		dev_crit(wm5100->dev, "ASYNCCLK underclocked\n");
}
/* Log configuration/underclock error bits from raw IRQ status 4. */
static void wm5100_log_status4(struct wm5100_priv *wm5100, int val)
{
	if (val & WM5100_AIF3_ERR_EINT)
		dev_err(wm5100->dev, "AIF3 configuration error\n");
	if (val & WM5100_AIF2_ERR_EINT)
		dev_err(wm5100->dev, "AIF2 configuration error\n");
	if (val & WM5100_AIF1_ERR_EINT)
		dev_err(wm5100->dev, "AIF1 configuration error\n");
	if (val & WM5100_CTRLIF_ERR_EINT)
		dev_err(wm5100->dev, "Control interface error\n");
	if (val & WM5100_ISRC2_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "ISRC2 underclocked\n");
	if (val & WM5100_ISRC1_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "ISRC1 underclocked\n");
	if (val & WM5100_FX_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "FX underclocked\n");
	if (val & WM5100_AIF3_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "AIF3 underclocked\n");
	if (val & WM5100_AIF2_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "AIF2 underclocked\n");
	if (val & WM5100_AIF1_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "AIF1 underclocked\n");
	if (val & WM5100_ASRC_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "ASRC underclocked\n");
	if (val & WM5100_DAC_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "DAC underclocked\n");
	if (val & WM5100_ADC_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "ADC underclocked\n");
	if (val & WM5100_MIXER_UNDERCLOCKED_EINT)
		dev_err(wm5100->dev, "Mixer underclocked\n");
}
/*
 * DAPM post-sequence event: dump any fault conditions latched in the
 * raw interrupt status registers after a power sequence.  Always
 * returns 0.
 */
static int wm5100_post_ev(struct snd_soc_dapm_widget *w,
			  struct snd_kcontrol *kcontrol,
			  int event)
{
	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	int ret;

	/* Only the shutdown/clocking bits are of interest here.
	 * NOTE(review): mask uses *_STS bits while the logger tests
	 * *_EINT bits — bit positions presumably match; confirm
	 * against the register definitions. */
	ret = snd_soc_component_read32(component, WM5100_INTERRUPT_RAW_STATUS_3);
	ret &= WM5100_SPK_SHUTDOWN_WARN_STS |
	       WM5100_SPK_SHUTDOWN_STS | WM5100_CLKGEN_ERR_STS |
	       WM5100_CLKGEN_ERR_ASYNC_STS;
	wm5100_log_status3(wm5100, ret);

	ret = snd_soc_component_read32(component, WM5100_INTERRUPT_RAW_STATUS_4);
	wm5100_log_status4(wm5100, ret);

	return 0;
}
static const struct snd_soc_dapm_widget wm5100_dapm_widgets[] = {
/* Clocking and supply widgets */
SND_SOC_DAPM_SUPPLY("SYSCLK", WM5100_CLOCKING_3, WM5100_SYSCLK_ENA_SHIFT, 0,
		    NULL, 0),
SND_SOC_DAPM_SUPPLY("ASYNCCLK", WM5100_CLOCKING_6, WM5100_ASYNC_CLK_ENA_SHIFT,
		    0, NULL, 0),
/* 20 is presumably the regulator enable delay in ms — confirm against
 * the SND_SOC_DAPM_REGULATOR_SUPPLY definition for this kernel. */
SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD3", 0, 0),
/* Charge pumps; "CP2 Active" uses the CP2 bypass bit inverted, so
 * powering the widget clears bypass. */
SND_SOC_DAPM_SUPPLY("CP1", WM5100_HP_CHARGE_PUMP_1, WM5100_CP1_ENA_SHIFT, 0,
		    NULL, 0),
SND_SOC_DAPM_SUPPLY("CP2", WM5100_MIC_CHARGE_PUMP_1, WM5100_CP2_ENA_SHIFT, 0,
		    NULL, 0),
SND_SOC_DAPM_SUPPLY("CP2 Active", WM5100_MIC_CHARGE_PUMP_1,
		    WM5100_CP2_BYPASS_SHIFT, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS1", WM5100_MIC_BIAS_CTRL_1, WM5100_MICB1_ENA_SHIFT,
		    0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", WM5100_MIC_BIAS_CTRL_2, WM5100_MICB2_ENA_SHIFT,
		    0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS3", WM5100_MIC_BIAS_CTRL_3, WM5100_MICB3_ENA_SHIFT,
		    0, NULL, 0),

/* Input pins and the internal tone generator source */
SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
SND_SOC_DAPM_INPUT("IN2L"),
SND_SOC_DAPM_INPUT("IN2R"),
SND_SOC_DAPM_INPUT("IN3L"),
SND_SOC_DAPM_INPUT("IN3R"),
SND_SOC_DAPM_INPUT("IN4L"),
SND_SOC_DAPM_INPUT("IN4R"),
SND_SOC_DAPM_SIGGEN("TONE"),

/* Input PGAs; wm5100_out_ev runs after power-up */
SND_SOC_DAPM_PGA_E("IN1L PGA", WM5100_INPUT_ENABLES, WM5100_IN1L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN1R PGA", WM5100_INPUT_ENABLES, WM5100_IN1R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN2L PGA", WM5100_INPUT_ENABLES, WM5100_IN2L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN2R PGA", WM5100_INPUT_ENABLES, WM5100_IN2R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN3L PGA", WM5100_INPUT_ENABLES, WM5100_IN3L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN3R PGA", WM5100_INPUT_ENABLES, WM5100_IN3R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN4L PGA", WM5100_INPUT_ENABLES, WM5100_IN4L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("IN4R PGA", WM5100_INPUT_ENABLES, WM5100_IN4R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),

SND_SOC_DAPM_PGA("Tone Generator 1", WM5100_TONE_GENERATOR_1,
		 WM5100_TONE1_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("Tone Generator 2", WM5100_TONE_GENERATOR_1,
		 WM5100_TONE2_ENA_SHIFT, 0, NULL, 0),

/* Audio interface receive (playback) channels */
SND_SOC_DAPM_AIF_IN("AIF1RX1", "AIF1 Playback", 0,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX2", "AIF1 Playback", 1,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX3", "AIF1 Playback", 2,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX4", "AIF1 Playback", 3,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX5", "AIF1 Playback", 4,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX6", "AIF1 Playback", 5,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX7", "AIF1 Playback", 6,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX7_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX8", "AIF1 Playback", 7,
		    WM5100_AUDIO_IF_1_27, WM5100_AIF1RX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX1", "AIF2 Playback", 0,
		    WM5100_AUDIO_IF_2_27, WM5100_AIF2RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF2RX2", "AIF2 Playback", 1,
		    WM5100_AUDIO_IF_2_27, WM5100_AIF2RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF3RX1", "AIF3 Playback", 0,
		    WM5100_AUDIO_IF_3_27, WM5100_AIF3RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF3RX2", "AIF3 Playback", 1,
		    WM5100_AUDIO_IF_3_27, WM5100_AIF3RX2_ENA_SHIFT, 0),

/* Audio interface transmit (capture) channels */
SND_SOC_DAPM_AIF_OUT("AIF1TX1", "AIF1 Capture", 0,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX2", "AIF1 Capture", 1,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX3", "AIF1 Capture", 2,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX4", "AIF1 Capture", 3,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX5", "AIF1 Capture", 4,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX6", "AIF1 Capture", 5,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX7", "AIF1 Capture", 6,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX7_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX8", "AIF1 Capture", 7,
		     WM5100_AUDIO_IF_1_26, WM5100_AIF1TX8_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX1", "AIF2 Capture", 0,
		     WM5100_AUDIO_IF_2_26, WM5100_AIF2TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF2TX2", "AIF2 Capture", 1,
		     WM5100_AUDIO_IF_2_26, WM5100_AIF2TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF3TX1", "AIF3 Capture", 0,
		     WM5100_AUDIO_IF_3_26, WM5100_AIF3TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF3TX2", "AIF3 Capture", 1,
		     WM5100_AUDIO_IF_3_26, WM5100_AIF3TX2_ENA_SHIFT, 0),

/* Output drivers, again with the post power-up event */
SND_SOC_DAPM_PGA_E("OUT6L", WM5100_OUTPUT_ENABLES_2, WM5100_OUT6L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT6R", WM5100_OUTPUT_ENABLES_2, WM5100_OUT6R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5L", WM5100_OUTPUT_ENABLES_2, WM5100_OUT5L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT5R", WM5100_OUTPUT_ENABLES_2, WM5100_OUT5R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT4L", WM5100_OUTPUT_ENABLES_2, WM5100_OUT4L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT4R", WM5100_OUTPUT_ENABLES_2, WM5100_OUT4R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3L", WM5100_CHANNEL_ENABLES_1, WM5100_HP3L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT3R", WM5100_CHANNEL_ENABLES_1, WM5100_HP3R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT2L", WM5100_CHANNEL_ENABLES_1, WM5100_HP2L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT2R", WM5100_CHANNEL_ENABLES_1, WM5100_HP2R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT1L", WM5100_CHANNEL_ENABLES_1, WM5100_HP1L_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("OUT1R", WM5100_CHANNEL_ENABLES_1, WM5100_HP1R_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("PWM1 Driver", WM5100_PWM_DRIVE_1, WM5100_PWM1_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_PGA_E("PWM2 Driver", WM5100_PWM_DRIVE_1, WM5100_PWM2_ENA_SHIFT, 0,
		   NULL, 0, wm5100_out_ev, SND_SOC_DAPM_POST_PMU),

/* Signal processing blocks: EQ, dynamic range control, low/high pass */
SND_SOC_DAPM_PGA("EQ1", WM5100_EQ1_1, WM5100_EQ1_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQ2", WM5100_EQ2_1, WM5100_EQ2_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQ3", WM5100_EQ3_1, WM5100_EQ3_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQ4", WM5100_EQ4_1, WM5100_EQ4_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("DRC1L", WM5100_DRC1_CTRL1, WM5100_DRCL_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("DRC1R", WM5100_DRC1_CTRL1, WM5100_DRCR_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("LHPF1", WM5100_HPLPF1_1, WM5100_LHPF1_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("LHPF2", WM5100_HPLPF2_1, WM5100_LHPF2_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("LHPF3", WM5100_HPLPF3_1, WM5100_LHPF3_ENA_SHIFT, 0,
		 NULL, 0),
SND_SOC_DAPM_PGA("LHPF4", WM5100_HPLPF4_1, WM5100_LHPF4_ENA_SHIFT, 0,
		 NULL, 0),

/* Per-sink input mixer widget sets (macro expands to mux/mixer widgets) */
WM5100_MIXER_WIDGETS(EQ1, "EQ1"),
WM5100_MIXER_WIDGETS(EQ2, "EQ2"),
WM5100_MIXER_WIDGETS(EQ3, "EQ3"),
WM5100_MIXER_WIDGETS(EQ4, "EQ4"),
WM5100_MIXER_WIDGETS(DRC1L, "DRC1L"),
WM5100_MIXER_WIDGETS(DRC1R, "DRC1R"),
WM5100_MIXER_WIDGETS(LHPF1, "LHPF1"),
WM5100_MIXER_WIDGETS(LHPF2, "LHPF2"),
WM5100_MIXER_WIDGETS(LHPF3, "LHPF3"),
WM5100_MIXER_WIDGETS(LHPF4, "LHPF4"),
WM5100_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
WM5100_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
WM5100_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
WM5100_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
WM5100_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
WM5100_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
WM5100_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"),
WM5100_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"),
WM5100_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"),
WM5100_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
WM5100_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"),
WM5100_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"),
WM5100_MIXER_WIDGETS(HPOUT1L, "HPOUT1L"),
WM5100_MIXER_WIDGETS(HPOUT1R, "HPOUT1R"),
WM5100_MIXER_WIDGETS(HPOUT2L, "HPOUT2L"),
WM5100_MIXER_WIDGETS(HPOUT2R, "HPOUT2R"),
WM5100_MIXER_WIDGETS(HPOUT3L, "HPOUT3L"),
WM5100_MIXER_WIDGETS(HPOUT3R, "HPOUT3R"),
WM5100_MIXER_WIDGETS(SPKOUTL, "SPKOUTL"),
WM5100_MIXER_WIDGETS(SPKOUTR, "SPKOUTR"),
WM5100_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"),
WM5100_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"),
WM5100_MIXER_WIDGETS(SPKDAT2L, "SPKDAT2L"),
WM5100_MIXER_WIDGETS(SPKDAT2R, "SPKDAT2R"),
WM5100_MIXER_WIDGETS(PWM1, "PWM1"),
WM5100_MIXER_WIDGETS(PWM2, "PWM2"),

/* Board-level output pins */
SND_SOC_DAPM_OUTPUT("HPOUT1L"),
SND_SOC_DAPM_OUTPUT("HPOUT1R"),
SND_SOC_DAPM_OUTPUT("HPOUT2L"),
SND_SOC_DAPM_OUTPUT("HPOUT2R"),
SND_SOC_DAPM_OUTPUT("HPOUT3L"),
SND_SOC_DAPM_OUTPUT("HPOUT3R"),
SND_SOC_DAPM_OUTPUT("SPKOUTL"),
SND_SOC_DAPM_OUTPUT("SPKOUTR"),
SND_SOC_DAPM_OUTPUT("SPKDAT1"),
SND_SOC_DAPM_OUTPUT("SPKDAT2"),
SND_SOC_DAPM_OUTPUT("PWM1"),
SND_SOC_DAPM_OUTPUT("PWM2"),
};
/* We register a _POST event if we don't have IRQ support so we can
 * look at the error status from the CODEC - if we've got the IRQ
 * hooked up then we will get prompted to look by an interrupt.
 */
static const struct snd_soc_dapm_widget wm5100_dapm_widgets_noirq[] = {
	/* Runs wm5100_post_ev() after every DAPM power sequence */
	SND_SOC_DAPM_POST("Post", wm5100_post_ev),
};
static const struct snd_soc_dapm_route wm5100_dapm_routes[] = {
	/* Charge pumps run from CPVDD */
	{ "CP1", NULL, "CPVDD" },
	{ "CP2 Active", NULL, "CPVDD" },

	/* All audio paths depend on SYSCLK; AIF2/3 additionally need
	 * their interface supplies (DBVDD2/DBVDD3). */
	{ "IN1L", NULL, "SYSCLK" },
	{ "IN1R", NULL, "SYSCLK" },
	{ "IN2L", NULL, "SYSCLK" },
	{ "IN2R", NULL, "SYSCLK" },
	{ "IN3L", NULL, "SYSCLK" },
	{ "IN3R", NULL, "SYSCLK" },
	{ "IN4L", NULL, "SYSCLK" },
	{ "IN4R", NULL, "SYSCLK" },

	{ "OUT1L", NULL, "SYSCLK" },
	{ "OUT1R", NULL, "SYSCLK" },
	{ "OUT2L", NULL, "SYSCLK" },
	{ "OUT2R", NULL, "SYSCLK" },
	{ "OUT3L", NULL, "SYSCLK" },
	{ "OUT3R", NULL, "SYSCLK" },
	{ "OUT4L", NULL, "SYSCLK" },
	{ "OUT4R", NULL, "SYSCLK" },
	{ "OUT5L", NULL, "SYSCLK" },
	{ "OUT5R", NULL, "SYSCLK" },
	{ "OUT6L", NULL, "SYSCLK" },
	{ "OUT6R", NULL, "SYSCLK" },

	{ "AIF1RX1", NULL, "SYSCLK" },
	{ "AIF1RX2", NULL, "SYSCLK" },
	{ "AIF1RX3", NULL, "SYSCLK" },
	{ "AIF1RX4", NULL, "SYSCLK" },
	{ "AIF1RX5", NULL, "SYSCLK" },
	{ "AIF1RX6", NULL, "SYSCLK" },
	{ "AIF1RX7", NULL, "SYSCLK" },
	{ "AIF1RX8", NULL, "SYSCLK" },

	{ "AIF2RX1", NULL, "SYSCLK" },
	{ "AIF2RX1", NULL, "DBVDD2" },
	{ "AIF2RX2", NULL, "SYSCLK" },
	{ "AIF2RX2", NULL, "DBVDD2" },

	{ "AIF3RX1", NULL, "SYSCLK" },
	{ "AIF3RX1", NULL, "DBVDD3" },
	{ "AIF3RX2", NULL, "SYSCLK" },
	{ "AIF3RX2", NULL, "DBVDD3" },

	{ "AIF1TX1", NULL, "SYSCLK" },
	{ "AIF1TX2", NULL, "SYSCLK" },
	{ "AIF1TX3", NULL, "SYSCLK" },
	{ "AIF1TX4", NULL, "SYSCLK" },
	{ "AIF1TX5", NULL, "SYSCLK" },
	{ "AIF1TX6", NULL, "SYSCLK" },
	{ "AIF1TX7", NULL, "SYSCLK" },
	{ "AIF1TX8", NULL, "SYSCLK" },

	{ "AIF2TX1", NULL, "SYSCLK" },
	{ "AIF2TX1", NULL, "DBVDD2" },
	{ "AIF2TX2", NULL, "SYSCLK" },
	{ "AIF2TX2", NULL, "DBVDD2" },

	{ "AIF3TX1", NULL, "SYSCLK" },
	{ "AIF3TX1", NULL, "DBVDD3" },
	{ "AIF3TX2", NULL, "SYSCLK" },
	{ "AIF3TX2", NULL, "DBVDD3" },

	/* Microphone bias and the input PGAs need the mic charge pump */
	{ "MICBIAS1", NULL, "CP2" },
	{ "MICBIAS2", NULL, "CP2" },
	{ "MICBIAS3", NULL, "CP2" },

	{ "IN1L PGA", NULL, "CP2" },
	{ "IN1R PGA", NULL, "CP2" },
	{ "IN2L PGA", NULL, "CP2" },
	{ "IN2R PGA", NULL, "CP2" },
	{ "IN3L PGA", NULL, "CP2" },
	{ "IN3R PGA", NULL, "CP2" },
	{ "IN4L PGA", NULL, "CP2" },
	{ "IN4R PGA", NULL, "CP2" },

	{ "IN1L PGA", NULL, "CP2 Active" },
	{ "IN1R PGA", NULL, "CP2 Active" },
	{ "IN2L PGA", NULL, "CP2 Active" },
	{ "IN2R PGA", NULL, "CP2 Active" },
	{ "IN3L PGA", NULL, "CP2 Active" },
	{ "IN3R PGA", NULL, "CP2 Active" },
	{ "IN4L PGA", NULL, "CP2 Active" },
	{ "IN4R PGA", NULL, "CP2 Active" },

	/* Headphone outputs need the headphone charge pump */
	{ "OUT1L", NULL, "CP1" },
	{ "OUT1R", NULL, "CP1" },
	{ "OUT2L", NULL, "CP1" },
	{ "OUT2R", NULL, "CP1" },
	{ "OUT3L", NULL, "CP1" },
	{ "OUT3R", NULL, "CP1" },

	{ "Tone Generator 1", NULL, "TONE" },
	{ "Tone Generator 2", NULL, "TONE" },

	/* Physical input pins into their PGAs */
	{ "IN1L PGA", NULL, "IN1L" },
	{ "IN1R PGA", NULL, "IN1R" },
	{ "IN2L PGA", NULL, "IN2L" },
	{ "IN2R PGA", NULL, "IN2R" },
	{ "IN3L PGA", NULL, "IN3L" },
	{ "IN3R PGA", NULL, "IN3R" },
	{ "IN4L PGA", NULL, "IN4L" },
	{ "IN4R PGA", NULL, "IN4R" },

	/* Mixer routing into each sink (macro expands per-source routes) */
	WM5100_MIXER_ROUTES("OUT1L", "HPOUT1L"),
	WM5100_MIXER_ROUTES("OUT1R", "HPOUT1R"),
	WM5100_MIXER_ROUTES("OUT2L", "HPOUT2L"),
	WM5100_MIXER_ROUTES("OUT2R", "HPOUT2R"),
	WM5100_MIXER_ROUTES("OUT3L", "HPOUT3L"),
	WM5100_MIXER_ROUTES("OUT3R", "HPOUT3R"),
	WM5100_MIXER_ROUTES("OUT4L", "SPKOUTL"),
	WM5100_MIXER_ROUTES("OUT4R", "SPKOUTR"),
	WM5100_MIXER_ROUTES("OUT5L", "SPKDAT1L"),
	WM5100_MIXER_ROUTES("OUT5R", "SPKDAT1R"),
	WM5100_MIXER_ROUTES("OUT6L", "SPKDAT2L"),
	WM5100_MIXER_ROUTES("OUT6R", "SPKDAT2R"),

	WM5100_MIXER_ROUTES("PWM1 Driver", "PWM1"),
	WM5100_MIXER_ROUTES("PWM2 Driver", "PWM2"),

	WM5100_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
	WM5100_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
	WM5100_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
	WM5100_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
	WM5100_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
	WM5100_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
	WM5100_MIXER_ROUTES("AIF1TX7", "AIF1TX7"),
	WM5100_MIXER_ROUTES("AIF1TX8", "AIF1TX8"),

	WM5100_MIXER_ROUTES("AIF2TX1", "AIF2TX1"),
	WM5100_MIXER_ROUTES("AIF2TX2", "AIF2TX2"),

	WM5100_MIXER_ROUTES("AIF3TX1", "AIF3TX1"),
	WM5100_MIXER_ROUTES("AIF3TX2", "AIF3TX2"),

	WM5100_MIXER_ROUTES("EQ1", "EQ1"),
	WM5100_MIXER_ROUTES("EQ2", "EQ2"),
	WM5100_MIXER_ROUTES("EQ3", "EQ3"),
	WM5100_MIXER_ROUTES("EQ4", "EQ4"),

	WM5100_MIXER_ROUTES("DRC1L", "DRC1L"),
	WM5100_MIXER_ROUTES("DRC1R", "DRC1R"),

	WM5100_MIXER_ROUTES("LHPF1", "LHPF1"),
	WM5100_MIXER_ROUTES("LHPF2", "LHPF2"),
	WM5100_MIXER_ROUTES("LHPF3", "LHPF3"),
	WM5100_MIXER_ROUTES("LHPF4", "LHPF4"),

	/* Output pins from their drivers */
	{ "HPOUT1L", NULL, "OUT1L" },
	{ "HPOUT1R", NULL, "OUT1R" },
	{ "HPOUT2L", NULL, "OUT2L" },
	{ "HPOUT2R", NULL, "OUT2R" },
	{ "HPOUT3L", NULL, "OUT3L" },
	{ "HPOUT3R", NULL, "OUT3R" },
	{ "SPKOUTL", NULL, "OUT4L" },
	{ "SPKOUTR", NULL, "OUT4R" },
	{ "SPKDAT1", NULL, "OUT5L" },
	{ "SPKDAT1", NULL, "OUT5R" },
	{ "SPKDAT2", NULL, "OUT6L" },
	{ "SPKDAT2", NULL, "OUT6R" },
	{ "PWM1", NULL, "PWM1 Driver" },
	{ "PWM2", NULL, "PWM2 Driver" },
};
/* Register patch applied to rev A silicon.  The values give each AIF
 * channel a sequential slot number (0-7 for AIF1's eight TX and RX
 * channels, 0-1 for AIF2/AIF3) — presumably correcting the rev A
 * register defaults; confirm against the WM5100 datasheet errata.
 */
static const struct reg_sequence wm5100_reva_patches[] = {
	{ WM5100_AUDIO_IF_1_10, 0 },
	{ WM5100_AUDIO_IF_1_11, 1 },
	{ WM5100_AUDIO_IF_1_12, 2 },
	{ WM5100_AUDIO_IF_1_13, 3 },
	{ WM5100_AUDIO_IF_1_14, 4 },
	{ WM5100_AUDIO_IF_1_15, 5 },
	{ WM5100_AUDIO_IF_1_16, 6 },
	{ WM5100_AUDIO_IF_1_17, 7 },
	{ WM5100_AUDIO_IF_1_18, 0 },
	{ WM5100_AUDIO_IF_1_19, 1 },
	{ WM5100_AUDIO_IF_1_20, 2 },
	{ WM5100_AUDIO_IF_1_21, 3 },
	{ WM5100_AUDIO_IF_1_22, 4 },
	{ WM5100_AUDIO_IF_1_23, 5 },
	{ WM5100_AUDIO_IF_1_24, 6 },
	{ WM5100_AUDIO_IF_1_25, 7 },
	{ WM5100_AUDIO_IF_2_10, 0 },
	{ WM5100_AUDIO_IF_2_11, 1 },
	{ WM5100_AUDIO_IF_2_18, 0 },
	{ WM5100_AUDIO_IF_2_19, 1 },
	{ WM5100_AUDIO_IF_3_10, 0 },
	{ WM5100_AUDIO_IF_3_11, 1 },
	{ WM5100_AUDIO_IF_3_18, 0 },
	{ WM5100_AUDIO_IF_3_19, 1 },
};
/* set_fmt DAI callback: program DAI format, clock mastering and clock
 * inversion for the AIF whose register block starts at dai->driver->base.
 *
 * Supports DSP mode A and I2S with all master/slave and inversion
 * combinations.  Returns 0 on success or -EINVAL for an unsupported
 * format.
 */
static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct snd_soc_component *component = dai->component;
	int base = dai->driver->base;
	int lrclk = 0;
	int bclk = 0;
	int aif_fmt;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
		aif_fmt = 0;
		break;
	case SND_SOC_DAIFMT_I2S:
		aif_fmt = 2;
		break;
	default:
		dev_err(component->dev, "Unsupported DAI format %d\n",
			fmt & SND_SOC_DAIFMT_FORMAT_MASK);
		return -EINVAL;
	}

	/* Collect master flags for BCLK and LRCLK independently */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		bclk |= WM5100_AIF1_BCLK_MSTR;
		lrclk |= WM5100_AIF1TX_LRCLK_MSTR;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		bclk |= WM5100_AIF1_BCLK_MSTR;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		lrclk |= WM5100_AIF1TX_LRCLK_MSTR;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	default:
		dev_err(component->dev, "Unsupported master mode %d\n",
			fmt & SND_SOC_DAIFMT_MASTER_MASK);
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_NB_IF:
		lrclk |= WM5100_AIF1TX_LRCLK_INV;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		bclk |= WM5100_AIF1_BCLK_INV;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		bclk |= WM5100_AIF1_BCLK_INV;
		lrclk |= WM5100_AIF1TX_LRCLK_INV;
		break;
	default:
		return -EINVAL;
	}

	/* base + 1: BCLK control; +2/+3: TX/RX LRCLK control; +5: format */
	snd_soc_component_update_bits(component, base + 1,
				      WM5100_AIF1_BCLK_MSTR |
				      WM5100_AIF1_BCLK_INV, bclk);
	snd_soc_component_update_bits(component, base + 2,
				      WM5100_AIF1TX_LRCLK_MSTR |
				      WM5100_AIF1TX_LRCLK_INV, lrclk);
	snd_soc_component_update_bits(component, base + 3,
				      WM5100_AIF1TX_LRCLK_MSTR |
				      WM5100_AIF1TX_LRCLK_INV, lrclk);
	snd_soc_component_update_bits(component, base + 5,
				      WM5100_AIF1_FMT_MASK, aif_fmt);

	return 0;
}
#define WM5100_NUM_BCLK_RATES 19
static int wm5100_bclk_rates_dat[WM5100_NUM_BCLK_RATES] = {
32000,
48000,
64000,
96000,
128000,
192000,
256000,
384000,
512000,
768000,
1024000,
1536000,
2048000,
3072000,
4096000,
6144000,
8192000,
12288000,
24576000,
};
static int wm5100_bclk_rates_cd[WM5100_NUM_BCLK_RATES] = {
29400,
44100,
58800,
88200,
117600,
176400,
235200,
352800,
470400,
705600,
940800,
1411200,
1881600,
2882400,
3763200,
5644800,
7526400,
11289600,
22579600,
};
/* hw_params DAI callback: program sample rate, BCLK frequency, BCLKs
 * per LRCLK and word/slot length for the AIF at dai->driver->base.
 *
 * The BCLK is the lowest table entry that is >= the rate implied by the
 * stream parameters and an exact multiple of it; the 44.1kHz-family
 * table is used when the clock domain rate is not a multiple of 4kHz.
 * Returns 0 on success or a negative errno.
 */
static int wm5100_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_component *component = dai->component;
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	bool async = wm5100->aif_async[dai->id];
	int i, base, bclk, aif_rate, lrclk, wl, fl, sr;
	int *bclk_rates;

	base = dai->driver->base;

	/* Data sizes if not using TDM */
	wl = params_width(params);
	if (wl < 0)
		return wl;
	fl = snd_soc_params_to_frame_size(params);
	if (fl < 0)
		return fl;

	dev_dbg(component->dev, "Word length %d bits, frame length %d bits\n",
		wl, fl);

	/* Target BCLK rate */
	bclk = snd_soc_params_to_bclk(params);
	if (bclk < 0)
		return bclk;

	/* Root for BCLK depends on SYS/ASYNCCLK */
	if (!async) {
		aif_rate = wm5100->sysclk;
		sr = wm5100_alloc_sr(component, params_rate(params));
		if (sr < 0)
			return sr;
	} else {
		/* If we're in ASYNCCLK set the ASYNC sample rate */
		aif_rate = wm5100->asyncclk;
		sr = 3;

		for (i = 0; i < ARRAY_SIZE(wm5100_sr_code); i++)
			if (params_rate(params) == wm5100_sr_code[i])
				break;
		if (i == ARRAY_SIZE(wm5100_sr_code)) {
			/* Fixed: newline escape was mistyped as "Hzn" */
			dev_err(component->dev, "Invalid rate %dHz\n",
				params_rate(params));
			return -EINVAL;
		}

		/* TODO: We should really check for symmetry */

		snd_soc_component_update_bits(component, WM5100_CLOCKING_8,
					      WM5100_ASYNC_SAMPLE_RATE_MASK, i);
	}

	if (!aif_rate) {
		dev_err(component->dev, "%s has no rate set\n",
			async ? "ASYNCCLK" : "SYSCLK");
		return -EINVAL;
	}

	dev_dbg(component->dev, "Target BCLK is %dHz, using %dHz %s\n",
		bclk, aif_rate, async ? "ASYNCCLK" : "SYSCLK");

	/* 44.1kHz-family clock rates are not multiples of 4kHz */
	if (aif_rate % 4000)
		bclk_rates = wm5100_bclk_rates_cd;
	else
		bclk_rates = wm5100_bclk_rates_dat;

	for (i = 0; i < WM5100_NUM_BCLK_RATES; i++)
		if (bclk_rates[i] >= bclk && (bclk_rates[i] % bclk == 0))
			break;
	if (i == WM5100_NUM_BCLK_RATES) {
		dev_err(component->dev,
			"No valid BCLK for %dHz found from %dHz %s\n",
			bclk, aif_rate, async ? "ASYNCCLK" : "SYSCLK");
		return -EINVAL;
	}

	/* The register field takes the table index, not the rate */
	bclk = i;
	dev_dbg(component->dev, "Setting %dHz BCLK\n", bclk_rates[bclk]);
	snd_soc_component_update_bits(component, base + 1, WM5100_AIF1_BCLK_FREQ_MASK, bclk);

	lrclk = bclk_rates[bclk] / params_rate(params);
	dev_dbg(component->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
	    wm5100->aif_symmetric[dai->id])
		snd_soc_component_update_bits(component, base + 7,
					      WM5100_AIF1RX_BCPF_MASK, lrclk);
	else
		snd_soc_component_update_bits(component, base + 6,
					      WM5100_AIF1TX_BCPF_MASK, lrclk);

	i = (wl << WM5100_AIF1TX_WL_SHIFT) | fl;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_soc_component_update_bits(component, base + 9,
					      WM5100_AIF1RX_WL_MASK |
					      WM5100_AIF1RX_SLOT_LEN_MASK, i);
	else
		snd_soc_component_update_bits(component, base + 8,
					      WM5100_AIF1TX_WL_MASK |
					      WM5100_AIF1TX_SLOT_LEN_MASK, i);

	snd_soc_component_update_bits(component, base + 4, WM5100_AIF1_RATE_MASK, sr);

	return 0;
}
/* Shared DAI callbacks for all three AIFs */
static const struct snd_soc_dai_ops wm5100_dai_ops = {
	.set_fmt = wm5100_set_fmt,
	.hw_params = wm5100_hw_params,
};
/* set_sysclk component callback: configure SYSCLK, ASYNCCLK, the 32kHz
 * clock, OPCLK, or record the clock domain for an AIF.
 *
 * @clk_id: WM5100_CLK_* identifier
 * @source: WM5100_CLKSRC_* for the chosen clock
 * @freq: clock rate in Hz (where applicable)
 * @dir: unused
 *
 * For SYSCLK/ASYNCCLK the frequency and source are written to the
 * clocking register and the rate is cached; for SYSCLK the natural
 * sample rate for the clock family is also allocated.  Returns 0 on
 * success or -EINVAL for an unsupported clock/source/rate.
 */
static int wm5100_set_sysclk(struct snd_soc_component *component, int clk_id,
			     int source, unsigned int freq, int dir)
{
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	int *rate_store;
	int fval, audio_rate, ret, reg;

	switch (clk_id) {
	case WM5100_CLK_SYSCLK:
		reg = WM5100_CLOCKING_3;
		rate_store = &wm5100->sysclk;
		break;
	case WM5100_CLK_ASYNCCLK:
		reg = WM5100_CLOCKING_7;
		rate_store = &wm5100->asyncclk;
		break;
	case WM5100_CLK_32KHZ:
		/* The 32kHz clock is slightly different to the others */
		switch (source) {
		case WM5100_CLKSRC_MCLK1:
		case WM5100_CLKSRC_MCLK2:
		case WM5100_CLKSRC_SYSCLK:
			snd_soc_component_update_bits(component, WM5100_CLOCKING_1,
						      WM5100_CLK_32K_SRC_MASK,
						      source);
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case WM5100_CLK_AIF1:
	case WM5100_CLK_AIF2:
	case WM5100_CLK_AIF3:
		/* Not real clocks, record which clock domain they're in */
		switch (source) {
		case WM5100_CLKSRC_SYSCLK:
			wm5100->aif_async[clk_id - 1] = false;
			break;
		case WM5100_CLKSRC_ASYNCCLK:
			wm5100->aif_async[clk_id - 1] = true;
			break;
		default:
			dev_err(component->dev, "Invalid source %d\n", source);
			return -EINVAL;
		}
		return 0;
	case WM5100_CLK_OPCLK:
		/* Fix: the three frequency groups all wrote OPCLK_SEL=0,
		 * making the switch a no-op; they select settings 0, 1
		 * and 2 respectively (cross-check with the datasheet).
		 */
		switch (freq) {
		case 5644800:
		case 6144000:
			snd_soc_component_update_bits(component, WM5100_MISC_GPIO_1,
						      WM5100_OPCLK_SEL_MASK, 0);
			break;
		case 11289600:
		case 12288000:
			snd_soc_component_update_bits(component, WM5100_MISC_GPIO_1,
						      WM5100_OPCLK_SEL_MASK, 1);
			break;
		case 22579200:
		case 24576000:
			snd_soc_component_update_bits(component, WM5100_MISC_GPIO_1,
						      WM5100_OPCLK_SEL_MASK, 2);
			break;
		default:
			dev_err(component->dev, "Unsupported OPCLK %dHz\n",
				freq);
			return -EINVAL;
		}
		return 0;
	default:
		dev_err(component->dev, "Unknown clock %d\n", clk_id);
		return -EINVAL;
	}

	/* SYSCLK/ASYNCCLK cannot be sourced from themselves */
	switch (source) {
	case WM5100_CLKSRC_SYSCLK:
	case WM5100_CLKSRC_ASYNCCLK:
		dev_err(component->dev, "Invalid source %d\n", source);
		return -EINVAL;
	}

	switch (freq) {
	case 5644800:
	case 6144000:
		fval = 0;
		break;
	case 11289600:
	case 12288000:
		fval = 1;
		break;
	case 22579200:
	case 24576000:
		fval = 2;
		break;
	default:
		dev_err(component->dev, "Invalid clock rate: %d\n", freq);
		return -EINVAL;
	}

	/* Natural sample rate for the clock family */
	switch (freq) {
	case 5644800:
	case 11289600:
	case 22579200:
		audio_rate = 44100;
		break;
	case 6144000:
	case 12288000:
	case 24576000:
		audio_rate = 48000;
		break;
	default:
		BUG();
		audio_rate = 0;
		break;
	}

	/* TODO: Check if MCLKs are in use and enable/disable pulls to
	 * match.
	 */

	snd_soc_component_update_bits(component, reg, WM5100_SYSCLK_FREQ_MASK |
				      WM5100_SYSCLK_SRC_MASK,
				      fval << WM5100_SYSCLK_FREQ_SHIFT | source);

	/* If this is SYSCLK then configure the clock rate for the
	 * internal audio functions to the natural sample rate for
	 * this clock rate.
	 */
	if (clk_id == WM5100_CLK_SYSCLK) {
		dev_dbg(component->dev, "Setting primary audio rate to %dHz",
			audio_rate);
		/* NOTE(review): deliberately disabled; if ever enabled it
		 * should free the previously allocated rate, not
		 * audio_rate.  Left as-is to preserve behaviour.
		 */
		if (0 && *rate_store)
			wm5100_free_sr(component, audio_rate);
		ret = wm5100_alloc_sr(component, audio_rate);
		if (ret != 0)
			dev_warn(component->dev, "Primary audio slot is %d\n",
				 ret);
	}

	*rate_store = freq;

	return 0;
}
/* Computed FLL configuration produced by fll_factors(); one field per
 * FLL register field. */
struct _fll_div {
	u16 fll_fratio;		/* FLLn_FRATIO register value */
	u16 fll_outdiv;		/* FLLn_OUTDIV register value (divider - 1) */
	u16 fll_refclk_div;	/* log2 of the reference clock pre-divider */
	u16 n;			/* integer part of the multiply ratio */
	u16 theta;		/* fractional numerator (0 for integer ratios) */
	u16 lambda;		/* fractional denominator (0 for integer ratios) */
};
/* Reference-frequency bands mapped to FLL_FRATIO register settings;
 * @ratio is the multiplier implied by @fll_fratio and is factored into
 * the N/THETA/LAMBDA calculation in fll_factors(). */
static struct {
	unsigned int min;	/* band lower bound, Hz (inclusive) */
	unsigned int max;	/* band upper bound, Hz (inclusive) */
	u16 fll_fratio;
	int ratio;
} fll_fratios[] = {
	{       0,    64000, 4, 16 },
	{   64000,   128000, 3,  8 },
	{  128000,   256000, 2,  4 },
	{  256000,  1000000, 1,  2 },
	{ 1000000, 13500000, 0,  1 },
};
/* Compute the FLL configuration needed to generate Fout from Fref.
 *
 * Scales Fref below 13.5MHz with the reference pre-divider, picks
 * FLL_OUTDIV so Fvco = Fout * div reaches at least 90MHz, selects
 * FLL_FRATIO from the scaled reference frequency, then splits the
 * multiply ratio into integer (N) and fractional (THETA/LAMBDA) parts.
 * Returns 0 on success, -EINVAL if no valid configuration exists.
 */
static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
		       unsigned int Fout)
{
	unsigned int target;
	unsigned int div;
	unsigned int fratio, gcd_fll;
	int i;

	/* Fref must be <=13.5MHz */
	div = 1;
	fll_div->fll_refclk_div = 0;
	while ((Fref / div) > 13500000) {
		div *= 2;
		fll_div->fll_refclk_div++;

		if (div > 8) {
			/* Fixed: Fref is in Hz and unsigned; the old
			 * message printed it as "%dMHz". */
			pr_err("Can't scale %uHz input down to <=13.5MHz\n",
			       Fref);
			return -EINVAL;
		}
	}

	pr_debug("FLL Fref=%u Fout=%u\n", Fref, Fout);

	/* Apply the division for our remaining calculations */
	Fref /= div;

	/* Fvco should be 90-100MHz; don't check the upper bound */
	div = 2;
	while (Fout * div < 90000000) {
		div++;
		if (div > 64) {
			pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
			       Fout);
			return -EINVAL;
		}
	}
	target = Fout * div;
	fll_div->fll_outdiv = div - 1;

	pr_debug("FLL Fvco=%uHz\n", target);

	/* Find an appropriate FLL_FRATIO and factor it out of the target */
	for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
		if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
			fll_div->fll_fratio = fll_fratios[i].fll_fratio;
			fratio = fll_fratios[i].ratio;
			break;
		}
	}
	if (i == ARRAY_SIZE(fll_fratios)) {
		pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
		return -EINVAL;
	}

	fll_div->n = target / (fratio * Fref);

	if (target % Fref == 0) {
		/* Integer ratio: no fractional part needed */
		fll_div->theta = 0;
		fll_div->lambda = 0;
	} else {
		/* Reduce the fraction by the GCD to fit the registers */
		gcd_fll = gcd(target, fratio * Fref);

		fll_div->theta = (target - (fll_div->n * fratio * Fref))
			/ gcd_fll;
		fll_div->lambda = (fratio * Fref) / gcd_fll;
	}

	pr_debug("FLL N=%x THETA=%x LAMBDA=%x\n",
		 fll_div->n, fll_div->theta, fll_div->lambda);
	pr_debug("FLL_FRATIO=%x(%d) FLL_OUTDIV=%x FLL_REFCLK_DIV=%x\n",
		 fll_div->fll_fratio, fratio, fll_div->fll_outdiv,
		 fll_div->fll_refclk_div);

	return 0;
}
static int wm5100_set_fll(struct snd_soc_component *component, int fll_id, int source,
unsigned int Fref, unsigned int Fout)
{
struct i2c_client *i2c = to_i2c_client(component->dev);
struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
struct _fll_div factors;
struct wm5100_fll *fll;
int ret, base, lock, i, timeout;
unsigned long time_left;
switch (fll_id) {
case WM5100_FLL1:
fll = &wm5100->fll[0];
base = WM5100_FLL1_CONTROL_1 - 1;
lock = WM5100_FLL1_LOCK_STS;
break;
case WM5100_FLL2:
fll = &wm5100->fll[1];
base = WM5100_FLL2_CONTROL_2 - 1;
lock = WM5100_FLL2_LOCK_STS;
break;
default:
dev_err(component->dev, "Unknown FLL %d\n",fll_id);
return -EINVAL;
}
if (!Fout) {
dev_dbg(component->dev, "FLL%d disabled", fll_id);
if (fll->fout)
pm_runtime_put(component->dev);
fll->fout = 0;
snd_soc_component_update_bits(component, base + 1, WM5100_FLL1_ENA, 0);
return 0;
}
switch (source) {
case WM5100_FLL_SRC_MCLK1:
case WM5100_FLL_SRC_MCLK2:
case WM5100_FLL_SRC_FLL1:
case WM5100_FLL_SRC_FLL2:
case WM5100_FLL_SRC_AIF1BCLK:
case WM5100_FLL_SRC_AIF2BCLK:
case WM5100_FLL_SRC_AIF3BCLK:
break;
default:
dev_err(component->dev, "Invalid FLL source %d\n", source);
return -EINVAL;
}
ret = fll_factors(&factors, Fref, Fout);
if (ret < 0)
return ret;
/* Disable the FLL while we reconfigure */
snd_soc_component_update_bits(component, base + 1, WM5100_FLL1_ENA, 0);
snd_soc_component_update_bits(component, base + 2,
WM5100_FLL1_OUTDIV_MASK | WM5100_FLL1_FRATIO_MASK,
(factors.fll_outdiv << WM5100_FLL1_OUTDIV_SHIFT) |
factors.fll_fratio);
snd_soc_component_update_bits(component, base + 3, WM5100_FLL1_THETA_MASK,
factors.theta);
snd_soc_component_update_bits(component, base + 5, WM5100_FLL1_N_MASK, factors.n);
snd_soc_component_update_bits(component, base + 6,
WM5100_FLL1_REFCLK_DIV_MASK |
WM5100_FLL1_REFCLK_SRC_MASK,
(factors.fll_refclk_div
<< WM5100_FLL1_REFCLK_DIV_SHIFT) | source);
snd_soc_component_update_bits(component, base + 7, WM5100_FLL1_LAMBDA_MASK,
factors.lambda);
/* Clear any pending completions */
try_wait_for_completion(&fll->lock);
pm_runtime_get_sync(component->dev);
snd_soc_component_update_bits(component, base + 1, WM5100_FLL1_ENA, WM5100_FLL1_ENA);
if (i2c->irq)
timeout = 2;
else
timeout = 50;
snd_soc_component_update_bits(component, WM5100_CLOCKING_3, WM5100_SYSCLK_ENA,
WM5100_SYSCLK_ENA);
/* Poll for the lock; will use interrupt when we can test */
for (i = 0; i < timeout; i++) {
if (i2c->irq) {
time_left = wait_for_completion_timeout(&fll->lock,
msecs_to_jiffies(25));
if (time_left > 0)
break;
} else {
msleep(1);
}
ret = snd_soc_component_read32(component,
WM5100_INTERRUPT_RAW_STATUS_3);
if (ret < 0) {
dev_err(component->dev,
"Failed to read FLL status: %d\n",
ret);
continue;
}
if (ret & lock)
break;
}
if (i == timeout) {
dev_err(component->dev, "FLL%d lock timed out\n", fll_id);
pm_runtime_put(component->dev);
return -ETIMEDOUT;
}
fll->src = source;
fll->fref = Fref;
fll->fout = Fout;
dev_dbg(component->dev, "FLL%d running %dHz->%dHz\n", fll_id,
Fref, Fout);
return 0;
}
/* Actually go much higher */
/* ("higher" refers to the part's capability beyond the 192kHz the ALSA
 * rate constant tops out at — presumably; confirm against datasheet.) */
#define WM5100_RATES SNDRV_PCM_RATE_8000_192000

#define WM5100_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
/* Three audio interfaces; .base points one register before each AIF's
 * register block so the base + N arithmetic in the ops lines up, and
 * all three share wm5100_dai_ops. */
static struct snd_soc_dai_driver wm5100_dai[] = {
	{
		.name = "wm5100-aif1",
		.base = WM5100_AUDIO_IF_1_1 - 1,
		.playback = {
			.stream_name = "AIF1 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = WM5100_RATES,
			.formats = WM5100_FORMATS,
		},
		.capture = {
			 .stream_name = "AIF1 Capture",
			 .channels_min = 2,
			 .channels_max = 2,
			 .rates = WM5100_RATES,
			 .formats = WM5100_FORMATS,
		 },
		.ops = &wm5100_dai_ops,
	},
	{
		.name = "wm5100-aif2",
		.id = 1,
		.base = WM5100_AUDIO_IF_2_1 - 1,
		.playback = {
			.stream_name = "AIF2 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = WM5100_RATES,
			.formats = WM5100_FORMATS,
		},
		.capture = {
			 .stream_name = "AIF2 Capture",
			 .channels_min = 2,
			 .channels_max = 2,
			 .rates = WM5100_RATES,
			 .formats = WM5100_FORMATS,
		 },
		.ops = &wm5100_dai_ops,
	},
	{
		.name = "wm5100-aif3",
		.id = 2,
		.base = WM5100_AUDIO_IF_3_1 - 1,
		.playback = {
			.stream_name = "AIF3 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = WM5100_RATES,
			.formats = WM5100_FORMATS,
		},
		.capture = {
			 .stream_name = "AIF3 Capture",
			 .channels_min = 2,
			 .channels_max = 2,
			 .rates = WM5100_RATES,
			 .formats = WM5100_FORMATS,
		 },
		.ops = &wm5100_dai_ops,
	},
};
/* ADC/DAC digital volume registers.  NOTE(review): the name suggests
 * these all get their volume-update (VU) bit set in one pass — the
 * consuming code is outside this chunk; confirm against probe/init. */
static int wm5100_dig_vu[] = {
	WM5100_ADC_DIGITAL_VOLUME_1L,
	WM5100_ADC_DIGITAL_VOLUME_1R,
	WM5100_ADC_DIGITAL_VOLUME_2L,
	WM5100_ADC_DIGITAL_VOLUME_2R,
	WM5100_ADC_DIGITAL_VOLUME_3L,
	WM5100_ADC_DIGITAL_VOLUME_3R,
	WM5100_ADC_DIGITAL_VOLUME_4L,
	WM5100_ADC_DIGITAL_VOLUME_4R,
	WM5100_DAC_DIGITAL_VOLUME_1L,
	WM5100_DAC_DIGITAL_VOLUME_1R,
	WM5100_DAC_DIGITAL_VOLUME_2L,
	WM5100_DAC_DIGITAL_VOLUME_2R,
	WM5100_DAC_DIGITAL_VOLUME_3L,
	WM5100_DAC_DIGITAL_VOLUME_3R,
	WM5100_DAC_DIGITAL_VOLUME_4L,
	WM5100_DAC_DIGITAL_VOLUME_4R,
	WM5100_DAC_DIGITAL_VOLUME_5L,
	WM5100_DAC_DIGITAL_VOLUME_5R,
	WM5100_DAC_DIGITAL_VOLUME_6L,
	WM5100_DAC_DIGITAL_VOLUME_6R,
};
/* Apply accessory-detection mode @the_mode from platform data: set the
 * headphone polarity GPIO, the accessory-detect bias/source and the
 * HPCOM routing, then record the active mode.
 */
static void wm5100_set_detect_mode(struct wm5100_priv *wm5100, int the_mode)
{
	struct wm5100_jack_mode *cfg;

	if (WARN_ON(the_mode >= ARRAY_SIZE(wm5100->pdata.jack_modes)))
		return;

	cfg = &wm5100->pdata.jack_modes[the_mode];

	gpio_set_value_cansleep(wm5100->pdata.hp_pol, cfg->hp_pol);

	regmap_update_bits(wm5100->regmap, WM5100_ACCESSORY_DETECT_MODE_1,
			   WM5100_ACCDET_BIAS_SRC_MASK |
			   WM5100_ACCDET_SRC,
			   (cfg->bias << WM5100_ACCDET_BIAS_SRC_SHIFT) |
			   cfg->micd_src << WM5100_ACCDET_SRC_SHIFT);
	regmap_update_bits(wm5100->regmap, WM5100_MISC_CONTROL,
			   WM5100_HPCOM_SRC,
			   cfg->micd_src << WM5100_HPCOM_SRC_SHIFT);

	wm5100->jack_mode = the_mode;

	dev_dbg(wm5100->dev, "Set microphone polarity to %d\n",
		wm5100->jack_mode);
}
/* Report a plain headphone (no microphone) to the jack framework once
 * detection has settled, and speed up the accessory-detect poll rate.
 */
static void wm5100_report_headphone(struct wm5100_priv *wm5100)
{
	dev_dbg(wm5100->dev, "Headphone detected\n");
	wm5100->jack_detecting = false;
	snd_soc_jack_report(wm5100->jack, SND_JACK_HEADPHONE,
			    SND_JACK_HEADPHONE);

	/* Increase the detection rate a bit for responsiveness. */
	regmap_update_bits(wm5100->regmap, WM5100_MIC_DETECT_1,
			   WM5100_ACCDET_RATE_MASK,
			   7 << WM5100_ACCDET_RATE_SHIFT);
}
/*
 * Accessory (microphone) detection interrupt handler.  Reads the raw
 * measurement in WM5100_MIC_DETECT_3 and advances the jack state machine:
 * removal, microphone/headset detection, button press/release, or a
 * polarity flip retry during initial detection.
 */
static void wm5100_micd_irq(struct wm5100_priv *wm5100)
{
	unsigned int val;
	int ret;

	ret = regmap_read(wm5100->regmap, WM5100_MIC_DETECT_3, &val);
	if (ret != 0) {
		dev_err(wm5100->dev, "Failed to read microphone status: %d\n",
			ret);
		return;
	}

	dev_dbg(wm5100->dev, "Microphone event: %x\n", val);

	/* Ignore readings the hardware has not flagged as valid */
	if (!(val & WM5100_ACCDET_VALID)) {
		dev_warn(wm5100->dev, "Microphone detection state invalid\n");
		return;
	}

	/* No accessory, reset everything and report removal */
	if (!(val & WM5100_ACCDET_STS)) {
		dev_dbg(wm5100->dev, "Jack removal detected\n");
		wm5100->jack_mic = false;
		wm5100->jack_detecting = true;
		wm5100->jack_flips = 0;
		snd_soc_jack_report(wm5100->jack, 0,
				    SND_JACK_LINEOUT | SND_JACK_HEADSET |
				    SND_JACK_BTN_0);

		/* Back to the slowest (most debounced) detection rate */
		regmap_update_bits(wm5100->regmap, WM5100_MIC_DETECT_1,
				   WM5100_ACCDET_RATE_MASK,
				   WM5100_ACCDET_RATE_MASK);
		return;
	}

	/* If the measurement is very high we've got a microphone,
	 * either we just detected one or if we already reported then
	 * we've got a button release event.
	 */
	if (val & 0x400) {
		if (wm5100->jack_detecting) {
			dev_dbg(wm5100->dev, "Microphone detected\n");
			wm5100->jack_mic = true;
			wm5100->jack_detecting = false;
			snd_soc_jack_report(wm5100->jack,
					    SND_JACK_HEADSET,
					    SND_JACK_HEADSET | SND_JACK_BTN_0);

			/* Increase poll rate to give better responsiveness
			 * for buttons */
			regmap_update_bits(wm5100->regmap, WM5100_MIC_DETECT_1,
					   WM5100_ACCDET_RATE_MASK,
					   5 << WM5100_ACCDET_RATE_SHIFT);
		} else {
			dev_dbg(wm5100->dev, "Mic button up\n");
			snd_soc_jack_report(wm5100->jack, 0, SND_JACK_BTN_0);
		}

		return;
	}

	/* If we detected a lower impedence during initial startup
	 * then we probably have the wrong polarity, flip it.  Don't
	 * do this for the lowest impedences to speed up detection of
	 * plain headphones and give up if neither polarity looks
	 * sensible.
	 */
	if (wm5100->jack_detecting && (val & 0x3f8)) {
		wm5100->jack_flips++;

		/* Both polarities tried already: treat as a headphone */
		if (wm5100->jack_flips > 1)
			wm5100_report_headphone(wm5100);
		else
			wm5100_set_detect_mode(wm5100, !wm5100->jack_mode);

		return;
	}

	/* Don't distinguish between buttons, just report any low
	 * impedence as BTN_0.
	 */
	if (val & 0x3fc) {
		if (wm5100->jack_mic) {
			dev_dbg(wm5100->dev, "Mic button detected\n");
			snd_soc_jack_report(wm5100->jack, SND_JACK_BTN_0,
					    SND_JACK_BTN_0);
		} else if (wm5100->jack_detecting) {
			wm5100_report_headphone(wm5100);
		}
	}
}
/*
 * wm5100_detect - enable or disable accessory detection on a jack
 * @component: the WM5100 ASoC component
 * @jack: jack to report through, or NULL to disable detection
 *
 * With a jack supplied, resets the detection state machine, programs the
 * slowest (most debounced) detection rate, force-enables the CP2 and
 * SYSCLK DAPM pins needed for MICBIAS, then enables accessory detection
 * and unmasks its interrupt.  With NULL, masks the headphone/accessory
 * interrupts, disables detection and drops the jack reference.
 *
 * Returns 0.
 */
int wm5100_detect(struct snd_soc_component *component, struct snd_soc_jack *jack)
{
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);

	if (jack) {
		wm5100->jack = jack;
		wm5100->jack_detecting = true;
		wm5100->jack_flips = 0;

		wm5100_set_detect_mode(wm5100, 0);

		/* Slowest detection rate, gives debounce for initial
		 * detection */
		snd_soc_component_update_bits(component, WM5100_MIC_DETECT_1,
				    WM5100_ACCDET_BIAS_STARTTIME_MASK |
				    WM5100_ACCDET_RATE_MASK,
				    (7 << WM5100_ACCDET_BIAS_STARTTIME_SHIFT) |
				    WM5100_ACCDET_RATE_MASK);

		/* We need the charge pump to power MICBIAS */
		snd_soc_dapm_mutex_lock(dapm);

		snd_soc_dapm_force_enable_pin_unlocked(dapm, "CP2");
		snd_soc_dapm_force_enable_pin_unlocked(dapm, "SYSCLK");

		snd_soc_dapm_sync_unlocked(dapm);

		snd_soc_dapm_mutex_unlock(dapm);

		/* We start off just enabling microphone detection - even a
		 * plain headphone will trigger detection.
		 */
		snd_soc_component_update_bits(component, WM5100_MIC_DETECT_1,
				    WM5100_ACCDET_ENA, WM5100_ACCDET_ENA);

		snd_soc_component_update_bits(component, WM5100_INTERRUPT_STATUS_3_MASK,
				    WM5100_IM_ACCDET_EINT, 0);
	} else {
		snd_soc_component_update_bits(component, WM5100_INTERRUPT_STATUS_3_MASK,
				    WM5100_IM_HPDET_EINT |
				    WM5100_IM_ACCDET_EINT,
				    WM5100_IM_HPDET_EINT |
				    WM5100_IM_ACCDET_EINT);

		snd_soc_component_update_bits(component, WM5100_MIC_DETECT_1,
				    WM5100_ACCDET_ENA, 0);

		wm5100->jack = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(wm5100_detect);
/*
 * Threaded interrupt handler.  Reads and acknowledges interrupt status
 * registers 3 and 4, masking out disabled sources, and dispatches FLL
 * lock completions and accessory-detection events.  Returns IRQ_HANDLED
 * if any unmasked interrupt was pending, IRQ_NONE otherwise.
 */
static irqreturn_t wm5100_irq(int irq, void *data)
{
	struct wm5100_priv *wm5100 = data;
	irqreturn_t status = IRQ_NONE;
	unsigned int irq_val, mask_val;
	int ret;

	ret = regmap_read(wm5100->regmap, WM5100_INTERRUPT_STATUS_3, &irq_val);
	if (ret < 0) {
		dev_err(wm5100->dev, "Failed to read IRQ status 3: %d\n",
			ret);
		irq_val = 0;
	}

	ret = regmap_read(wm5100->regmap, WM5100_INTERRUPT_STATUS_3_MASK,
			  &mask_val);
	if (ret < 0) {
		dev_err(wm5100->dev, "Failed to read IRQ mask 3: %d\n",
			ret);
		/* Assume everything masked so we report nothing spurious */
		mask_val = 0xffff;
	}

	irq_val &= ~mask_val;

	/* Acknowledge what we are about to handle */
	regmap_write(wm5100->regmap, WM5100_INTERRUPT_STATUS_3, irq_val);

	if (irq_val)
		status = IRQ_HANDLED;

	wm5100_log_status3(wm5100, irq_val);

	if (irq_val & WM5100_FLL1_LOCK_EINT) {
		dev_dbg(wm5100->dev, "FLL1 locked\n");
		complete(&wm5100->fll[0].lock);
	}
	if (irq_val & WM5100_FLL2_LOCK_EINT) {
		dev_dbg(wm5100->dev, "FLL2 locked\n");
		complete(&wm5100->fll[1].lock);
	}

	if (irq_val & WM5100_ACCDET_EINT)
		wm5100_micd_irq(wm5100);

	ret = regmap_read(wm5100->regmap, WM5100_INTERRUPT_STATUS_4, &irq_val);
	if (ret < 0) {
		dev_err(wm5100->dev, "Failed to read IRQ status 4: %d\n",
			ret);
		irq_val = 0;
	}

	ret = regmap_read(wm5100->regmap, WM5100_INTERRUPT_STATUS_4_MASK,
			  &mask_val);
	if (ret < 0) {
		dev_err(wm5100->dev, "Failed to read IRQ mask 4: %d\n",
			ret);
		mask_val = 0xffff;
	}

	irq_val &= ~mask_val;

	if (irq_val)
		status = IRQ_HANDLED;

	regmap_write(wm5100->regmap, WM5100_INTERRUPT_STATUS_4, irq_val);

	wm5100_log_status4(wm5100, irq_val);

	return status;
}
static irqreturn_t wm5100_edge_irq(int irq, void *data)
{
irqreturn_t ret = IRQ_NONE;
irqreturn_t val;
do {
val = wm5100_irq(irq, data);
if (val != IRQ_NONE)
ret = val;
} while (val != IRQ_NONE);
return ret;
}
#ifdef CONFIG_GPIOLIB
/* Set the output level of GPIO 'offset'; control registers are contiguous. */
static void wm5100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);

	regmap_update_bits(wm5100->regmap, WM5100_GPIO_CTRL_1 + offset,
			   WM5100_GP1_LVL, !!value << WM5100_GP1_LVL_SHIFT);
}
/*
 * Configure GPIO 'offset' as an output (function 1) with the given
 * initial level.  Returns 0 on success or a negative errno.
 */
static int wm5100_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);
	int val, ret;

	val = (1 << WM5100_GP1_FN_SHIFT) | (!!value << WM5100_GP1_LVL_SHIFT);

	ret = regmap_update_bits(wm5100->regmap, WM5100_GPIO_CTRL_1 + offset,
				 WM5100_GP1_FN_MASK | WM5100_GP1_DIR |
				 WM5100_GP1_LVL, val);

	/* Normalise any positive regmap result to plain success */
	return ret < 0 ? ret : 0;
}
/*
 * Read the current level of GPIO 'offset'.  Returns 0/1 on success or a
 * negative errno if the register read fails.
 *
 * Note: the original text had regmap_read()'s third argument corrupted to
 * the "registered trademark" entity; it must be &reg.
 */
static int wm5100_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);
	unsigned int reg;
	int ret;

	ret = regmap_read(wm5100->regmap, WM5100_GPIO_CTRL_1 + offset, &reg);
	if (ret < 0)
		return ret;

	return (reg & WM5100_GP1_LVL) != 0;
}
/* Configure GPIO 'offset' as an input (function 1, direction bit set). */
static int wm5100_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);

	return regmap_update_bits(wm5100->regmap, WM5100_GPIO_CTRL_1 + offset,
				  WM5100_GP1_FN_MASK | WM5100_GP1_DIR,
				  (1 << WM5100_GP1_FN_SHIFT) |
				  (1 << WM5100_GP1_DIR_SHIFT));
}
/*
 * Template gpio_chip copied into each device instance by
 * wm5100_init_gpio(); can_sleep is set because all accesses go through
 * regmap over I2C.
 */
static const struct gpio_chip wm5100_template_chip = {
	.label			= "wm5100",
	.owner			= THIS_MODULE,
	.direction_output	= wm5100_gpio_direction_out,
	.set			= wm5100_gpio_set,
	.direction_input	= wm5100_gpio_direction_in,
	.get			= wm5100_gpio_get,
	.can_sleep		= 1,
};
/*
 * Register the 6 chip GPIOs with gpiolib, using the base number from
 * platform data when provided (otherwise dynamically allocated).
 * Registration failure is logged but not fatal to the probe.
 */
static void wm5100_init_gpio(struct i2c_client *i2c)
{
	struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);
	int ret;

	wm5100->gpio_chip = wm5100_template_chip;
	wm5100->gpio_chip.ngpio = 6;
	wm5100->gpio_chip.parent = &i2c->dev;

	if (wm5100->pdata.gpio_base)
		wm5100->gpio_chip.base = wm5100->pdata.gpio_base;
	else
		wm5100->gpio_chip.base = -1;

	ret = gpiochip_add_data(&wm5100->gpio_chip, wm5100);
	if (ret != 0)
		dev_err(&i2c->dev, "Failed to add GPIOs: %d\n", ret);
}
/* Unregister the gpio_chip added by wm5100_init_gpio(). */
static void wm5100_free_gpio(struct i2c_client *i2c)
{
	struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);

	gpiochip_remove(&wm5100->gpio_chip);
}
#else
/* No-op stubs used when the kernel is built without GPIOLIB support. */
static void wm5100_init_gpio(struct i2c_client *i2c)
{
}

static void wm5100_free_gpio(struct i2c_client *i2c)
{
}
#endif
/*
 * ASoC component probe: latch VU bits on all digital volume registers,
 * disable IRQ debounce, add the IRQ-less DAPM widgets when an interrupt
 * line is wired up, and claim the headphone polarity GPIO if configured.
 */
static int wm5100_probe(struct snd_soc_component *component)
{
	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
	struct i2c_client *i2c = to_i2c_client(component->dev);
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);
	int ret, i;

	wm5100->component = component;

	for (i = 0; i < ARRAY_SIZE(wm5100_dig_vu); i++)
		snd_soc_component_update_bits(component, wm5100_dig_vu[i], WM5100_OUT_VU,
				    WM5100_OUT_VU);

	/* Don't debounce interrupts to support use of SYSCLK only */
	snd_soc_component_write(component, WM5100_IRQ_DEBOUNCE_1, 0);
	snd_soc_component_write(component, WM5100_IRQ_DEBOUNCE_2, 0);

	/* TODO: check if we're symmetric */

	if (i2c->irq)
		snd_soc_dapm_new_controls(dapm, wm5100_dapm_widgets_noirq,
					  ARRAY_SIZE(wm5100_dapm_widgets_noirq));

	if (wm5100->pdata.hp_pol) {
		ret = gpio_request_one(wm5100->pdata.hp_pol,
				       GPIOF_OUT_INIT_HIGH, "WM5100 HP_POL");
		if (ret < 0) {
			dev_err(&i2c->dev, "Failed to request HP_POL %d: %d\n",
				wm5100->pdata.hp_pol, ret);
			goto err_gpio;
		}
	}

	return 0;

err_gpio:

	return ret;
}
/* Component teardown: release the headphone polarity GPIO if we own it. */
static void wm5100_remove(struct snd_soc_component *component)
{
	struct wm5100_priv *wm5100 = snd_soc_component_get_drvdata(component);

	if (wm5100->pdata.hp_pol)
		gpio_free(wm5100->pdata.hp_pol);
}
/* ASoC component driver description for the WM5100. */
static const struct snd_soc_component_driver soc_component_dev_wm5100 = {
	.probe			= wm5100_probe,
	.remove			= wm5100_remove,
	.set_sysclk		= wm5100_set_sysclk,
	.set_pll		= wm5100_set_fll,
	.seq_notifier		= wm5100_seq_notifier,
	.controls		= wm5100_snd_controls,
	.num_controls		= ARRAY_SIZE(wm5100_snd_controls),
	.dapm_widgets		= wm5100_dapm_widgets,
	.num_dapm_widgets	= ARRAY_SIZE(wm5100_dapm_widgets),
	.dapm_routes		= wm5100_dapm_routes,
	.num_dapm_routes	= ARRAY_SIZE(wm5100_dapm_routes),
	.use_pmdown_time	= 1,
	.endianness		= 1,
	.non_legacy_dai_naming	= 1,
};
/* Register map: 16-bit registers and values, rbtree-cached over I2C. */
static const struct regmap_config wm5100_regmap = {
	.reg_bits = 16,
	.val_bits = 16,

	.max_register = WM5100_MAX_REGISTER,
	.reg_defaults = wm5100_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm5100_reg_defaults),
	.volatile_reg = wm5100_volatile_register,
	.readable_reg = wm5100_readable_register,
	.cache_type = REGCACHE_RBTREE,
};
/* Input control registers, indexed to match pdata.in_mode[]/dmic_sup[]. */
static const unsigned int wm5100_mic_ctrl_reg[] = {
	WM5100_IN1L_CONTROL,
	WM5100_IN2L_CONTROL,
	WM5100_IN3L_CONTROL,
	WM5100_IN4L_CONTROL,
};
/*
 * I2C probe: set up the register map, acquire and enable core supplies
 * and control GPIOs, verify the device ID, apply revision-specific
 * register patches, program GPIO/input defaults from platform data,
 * request the interrupt (optional) and register the ASoC component.
 *
 * Note: the original text had both regmap_read() ID/revision calls'
 * last argument corrupted to the "registered trademark" entity; it
 * must be &reg in both places.
 */
static int wm5100_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm5100_pdata *pdata = dev_get_platdata(&i2c->dev);
	struct wm5100_priv *wm5100;
	unsigned int reg;
	int ret, i, irq_flags;

	wm5100 = devm_kzalloc(&i2c->dev, sizeof(struct wm5100_priv),
			      GFP_KERNEL);
	if (wm5100 == NULL)
		return -ENOMEM;

	wm5100->dev = &i2c->dev;

	wm5100->regmap = devm_regmap_init_i2c(i2c, &wm5100_regmap);
	if (IS_ERR(wm5100->regmap)) {
		ret = PTR_ERR(wm5100->regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
			ret);
		goto err;
	}

	for (i = 0; i < ARRAY_SIZE(wm5100->fll); i++)
		init_completion(&wm5100->fll[i].lock);

	if (pdata)
		wm5100->pdata = *pdata;

	i2c_set_clientdata(i2c, wm5100);

	for (i = 0; i < ARRAY_SIZE(wm5100->core_supplies); i++)
		wm5100->core_supplies[i].supply = wm5100_core_supply_names[i];

	ret = devm_regulator_bulk_get(&i2c->dev,
				      ARRAY_SIZE(wm5100->core_supplies),
				      wm5100->core_supplies);
	if (ret != 0) {
		dev_err(&i2c->dev, "Failed to request core supplies: %d\n",
			ret);
		goto err;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(wm5100->core_supplies),
				    wm5100->core_supplies);
	if (ret != 0) {
		dev_err(&i2c->dev, "Failed to enable core supplies: %d\n",
			ret);
		goto err;
	}

	if (wm5100->pdata.ldo_ena) {
		ret = gpio_request_one(wm5100->pdata.ldo_ena,
				       GPIOF_OUT_INIT_HIGH, "WM5100 LDOENA");
		if (ret < 0) {
			dev_err(&i2c->dev, "Failed to request LDOENA %d: %d\n",
				wm5100->pdata.ldo_ena, ret);
			goto err_enable;
		}
		/* Give the LDO output time to come up */
		msleep(2);
	}

	if (wm5100->pdata.reset) {
		ret = gpio_request_one(wm5100->pdata.reset,
				       GPIOF_OUT_INIT_HIGH, "WM5100 /RESET");
		if (ret < 0) {
			dev_err(&i2c->dev, "Failed to request /RESET %d: %d\n",
				wm5100->pdata.reset, ret);
			goto err_ldo;
		}
	}

	/* Verify that we're really talking to a WM5100 */
	ret = regmap_read(wm5100->regmap, WM5100_SOFTWARE_RESET, &reg);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to read ID register: %d\n", ret);
		goto err_reset;
	}
	switch (reg) {
	case 0x8997:
	case 0x5100:
		break;

	default:
		dev_err(&i2c->dev, "Device is not a WM5100, ID is %x\n", reg);
		ret = -EINVAL;
		goto err_reset;
	}

	ret = regmap_read(wm5100->regmap, WM5100_DEVICE_REVISION, &reg);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to read revision register\n");
		goto err_reset;
	}
	wm5100->rev = reg & WM5100_DEVICE_REVISION_MASK;

	dev_info(&i2c->dev, "revision %c\n", wm5100->rev + 'A');

	ret = wm5100_reset(wm5100);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to issue reset\n");
		goto err_reset;
	}

	switch (wm5100->rev) {
	case 0:
		/* Rev A silicon needs a register patch set */
		ret = regmap_register_patch(wm5100->regmap,
					    wm5100_reva_patches,
					    ARRAY_SIZE(wm5100_reva_patches));
		if (ret != 0) {
			dev_err(&i2c->dev, "Failed to register patches: %d\n",
				ret);
			goto err_reset;
		}
		break;
	default:
		break;
	}

	wm5100_init_gpio(i2c);

	/* Apply any platform-data GPIO defaults (0 means "leave alone") */
	for (i = 0; i < ARRAY_SIZE(wm5100->pdata.gpio_defaults); i++) {
		if (!wm5100->pdata.gpio_defaults[i])
			continue;

		regmap_write(wm5100->regmap, WM5100_GPIO_CTRL_1 + i,
			     wm5100->pdata.gpio_defaults[i]);
	}

	/* Configure analogue/digital input modes from platform data */
	for (i = 0; i < ARRAY_SIZE(wm5100->pdata.in_mode); i++) {
		regmap_update_bits(wm5100->regmap, wm5100_mic_ctrl_reg[i],
				   WM5100_IN1_MODE_MASK |
				   WM5100_IN1_DMIC_SUP_MASK,
				   (wm5100->pdata.in_mode[i] <<
				    WM5100_IN1_MODE_SHIFT) |
				   (wm5100->pdata.dmic_sup[i] <<
				    WM5100_IN1_DMIC_SUP_SHIFT));
	}

	if (i2c->irq) {
		if (wm5100->pdata.irq_flags)
			irq_flags = wm5100->pdata.irq_flags;
		else
			irq_flags = IRQF_TRIGGER_LOW;

		irq_flags |= IRQF_ONESHOT;

		/* Edge-triggered lines need the re-polling handler */
		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
			ret = request_threaded_irq(i2c->irq, NULL,
						   wm5100_edge_irq, irq_flags,
						   "wm5100", wm5100);
		else
			ret = request_threaded_irq(i2c->irq, NULL, wm5100_irq,
						   irq_flags, "wm5100",
						   wm5100);

		if (ret != 0) {
			/* Non-fatal: continue without interrupt support */
			dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
				i2c->irq, ret);
		} else {
			/* Enable default interrupts */
			regmap_update_bits(wm5100->regmap,
					   WM5100_INTERRUPT_STATUS_3_MASK,
					   WM5100_IM_SPK_SHUTDOWN_WARN_EINT |
					   WM5100_IM_SPK_SHUTDOWN_EINT |
					   WM5100_IM_ASRC2_LOCK_EINT |
					   WM5100_IM_ASRC1_LOCK_EINT |
					   WM5100_IM_FLL2_LOCK_EINT |
					   WM5100_IM_FLL1_LOCK_EINT |
					   WM5100_CLKGEN_ERR_EINT |
					   WM5100_CLKGEN_ERR_ASYNC_EINT, 0);

			regmap_update_bits(wm5100->regmap,
					   WM5100_INTERRUPT_STATUS_4_MASK,
					   WM5100_AIF3_ERR_EINT |
					   WM5100_AIF2_ERR_EINT |
					   WM5100_AIF1_ERR_EINT |
					   WM5100_CTRLIF_ERR_EINT |
					   WM5100_ISRC2_UNDERCLOCKED_EINT |
					   WM5100_ISRC1_UNDERCLOCKED_EINT |
					   WM5100_FX_UNDERCLOCKED_EINT |
					   WM5100_AIF3_UNDERCLOCKED_EINT |
					   WM5100_AIF2_UNDERCLOCKED_EINT |
					   WM5100_AIF1_UNDERCLOCKED_EINT |
					   WM5100_ASRC_UNDERCLOCKED_EINT |
					   WM5100_DAC_UNDERCLOCKED_EINT |
					   WM5100_ADC_UNDERCLOCKED_EINT |
					   WM5100_MIXER_UNDERCLOCKED_EINT, 0);
		}
	}

	pm_runtime_set_active(&i2c->dev);
	pm_runtime_enable(&i2c->dev);
	pm_request_idle(&i2c->dev);

	ret = devm_snd_soc_register_component(&i2c->dev,
				     &soc_component_dev_wm5100, wm5100_dai,
				     ARRAY_SIZE(wm5100_dai));
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to register WM5100: %d\n", ret);
		goto err_reset;
	}

	return ret;

err_reset:
	if (i2c->irq)
		free_irq(i2c->irq, wm5100);
	wm5100_free_gpio(i2c);
	if (wm5100->pdata.reset) {
		gpio_set_value_cansleep(wm5100->pdata.reset, 0);
		gpio_free(wm5100->pdata.reset);
	}
err_ldo:
	if (wm5100->pdata.ldo_ena) {
		gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
		gpio_free(wm5100->pdata.ldo_ena);
	}
err_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies),
			       wm5100->core_supplies);
err:
	return ret;
}
/*
 * I2C remove: release the interrupt, the gpiolib chip and the reset /
 * LDO-enable GPIOs (driving each low before freeing it).  devm-managed
 * resources (regmap, supplies, component) are released automatically.
 */
static int wm5100_i2c_remove(struct i2c_client *i2c)
{
	struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);

	if (i2c->irq)
		free_irq(i2c->irq, wm5100);
	wm5100_free_gpio(i2c);
	if (wm5100->pdata.reset) {
		gpio_set_value_cansleep(wm5100->pdata.reset, 0);
		gpio_free(wm5100->pdata.reset);
	}
	if (wm5100->pdata.ldo_ena) {
		gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
		gpio_free(wm5100->pdata.ldo_ena);
	}

	return 0;
}
#ifdef CONFIG_PM
/*
 * Runtime suspend: switch the regmap to cache-only and mark it dirty so
 * resume can re-sync, then drop LDO enable and the core supplies.
 */
static int wm5100_runtime_suspend(struct device *dev)
{
	struct wm5100_priv *wm5100 = dev_get_drvdata(dev);

	regcache_cache_only(wm5100->regmap, true);
	regcache_mark_dirty(wm5100->regmap);
	if (wm5100->pdata.ldo_ena)
		gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
	regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies),
			       wm5100->core_supplies);

	return 0;
}
/*
 * Runtime resume: re-enable supplies and the LDO (with settling delay),
 * then leave cache-only mode and sync the dirty register cache back to
 * the hardware.
 */
static int wm5100_runtime_resume(struct device *dev)
{
	struct wm5100_priv *wm5100 = dev_get_drvdata(dev);
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(wm5100->core_supplies),
				    wm5100->core_supplies);
	if (ret != 0) {
		dev_err(dev, "Failed to enable supplies: %d\n",
			ret);
		return ret;
	}

	if (wm5100->pdata.ldo_ena) {
		gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 1);
		/* Give the LDO output time to come up */
		msleep(2);
	}

	regcache_cache_only(wm5100->regmap, false);
	regcache_sync(wm5100->regmap);

	return 0;
}
#endif
/* Runtime PM operations (only built when CONFIG_PM is enabled). */
static const struct dev_pm_ops wm5100_pm = {
	SET_RUNTIME_PM_OPS(wm5100_runtime_suspend, wm5100_runtime_resume,
			   NULL)
};
/* I2C device ID table; also exported for module autoloading. */
static const struct i2c_device_id wm5100_i2c_id[] = {
	{ "wm5100", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm5100_i2c_id);
/* I2C driver definition; module_i2c_driver() generates init/exit. */
static struct i2c_driver wm5100_i2c_driver = {
	.driver = {
		.name	= "wm5100",
		.pm	= &wm5100_pm,
	},
	.probe		= wm5100_i2c_probe,
	.remove		= wm5100_i2c_remove,
	.id_table	= wm5100_i2c_id,
};
module_i2c_driver(wm5100_i2c_driver);
MODULE_DESCRIPTION("ASoC WM5100 driver");
MODULE_AUTHOR("Mark Brown <[email protected]>");
MODULE_LICENSE("GPL");
| {
"pile_set_name": "Github"
} |
---
title: 217 - ClientOperationPrepared
ms.date: 03/30/2017
ms.assetid: ad207f04-b038-4f33-95e9-27a361df8ecd
ms.openlocfilehash: 5979cd8ffe0e05b61af01d2aa98c4a2c63fd432c
ms.sourcegitcommit: 9b552addadfb57fab0b9e7852ed4f1f1b8a42f8e
ms.translationtype: MT
ms.contentlocale: es-ES
ms.lasthandoff: 04/23/2019
ms.locfileid: "61781773"
---
# <a name="217---clientoperationprepared"></a>217 - ClientOperationPrepared
## <a name="properties"></a>Propiedades
|||
|-|-|
|ID|217|
|Palabras clave|Troubleshooting, ServiceModel|
|Nivel|Información|
|Canal|Microsoft-Windows-Application Server-Applications/Analytic|
## <a name="description"></a>Descripción
Los clientes emiten este evento justo antes de que se envíe una operación al servicio.
## <a name="message"></a>Mensaje
El cliente ejecuta la acción '%1' asociada al contrato '%2'. El mensaje se enviará a '%3'.
## <a name="details"></a>Detalles
|Nombre del elemento de datos|Tipo del elemento de datos|Descripción|
|--------------------|--------------------|-----------------|
|Acción|`xs:string`|El encabezado de acción de SOAP del mensaje saliente.|
|Contract Name|`xs:string`|El nombre del contrato. Ejemplo: ICalculator.|
|Destino|`xs:string`|La dirección del extremo de servicio a la que se ha enviado el mensaje.|
|HostReference|`xs:string`|En el caso de los servicios hospedados en web, este campo identifica de manera única el servicio en la jerarquía web. Su formato se define como 'Nombre del sitio web Ruta de acceso virtual de la aplicación|Ruta de acceso virtual del servicio|NombreDelServicio'. Ejemplo: 'Default Web Site/CalculatorApplication|/CalculatorService.svc|CalculatorService'.|
|AppDomain|`xs:string`|La cadena devuelta por AppDomain.CurrentDomain.FriendlyName.|
| {
"pile_set_name": "Github"
} |
/* Copyright 2010-present MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace MongoDB.Bson.Serialization.Serializers
{
/// <summary>
/// Represents a serializer for LazyBsonDocuments.
/// </summary>
public class LazyBsonDocumentSerializer : BsonValueSerializerBase<LazyBsonDocument>
{
// constructors
/// <summary>
/// Initializes a new instance of the <see cref="LazyBsonDocumentSerializer"/> class.
/// </summary>
public LazyBsonDocumentSerializer()
: base(BsonType.Document)
{
}
// protected methods
/// <summary>
/// Deserializes a value.
/// </summary>
/// <param name="context">The deserialization context.</param>
/// <param name="args">The deserialization args.</param>
/// <returns>A deserialized value.</returns>
protected override LazyBsonDocument DeserializeValue(BsonDeserializationContext context, BsonDeserializationArgs args)
{
var bsonReader = context.Reader;
var slice = bsonReader.ReadRawBsonDocument();
return new LazyBsonDocument(slice);
}
/// <summary>
/// Serializes a value.
/// </summary>
/// <param name="context">The serialization context.</param>
/// <param name="args">The serialization args.</param>
/// <param name="value">The object.</param>
protected override void SerializeValue(BsonSerializationContext context, BsonSerializationArgs args, LazyBsonDocument value)
{
var bsonWriter = context.Writer;
var slice = value.Slice;
if (slice == null)
{
BsonDocumentSerializer.Instance.Serialize(context, value);
}
else
{
bsonWriter.WriteRawBsonDocument(slice);
}
}
}
}
| {
"pile_set_name": "Github"
} |
# Shindo tests verifying that Excon's CaptureCookies middleware makes cookies
# set during a redirect available to the follow-up request issued by
# RedirectFollower.
Shindo.tests("Excon redirecting with cookie preserved") do
  env_init

  # Rack app that sets cookies on the redirecting response.
  with_rackup('redirecting_with_cookie.ru') do
    tests('second request will send cookies set by the first').returns('ok') do
      Excon.get(
        'http://127.0.0.1:9292',
        :path => '/sets_cookie',
        :middlewares => Excon.defaults[:middlewares] + [Excon::Middleware::CaptureCookies, Excon::Middleware::RedirectFollower]
      ).body
    end

    tests('second request will send multiple cookies set by the first').returns('ok') do
      Excon.get(
        'http://127.0.0.1:9292',
        :path => '/sets_multi_cookie',
        :middlewares => Excon.defaults[:middlewares] + [Excon::Middleware::CaptureCookies, Excon::Middleware::RedirectFollower]
      ).body
    end
  end

  # Redirect without any Set-Cookie headers: the middleware stack must not
  # interfere with a plain redirected POST.
  with_rackup('redirecting.ru') do
    tests("runs normally when there are no cookies set").returns('ok') do
      Excon.post(
        'http://127.0.0.1:9292',
        :path => '/first',
        :middlewares => Excon.defaults[:middlewares] + [Excon::Middleware::CaptureCookies, Excon::Middleware::RedirectFollower],
        :body => "a=Some_content"
      ).body
    end
  end

  env_restore
end
| {
"pile_set_name": "Github"
} |
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"math/big"
)
// bigFromBase10 converts a base-10 string to a *big.Int.  The strings in
// this package are compile-time constants, so a malformed literal is a
// programming error: panic instead of silently returning nil (SetString
// returns nil, false on failure, which the original discarded).
func bigFromBase10(s string) *big.Int {
	n, ok := new(big.Int).SetString(s, 10)
	if !ok {
		panic("bn256: invalid base-10 integer literal: " + s)
	}
	return n
}
// u is the BN parameter that determines the prime: u = 1868033³.
var u = bigFromBase10("6518589491078791937")

// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
var p = bigFromBase10("65000549695646603732796438742359905742825358107623003571877145026864184071783")

// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
var Order = bigFromBase10("65000549695646603732796438742359905742570406053903786389881062969044166799969")

// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+3.
var xiToPMinus1Over6 = &gfP2{bigFromBase10("8669379979083712429711189836753509758585994370025260553045152614783263110636"), bigFromBase10("19998038925833620163537568958541907098007303196759855091367510456613536016040")}

// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+3.
var xiToPMinus1Over3 = &gfP2{bigFromBase10("26098034838977895781559542626833399156321265654106457577426020397262786167059"), bigFromBase10("15931493369629630809226283458085260090334794394361662678240713231519278691715")}

// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+3.
var xiToPMinus1Over2 = &gfP2{bigFromBase10("50997318142241922852281555961173165965672272825141804376761836765206060036244"), bigFromBase10("38665955945962842195025998234511023902832543644254935982879660597356748036009")}

// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+3.
var xiToPSquaredMinus1Over3 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437752")

// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+3 (a cubic root of unity, mod p).
var xiTo2PSquaredMinus2Over3 = bigFromBase10("4985783334309134261147736404674766913742361673560802634030")

// xiToPSquaredMinus1Over6 is ξ^((p²-1)/6) where ξ = i+3 (a cubic root of -1, mod p).
var xiToPSquaredMinus1Over6 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437753")

// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+3.
var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19885131339612776214803633203834694332692106372356013117629940868870585019582"), bigFromBase10("21645619881471562101905880913352894726728173167203616652430647841922248593627")}
| {
"pile_set_name": "Github"
} |
# libXrandr.la - a libtool library file
# Generated by libtool (GNU libtool) 2.4.6
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='libXrandr.so.2'
# Names of this library.
library_names='libXrandr.so.2.2.0 libXrandr.so.2 libXrandr.so'
# The name of the static archive.
old_library=''
# Linker flags that cannot go in dependency_libs.
inherited_linker_flags=''
# Libraries that this one depends upon.
dependency_libs=' =/usr/lib/libXext.la =/usr/lib/libXrender.la =/usr/lib/libX11.la =/usr/lib/libX11.la =/usr/lib/libxcb.la =/usr/lib/libxcb.la =/usr/lib/libXau.la =/usr/lib/libXdmcp.la =/usr/lib/libXau.la =/usr/lib/libXdmcp.la -ldl'
# Names of additional weak libraries provided by this library
weak_library_names=''
# Version information for libXrandr.
current=4
age=2
revision=0
# Is this an already installed library?
installed=yes
# Should we warn about portability when linking against -modules?
shouldnotlink=no
# Files to dlopen/dlpreopen
dlopen=''
dlpreopen=''
# Directory that this library needs to be installed in:
libdir='/usr/lib'
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.