/**
 * Fetches a DatasetPolicy by its ID.
 */
public DatasetPolicy fetchDatasetPolicyById(Integer id, String... dependencies) {
    log.debug("fetchDatasetPolicyById: id=" + id);
    HibernateSessionFactory.getInstance().getCurrentSession().beginTransaction();
    DatasetPolicyDAO dao = daoFactory.getDatasetPolicyDAO();
    DatasetPolicy result = dao.fetchById(id, dependencies);
    HibernateSessionFactory.getInstance().getCurrentSession().getTransaction().commit();
    return result;
}
/**
 * Web config for the Spring Connector.
 */
@Configuration
@ConditionalOnProperty(prefix = INTERLEDGER_SPSP_SERVER_PARENT_ACCOUNT, name = LINK_TYPE, havingValue = IlpOverHttpLink.LINK_TYPE_STRING)
@EnableWebMvc
@ComponentScan(basePackages = "org.interledger.spsp.server.controllers")
@Import({SecurityConfiguration.class})
public class SpringSpspServerWebMvc implements WebMvcConfigurer {

  // TODO: Configure TLS
  // TODO: Configure HTTP/2

  @Autowired
  @Qualifier(CodecContextConfig.ILP)
  private CodecContext ilpCodecContext;

  @Autowired
  private ObjectMapper objectMapper;

  ////////////////////////
  // HttpMessageConverters
  ////////////////////////

  @Bean
  OerPreparePacketHttpMessageConverter oerPreparePacketHttpMessageConverter() {
    return new OerPreparePacketHttpMessageConverter(ilpCodecContext);
  }

  @Override
  public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
    // For any byte[] payloads (e.g., `/settlements`)
    ByteArrayHttpMessageConverter octetStreamConverter = new ByteArrayHttpMessageConverter();
    octetStreamConverter.setSupportedMediaTypes(Lists.newArrayList(APPLICATION_OCTET_STREAM));
    converters.add(octetStreamConverter);

    converters.add(constructProblemsJsonConverter()); // For ProblemsJson only.
    converters.add(new MappingJackson2HttpMessageConverter(objectMapper)); // For any JSON payloads.
    converters.add(oerPreparePacketHttpMessageConverter());
  }

  @Override
  public void extendMessageConverters(List<HttpMessageConverter<?>> converters) {
    converters.replaceAll(messageConverter -> {
      if (messageConverter instanceof MappingJackson2HttpMessageConverter) {
        // In `configureMessageConverters`, there is at least one extra MessageConverter that is used specifically to
        // serialize Problems to JSON with non-String numbers (e.g., the status code). In that case, we don't want to
        // replace the message converter because we want it to use the custom ObjectMapper that it was configured
        // with.
        if (((MappingJackson2HttpMessageConverter) messageConverter).getObjectMapper().getRegisteredModuleIds()
            .contains(ProblemModule.class.getName())) {
          return messageConverter;
        }
        // Necessary to make sure the correct ObjectMapper is used in all Jackson Message Converters.
        return new MappingJackson2HttpMessageConverter(objectMapper);
      } else {
        return messageConverter;
      }
    });
  }

  @Override
  public void addFormatters(FormatterRegistry registry) {
    // registry.addConverter(rateLimitSettingsEntityConverter);
    // registry.addConverter(accountBalanceSettingsEntityConverter);
    // registry.addConverter(settlementEngineDetailsEntityConverter);
    // registry.addConverter(accountSettingsConverter);
    // registry.addConverter(fxRateOverrideEntityConverter);
    // registry.addConverter(staticRouteEntityConverter);
  }

  @VisibleForTesting
  protected MappingJackson2HttpMessageConverter constructProblemsJsonConverter() {
    final ObjectMapper objectMapper = ObjectMapperFactory.createObjectMapperForProblemsJson();
    final MappingJackson2HttpMessageConverter problemsJsonConverter =
        new MappingJackson2HttpMessageConverter(objectMapper);
    problemsJsonConverter.setSupportedMediaTypes(Lists.newArrayList(MediaTypes.PROBLEM, MediaTypes.X_PROBLEM));
    return problemsJsonConverter;
  }
}
// LoadApidata loads world-state data from the Warframe.com API for the given
// platform (id1), stores the response body in Apidata[id2], and returns it.
func LoadApidata(id1 string, id2 int) (ret []byte) {
	client := &http.Client{}
	url := "http://content.warframe.com/dynamic/worldState.php"
	if id1 != "pc" {
		url = "http://content." + id1 + ".warframe.com/dynamic/worldState.php"
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		fmt.Println("Errored when building the request:", err)
		return
	}
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("Errored when sending request to the server:", err)
		return
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("Errored when reading the response body:", err)
		return
	}
	Apidata[id2] = body
	ret = body
	return
}
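For context, a minimal usage sketch (hedged: it assumes Apidata is a package-level map[int][]byte or a similarly indexed store declared elsewhere in this package, and that the platform strings are valid Warframe content subdomains; refreshWorldState itself is a hypothetical helper, not part of the original code):

// refreshWorldState loads the world state for two platforms and reports how
// much data was stored for each.
func refreshWorldState() {
	LoadApidata("pc", 0)  // PC world state -> Apidata[0]
	LoadApidata("ps4", 1) // PS4 world state -> Apidata[1]
	fmt.Printf("pc: %d bytes, ps4: %d bytes\n", len(Apidata[0]), len(Apidata[1]))
}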
package cfclient

// Code generated by go generate. DO NOT EDIT.
// This file was generated by robots at
// 2020-10-26 14:27:51.820693 -0600 MDT m=+1.372772192

import "github.com/pkg/errors"

// IsInvalidAuthTokenError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 1000
// - HTTP code: 401
// - message: "Invalid Auth Token"
func IsInvalidAuthTokenError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 1000
}

// IsMessageParseError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 1001
// - HTTP code: 400
// - message: "Request invalid due to parse error: %s"
func IsMessageParseError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 1001
}

// IsInvalidRelationError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 1002
// - HTTP code: 400
// - message: "%s"
func IsInvalidRelationError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 1002
}

// IsInvalidContentTypeError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 1003
// - HTTP code: 400
// - message: "Invalid content type, expected: %s"
func IsInvalidContentTypeError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 1003
}

// IsBadRequestError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 1004
// - HTTP code: 400
// - message: "Bad request: %s"
func IsBadRequestError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 1004
}

// IsNotFoundError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 10000
// - HTTP code: 404
// - message: "Unknown request"
func IsNotFoundError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 10000
}

// IsServerError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 10001
// - HTTP code: 500
// - message: "Server error"
func IsServerError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 10001
}

// IsNotAuthenticatedError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 10002
// - HTTP code: 401
// - message: "Authentication error"
func IsNotAuthenticatedError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 10002
}

// IsNotAuthorizedError returns a boolean indicating whether
// the error is known to report the Cloud Foundry error:
// - Cloud Foundry code: 10003
// - HTTP code: 403
// - message: "You are not authorized to perform the requested action"
func IsNotAuthorizedError(err error) bool {
	cause := errors.Cause(err)
	cferr, ok := cause.(CloudFoundryError)
	if !ok {
		return false
	}
	return cferr.Code == 10003
}
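// Hedged usage sketch (not part of the generated file above): how a caller might
// use these predicates to tell a benign "not found" apart from a real failure.
// AppByGuid, *Client, and IsAppNotFoundError are assumed from elsewhere in this
// package; the helper name appExists is hypothetical.
func appExists(client *Client, guid string) (bool, error) {
	_, err := client.AppByGuid(guid)
	if err != nil {
		if IsAppNotFoundError(err) || IsNotFoundError(err) {
			// Cloud Foundry reported the app as missing: absent, but not an error.
			return false, nil
		}
		// Any other Cloud Foundry or transport error is a real failure.
		return false, err
	}
	return true, nil
}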
// IsInvalidRequestError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10004 // - HTTP code: 400 // - message: "The request is invalid" func IsInvalidRequestError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10004 } // IsBadQueryParameterError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10005 // - HTTP code: 400 // - message: "The query parameter is invalid: %s" func IsBadQueryParameterError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10005 } // IsAssociationNotEmptyError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10006 // - HTTP code: 400 // - message: "Please delete the %s associations for your %s." func IsAssociationNotEmptyError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10006 } // IsInsufficientScopeError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10007 // - HTTP code: 403 // - message: "Your token lacks the necessary scopes to access this resource." func IsInsufficientScopeError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10007 } // IsUnprocessableEntityError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10008 // - HTTP code: 422 // - message: "%s" func IsUnprocessableEntityError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10008 } // IsUnableToPerformError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10009 // - HTTP code: 400 // - message: "%s could not be completed: %s" func IsUnableToPerformError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10009 } // IsResourceNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10010 // - HTTP code: 404 // - message: "%s" func IsResourceNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10010 } // IsDatabaseError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10011 // - HTTP code: 500 // - message: "Database error" func IsDatabaseError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10011 } // IsOrderByParameterInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10012 // - HTTP code: 500 // - message: "Cannot order by: %s" func IsOrderByParameterInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10012 } // IsRateLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: 
// - Cloud Foundry code: 10013 // - HTTP code: 429 // - message: "Rate Limit Exceeded" func IsRateLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10013 } // IsIPBasedRateLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10014 // - HTTP code: 429 // - message: "Rate Limit Exceeded: Unauthenticated requests from this IP address have exceeded the limit. Please log in." func IsIPBasedRateLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10014 } // IsServiceUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 10015 // - HTTP code: 503 // - message: "%s" func IsServiceUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 10015 } // IsUserInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20001 // - HTTP code: 400 // - message: "The user info is invalid: %s" func IsUserInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20001 } // IsUaaIdTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20002 // - HTTP code: 400 // - message: "The UAA ID is taken: %s" func IsUaaIdTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20002 } // IsUserNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20003 // - HTTP code: 404 // - message: "The user could not be found: %s" func IsUserNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20003 } // IsUaaUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20004 // - HTTP code: 503 // - message: "The UAA service is currently unavailable" func IsUaaUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20004 } // IsUaaEndpointDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20005 // - HTTP code: 501 // - message: "The UAA endpoint needed is disabled" func IsUaaEndpointDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20005 } // IsUserIsInMultipleOriginsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20006 // - HTTP code: 400 // - message: "The user exists in multiple origins. 
Specify an origin for the requested user from: %s" func IsUserIsInMultipleOriginsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20006 } // IsUserWithOriginNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 20007 // - HTTP code: 404 // - message: "The user could not be found, %s" func IsUserWithOriginNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 20007 } // IsOutOfRouterGroupPortsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 21008 // - HTTP code: 403 // - message: "There are no more ports available for router group: %s. Please contact your administrator for more information." func IsOutOfRouterGroupPortsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 21008 } // IsOrganizationInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30001 // - HTTP code: 400 // - message: "The organization info is invalid: %s" func IsOrganizationInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30001 } // IsOrganizationNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30002 // - HTTP code: 400 // - message: "The organization name is taken: %s" func IsOrganizationNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30002 } // IsOrganizationNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30003 // - HTTP code: 404 // - message: "The organization could not be found: %s" func IsOrganizationNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30003 } // IsLastManagerInOrgError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30004 // - HTTP code: 403 // - message: "Cannot remove last Org Manager in org" func IsLastManagerInOrgError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30004 } // IsLastBillingManagerInOrgError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30005 // - HTTP code: 403 // - message: "Cannot remove last Billing Manager in org" func IsLastBillingManagerInOrgError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30005 } // IsLastUserInOrgError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30006 // - HTTP code: 403 // - message: "Cannot remove last User in org" func IsLastUserInOrgError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30006 } // IsOrganizationAlreadySetError returns a boolean indicating whether // the 
error is known to report the Cloud Foundry error: // - Cloud Foundry code: 30007 // - HTTP code: 400 // - message: "Cannot change organization" func IsOrganizationAlreadySetError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 30007 } // IsSpaceInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 40001 // - HTTP code: 400 // - message: "The app space info is invalid: %s" func IsSpaceInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 40001 } // IsSpaceNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 40002 // - HTTP code: 400 // - message: "The app space name is taken: %s" func IsSpaceNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 40002 } // IsSpaceUserNotInOrgError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 40003 // - HTTP code: 400 // - message: "The app space and the user are not in the same org: %s" func IsSpaceUserNotInOrgError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 40003 } // IsSpaceNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 40004 // - HTTP code: 404 // - message: "The app space could not be found: %s" func IsSpaceNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 40004 } // IsServiceInstanceNameEmptyError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60001 // - HTTP code: 400 // - message: "Service instance name is required." 
func IsServiceInstanceNameEmptyError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60001 } // IsServiceInstanceNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60002 // - HTTP code: 400 // - message: "The service instance name is taken: %s" func IsServiceInstanceNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60002 } // IsServiceInstanceInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60003 // - HTTP code: 400 // - message: "The service instance is invalid: %s" func IsServiceInstanceInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60003 } // IsServiceInstanceNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60004 // - HTTP code: 404 // - message: "The service instance could not be found: %s" func IsServiceInstanceNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60004 } // IsServiceInstanceQuotaExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60005 // - HTTP code: 400 // - message: "You have exceeded your organization's services limit." func IsServiceInstanceQuotaExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60005 } // IsPreviouslyUsedAs_ServiceInstancePaidQuotaExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60006 // - HTTP code: 400 // - message: "You have exceeded your organization's services limit." func IsPreviouslyUsedAs_ServiceInstancePaidQuotaExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60006 } // IsServiceInstanceServicePlanNotAllowedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60007 // - HTTP code: 400 // - message: "The service instance cannot be created because paid service plans are not allowed." func IsServiceInstanceServicePlanNotAllowedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60007 } // IsServiceInstanceDuplicateNotAllowedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60008 // - HTTP code: 400 // - message: "An instance of this service is already present in this space. Some services only support one instance per space." func IsServiceInstanceDuplicateNotAllowedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60008 } // IsServiceInstanceNameTooLongError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60009 // - HTTP code: 400 // - message: "You have requested an invalid service instance name. 
Names are limited to 50 characters." func IsServiceInstanceNameTooLongError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60009 } // IsServiceInstanceOrganizationNotAuthorizedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60010 // - HTTP code: 403 // - message: "A service instance for the selected plan cannot be created in this organization. The plan is visible because another organization you belong to has access to it." func IsServiceInstanceOrganizationNotAuthorizedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60010 } // IsServiceInstanceDeprovisionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60011 // - HTTP code: 409 // - message: "The service broker reported an error during deprovisioning: %s" func IsServiceInstanceDeprovisionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60011 } // IsServiceInstanceSpaceQuotaExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60012 // - HTTP code: 400 // - message: "You have exceeded your space's services limit." func IsServiceInstanceSpaceQuotaExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60012 } // IsServiceInstanceServicePlanNotAllowedBySpaceQuotaError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60013 // - HTTP code: 400 // - message: "The service instance cannot be created because paid service plans are not allowed for your space." func IsServiceInstanceServicePlanNotAllowedBySpaceQuotaError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60013 } // IsServiceInstanceSpaceChangeNotAllowedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60014 // - HTTP code: 400 // - message: "Cannot update space for service instance." func IsServiceInstanceSpaceChangeNotAllowedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60014 } // IsServiceInstanceTagsTooLongError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60015 // - HTTP code: 400 // - message: "Combined length of tags for service %s must be 2048 characters or less." func IsServiceInstanceTagsTooLongError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60015 } // IsAsyncServiceInstanceOperationInProgressError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60016 // - HTTP code: 409 // - message: "An operation for service instance %s is in progress." 
func IsAsyncServiceInstanceOperationInProgressError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60016 } // IsServiceInstanceRouteBindingSpaceMismatchError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60017 // - HTTP code: 400 // - message: "The service instance and the route are in different spaces." func IsServiceInstanceRouteBindingSpaceMismatchError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60017 } // IsServiceInstanceSpaceNotAuthorizedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60018 // - HTTP code: 403 // - message: "A service instance for the selected plan cannot be created in this space." func IsServiceInstanceSpaceNotAuthorizedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60018 } // IsServiceInstanceRouteServiceURLInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60019 // - HTTP code: 400 // - message: "The route service URL is invalid: %s" func IsServiceInstanceRouteServiceURLInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60019 } // IsServiceInstanceRouteServiceRequiresDiegoError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60020 // - HTTP code: 400 // - message: "Route services are only supported for apps on Diego. Unbind the service instance from the route or enable Diego for the app." func IsServiceInstanceRouteServiceRequiresDiegoError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60020 } // IsServiceInstanceRouteServiceDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60021 // - HTTP code: 403 // - message: "Support for route services is disabled" func IsServiceInstanceRouteServiceDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60021 } // IsAppPortMappingRequiresDiegoError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60022 // - HTTP code: 400 // - message: "App ports are supported for Diego apps only." func IsAppPortMappingRequiresDiegoError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60022 } // IsRoutePortNotEnabledOnAppError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60023 // - HTTP code: 400 // - message: "Routes can only be mapped to ports already enabled for the application." 
func IsRoutePortNotEnabledOnAppError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60023 } // IsMultipleAppPortsMappedDiegoToDeaError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60024 // - HTTP code: 400 // - message: "The app has routes mapped to multiple ports. Multiple ports are supported for Diego only. Please unmap routes from all but one app port. Multiple routes can be mapped to the same port if desired." func IsMultipleAppPortsMappedDiegoToDeaError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60024 } // IsVolumeMountServiceDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60025 // - HTTP code: 403 // - message: "Support for volume mount services is disabled" func IsVolumeMountServiceDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60025 } // IsDockerAppToDeaError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60026 // - HTTP code: 400 // - message: "Docker apps cannot run on DEAs" func IsDockerAppToDeaError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60026 } // IsServiceInstanceRecursiveDeleteFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60027 // - HTTP code: 502 // - message: "Deletion of service instance %s failed because one or more associated resources could not be deleted.\n\n%s" func IsServiceInstanceRecursiveDeleteFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60027 } // IsManagedServiceInstanceNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60028 // - HTTP code: 404 // - message: "The service instance could not be found: %s" func IsManagedServiceInstanceNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60028 } // IsServiceInstanceWithInaccessiblePlanNotUpdateableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60029 // - HTTP code: 403 // - message: "Cannot update %s of a service instance that belongs to inaccessible plan" func IsServiceInstanceWithInaccessiblePlanNotUpdateableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60029 } // IsServiceInstanceProvisionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 60030 // - HTTP code: 400 // - message: "The service broker reported an error during provisioning: %s" func IsServiceInstanceProvisionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 60030 } // IsRuntimeInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud 
Foundry code: 70001 // - HTTP code: 400 // - message: "The runtime is invalid: %s" func IsRuntimeInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 70001 } // IsRuntimeNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 70002 // - HTTP code: 400 // - message: "The runtime name is taken: %s" func IsRuntimeNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 70002 } // IsRuntimeNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 70003 // - HTTP code: 404 // - message: "The runtime could not be found: %s" func IsRuntimeNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 70003 } // IsFrameworkInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 80001 // - HTTP code: 400 // - message: "The framework is invalid: %s" func IsFrameworkInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 80001 } // IsFrameworkNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 80002 // - HTTP code: 400 // - message: "The framework name is taken: %s" func IsFrameworkNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 80002 } // IsFrameworkNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 80003 // - HTTP code: 404 // - message: "The framework could not be found: %s" func IsFrameworkNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 80003 } // IsServiceBindingInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90001 // - HTTP code: 400 // - message: "The service binding is invalid: %s" func IsServiceBindingInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90001 } // IsServiceBindingDifferentSpacesError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90002 // - HTTP code: 400 // - message: "The app and the service are not in the same app space: %s" func IsServiceBindingDifferentSpacesError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90002 } // IsServiceBindingAppServiceTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90003 // - HTTP code: 400 // - message: "%s" func IsServiceBindingAppServiceTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90003 } // IsServiceBindingNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry 
code: 90004 // - HTTP code: 404 // - message: "The service binding could not be found: %s" func IsServiceBindingNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90004 } // IsUnbindableServiceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90005 // - HTTP code: 400 // - message: "The service instance doesn't support binding." func IsUnbindableServiceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90005 } // IsInvalidLoggingServiceBindingError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90006 // - HTTP code: 502 // - message: "The service is attempting to stream logs from your application, but is not registered as a logging service. Please contact the service provider." func IsInvalidLoggingServiceBindingError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90006 } // IsServiceFetchBindingParametersNotSupportedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90007 // - HTTP code: 400 // - message: "This service does not support fetching service binding parameters." func IsServiceFetchBindingParametersNotSupportedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90007 } // IsAsyncServiceBindingOperationInProgressError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 90008 // - HTTP code: 409 // - message: "An operation for the service binding between app %s and service instance %s is in progress." 
func IsAsyncServiceBindingOperationInProgressError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 90008 } // IsAppInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100001 // - HTTP code: 400 // - message: "The app is invalid: %s" func IsAppInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100001 } // IsAppNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100002 // - HTTP code: 400 // - message: "The app name is taken: %s" func IsAppNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100002 } // IsAppNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100004 // - HTTP code: 404 // - message: "The app could not be found: %s" func IsAppNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100004 } // IsAppMemoryQuotaExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100005 // - HTTP code: 400 // - message: "You have exceeded your organization's memory limit: %s" func IsAppMemoryQuotaExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100005 } // IsAppMemoryInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100006 // - HTTP code: 400 // - message: "You have specified an invalid amount of memory for your application." func IsAppMemoryInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100006 } // IsQuotaInstanceMemoryLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100007 // - HTTP code: 400 // - message: "You have exceeded the instance memory limit for your organization's quota." func IsQuotaInstanceMemoryLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100007 } // IsQuotaInstanceLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100008 // - HTTP code: 400 // - message: "You have exceeded the instance limit for your organization's quota." func IsQuotaInstanceLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100008 } // IsAppMemoryInsufficientForSidecarsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 100009 // - HTTP code: 400 // - message: "The requested memory allocation is not large enough to run all of your sidecar processes." 
func IsAppMemoryInsufficientForSidecarsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 100009 } // IsServicePlanInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 110001 // - HTTP code: 400 // - message: "The service plan is invalid: %s" func IsServicePlanInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 110001 } // IsServicePlanNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 110002 // - HTTP code: 400 // - message: "The service plan name is taken: %s" func IsServicePlanNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 110002 } // IsServicePlanNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 110003 // - HTTP code: 404 // - message: "The service plan could not be found: %s" func IsServicePlanNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 110003 } // IsServicePlanNotUpdateableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 110004 // - HTTP code: 400 // - message: "The service does not support changing plans." func IsServicePlanNotUpdateableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 110004 } // IsServiceInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 120001 // - HTTP code: 400 // - message: "The service is invalid: %s" func IsServiceInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 120001 } // IsServiceLabelTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 120002 // - HTTP code: 400 // - message: "The service label is taken: %s" func IsServiceLabelTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 120002 } // IsServiceNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 120003 // - HTTP code: 404 // - message: "The service could not be found: %s" func IsServiceNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 120003 } // IsServiceFetchInstanceParametersNotSupportedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 120004 // - HTTP code: 400 // - message: "This service does not support fetching service instance parameters." 
func IsServiceFetchInstanceParametersNotSupportedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 120004 } // IsDomainInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130001 // - HTTP code: 400 // - message: "The domain is invalid: %s" func IsDomainInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130001 } // IsDomainNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130002 // - HTTP code: 404 // - message: "The domain could not be found: %s" func IsDomainNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130002 } // IsDomainNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130003 // - HTTP code: 400 // - message: "The domain name is taken: %s" func IsDomainNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130003 } // IsPathInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130004 // - HTTP code: 400 // - message: "The path is invalid: %s" func IsPathInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130004 } // IsTotalPrivateDomainsExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130005 // - HTTP code: 400 // - message: "The number of private domains exceeds the quota for organization: %s" func IsTotalPrivateDomainsExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130005 } // IsServiceDoesNotSupportRoutesError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130006 // - HTTP code: 400 // - message: "This service does not support route binding." func IsServiceDoesNotSupportRoutesError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130006 } // IsRouteAlreadyBoundToServiceInstanceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130007 // - HTTP code: 400 // - message: "A route may only be bound to a single service instance" func IsRouteAlreadyBoundToServiceInstanceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130007 } // IsServiceInstanceAlreadyBoundToSameRouteError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130008 // - HTTP code: 400 // - message: "The route and service instance are already bound." 
func IsServiceInstanceAlreadyBoundToSameRouteError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130008 } // IsInternalDomainCannotBeDeletedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130009 // - HTTP code: 422 // - message: "The domain '%s' cannot be deleted. It is reserved by the platform." func IsInternalDomainCannotBeDeletedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130009 } // IsRouteServiceCannotBeBoundToInternalRouteError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 130010 // - HTTP code: 400 // - message: "Route services cannot be bound to internal routes." func IsRouteServiceCannotBeBoundToInternalRouteError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 130010 } // IsLegacyApiWithoutDefaultSpaceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 140001 // - HTTP code: 400 // - message: "A legacy api call requiring a default app space was called, but no default app space is set for the user." func IsLegacyApiWithoutDefaultSpaceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 140001 } // IsAppPackageInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150001 // - HTTP code: 400 // - message: "The app package is invalid: %s" func IsAppPackageInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150001 } // IsAppPackageNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150002 // - HTTP code: 404 // - message: "The app package could not be found: %s" func IsAppPackageNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150002 } // IsInsufficientRunningResourcesAvailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150003 // - HTTP code: 503 // - message: "One or more instances could not be started because of insufficient running resources." func IsInsufficientRunningResourcesAvailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150003 } // IsPackageBitsAlreadyUploadedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150004 // - HTTP code: 400 // - message: "Bits may be uploaded only once. Create a new package to upload different bits." 
func IsPackageBitsAlreadyUploadedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150004 } // IsBlobstoreNotLocalError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150005 // - HTTP code: 400 // - message: "Downloading blobs can only be done directly to the blobstore." func IsBlobstoreNotLocalError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150005 } // IsBlobstoreUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150006 // - HTTP code: 502 // - message: "Failed to perform operation due to blobstore unavailability." func IsBlobstoreUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150006 } // IsBlobstoreError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150007 // - HTTP code: 500 // - message: "Failed to perform blobstore operation after three retries." func IsBlobstoreError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150007 } // IsDockerImageMissingError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150008 // - HTTP code: 400 // - message: "Docker credentials can only be supplied for apps with a 'docker_image'" func IsDockerImageMissingError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150008 } // IsAppRecursiveDeleteFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 150009 // - HTTP code: 502 // - message: "Deletion of app %s failed because one or more associated resources could not be deleted.\n\n%s" func IsAppRecursiveDeleteFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 150009 } // IsAppBitsUploadInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 160001 // - HTTP code: 400 // - message: "The app upload is invalid: %s" func IsAppBitsUploadInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 160001 } // IsAppBitsCopyInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 160002 // - HTTP code: 400 // - message: "The app copy is invalid: %s" func IsAppBitsCopyInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 160002 } // IsAppResourcesFileModeInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 160003 // - HTTP code: 400 // - message: "The resource file mode is invalid: %s" func IsAppResourcesFileModeInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 160003 } // 
IsAppResourcesFilePathInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 160004 // - HTTP code: 400 // - message: "The resource file path is invalid: %s" func IsAppResourcesFilePathInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 160004 } // IsStagingError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170001 // - HTTP code: 400 // - message: "Staging error: %s" func IsStagingError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170001 } // IsNotStagedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170002 // - HTTP code: 400 // - message: "App has not finished staging" func IsNotStagedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170002 } // IsNoAppDetectedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170003 // - HTTP code: 400 // - message: "An app was not successfully detected by any available buildpack" func IsNoAppDetectedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170003 } // IsBuildpackCompileFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170004 // - HTTP code: 400 // - message: "App staging failed in the buildpack compile phase" func IsBuildpackCompileFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170004 } // IsBuildpackReleaseFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170005 // - HTTP code: 400 // - message: "App staging failed in the buildpack release phase" func IsBuildpackReleaseFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170005 } // IsNoBuildpacksFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170006 // - HTTP code: 400 // - message: "There are no buildpacks available" func IsNoBuildpacksFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170006 } // IsStagingTimeExpiredError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170007 // - HTTP code: 504 // - message: "Staging time expired: %s" func IsStagingTimeExpiredError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170007 } // IsInsufficientResourcesError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170008 // - HTTP code: 400 // - message: "Insufficient resources" func IsInsufficientResourcesError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170008 
} // IsNoCompatibleCellError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170009 // - HTTP code: 400 // - message: "Found no compatible cell" func IsNoCompatibleCellError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170009 } // IsStagerUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170010 // - HTTP code: 503 // - message: "Stager is unavailable: %s" func IsStagerUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170010 } // IsStagerError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170011 // - HTTP code: 500 // - message: "Stager error: %s" func IsStagerError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170011 } // IsRunnerInvalidRequestError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170014 // - HTTP code: 500 // - message: "Runner invalid request: %s" func IsRunnerInvalidRequestError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170014 } // IsRunnerUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170015 // - HTTP code: 503 // - message: "Runner is unavailable: %s" func IsRunnerUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170015 } // IsRunnerError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170016 // - HTTP code: 500 // - message: "Runner error: %s" func IsRunnerError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170016 } // IsStagingInProgressError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170017 // - HTTP code: 422 // - message: "Only one build can be STAGING at a time per application." 
func IsStagingInProgressError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170017 } // IsInvalidTaskAddressError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170018 // - HTTP code: 500 // - message: "Invalid config: %s" func IsInvalidTaskAddressError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170018 } // IsTaskError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170019 // - HTTP code: 500 // - message: "Task failed: %s" func IsTaskError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170019 } // IsTaskWorkersUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170020 // - HTTP code: 503 // - message: "Task workers are unavailable: %s" func IsTaskWorkersUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170020 } // IsInvalidTaskRequestError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 170021 // - HTTP code: 422 // - message: "The task request is invalid: %s" func IsInvalidTaskRequestError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 170021 } // IsServiceGatewayError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 180002 // - HTTP code: 503 // - message: "Service gateway internal error: %s" func IsServiceGatewayError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 180002 } // IsServiceNotImplementedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 180003 // - HTTP code: 501 // - message: "Operation not supported for service" func IsServiceNotImplementedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 180003 } // IsSDSNotAvailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 180004 // - HTTP code: 501 // - message: "No serialization service backends available" func IsSDSNotAvailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 180004 } // IsFileError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 190001 // - HTTP code: 400 // - message: "File error: %s" func IsFileError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 190001 } // IsStatsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 200001 // - HTTP code: 400 // - message: "Stats error: %s" func IsStatsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return 
cferr.Code == 200001 } // IsStatsUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 200002 // - HTTP code: 503 // - message: "Stats unavailable: %s" func IsStatsUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 200002 } // IsAppStoppedStatsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 200003 // - HTTP code: 400 // - message: "Could not fetch stats for stopped app: %s" func IsAppStoppedStatsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 200003 } // IsRouteInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210001 // - HTTP code: 400 // - message: "The route is invalid: %s" func IsRouteInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210001 } // IsRouteNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210002 // - HTTP code: 404 // - message: "The route could not be found: %s" func IsRouteNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210002 } // IsRouteHostTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210003 // - HTTP code: 400 // - message: "The host is taken: %s" func IsRouteHostTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210003 } // IsRoutePathTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210004 // - HTTP code: 400 // - message: "The path is taken: %s" func IsRoutePathTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210004 } // IsRoutePortTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210005 // - HTTP code: 400 // - message: "The port is taken: %s" func IsRoutePortTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210005 } // IsRouteMappingTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210006 // - HTTP code: 400 // - message: "The route mapping is taken: %s" func IsRouteMappingTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210006 } // IsRouteMappingNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 210007 // - HTTP code: 404 // - message: "The route mapping could not be found: %s" func IsRouteMappingNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210007 } // IsRouterGroupNotFoundError returns a boolean indicating whether // the error is 
known to report the Cloud Foundry error: // - Cloud Foundry code: 210009 // - HTTP code: 404 // - message: "The router group could not be found: %s" func IsRouterGroupNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 210009 } // IsInstancesError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 220001 // - HTTP code: 400 // - message: "Instances error: %s" func IsInstancesError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 220001 } // IsInstancesUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 220002 // - HTTP code: 503 // - message: "Instances information unavailable: %s" func IsInstancesUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 220002 } // IsEventNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 230002 // - HTTP code: 404 // - message: "Event could not be found: %s" func IsEventNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 230002 } // IsQuotaDefinitionNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 240001 // - HTTP code: 404 // - message: "Quota Definition could not be found: %s" func IsQuotaDefinitionNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 240001 } // IsQuotaDefinitionNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 240002 // - HTTP code: 400 // - message: "Quota Definition is taken: %s" func IsQuotaDefinitionNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 240002 } // IsQuotaDefinitionInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 240003 // - HTTP code: 400 // - message: "Quota Definition is invalid: %s" func IsQuotaDefinitionInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 240003 } // IsQuotaDefinitionMemoryLimitInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 240004 // - HTTP code: 400 // - message: "Quota Definition memory limit cannot be less than -1" func IsQuotaDefinitionMemoryLimitInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 240004 } // IsStackInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 250001 // - HTTP code: 400 // - message: "The stack is invalid: %s" func IsStackInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 250001 } // IsStackNameTakenError returns a boolean indicating whether // 
the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 250002 // - HTTP code: 400 // - message: "The stack name is taken: %s" func IsStackNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 250002 } // IsStackNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 250003 // - HTTP code: 404 // - message: "The stack could not be found: %s" func IsStackNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 250003 } // IsServicePlanVisibilityInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 260001 // - HTTP code: 400 // - message: "Service Plan Visibility is invalid: %s" func IsServicePlanVisibilityInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 260001 } // IsServicePlanVisibilityAlreadyExistsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 260002 // - HTTP code: 400 // - message: "This combination of ServicePlan and Organization is already taken: %s" func IsServicePlanVisibilityAlreadyExistsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 260002 } // IsServicePlanVisibilityNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 260003 // - HTTP code: 404 // - message: "The service plan visibility could not be found: %s" func IsServicePlanVisibilityNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 260003 } // IsServiceBrokerInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270001 // - HTTP code: 400 // - message: "Service broker is invalid: %s" func IsServiceBrokerInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270001 } // IsServiceBrokerNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270002 // - HTTP code: 400 // - message: "The service broker name is taken" func IsServiceBrokerNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270002 } // IsServiceBrokerUrlTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270003 // - HTTP code: 400 // - message: "The service broker url is taken: %s" func IsServiceBrokerUrlTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270003 } // IsServiceBrokerNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270004 // - HTTP code: 404 // - message: "The service broker was not found: %s" func IsServiceBrokerNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := 
cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270004 } // IsServiceBrokerNotRemovableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270010 // - HTTP code: 400 // - message: "Can not remove brokers that have associated service instances: %s" func IsServiceBrokerNotRemovableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270010 } // IsServiceBrokerUrlInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270011 // - HTTP code: 400 // - message: "%s is not a valid URL" func IsServiceBrokerUrlInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270011 } // IsServiceBrokerCatalogInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270012 // - HTTP code: 502 // - message: "Service broker catalog is invalid: %s" func IsServiceBrokerCatalogInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270012 } // IsServiceBrokerDashboardClientFailureError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270013 // - HTTP code: 502 // - message: "Service broker dashboard clients could not be modified: %s" func IsServiceBrokerDashboardClientFailureError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270013 } // IsServiceBrokerAsyncRequiredError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270014 // - HTTP code: 400 // - message: "This service plan requires client support for asynchronous service operations." func IsServiceBrokerAsyncRequiredError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270014 } // IsServiceDashboardClientMissingUrlError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270015 // - HTTP code: 502 // - message: "Service broker returned dashboard client configuration without a dashboard URL" func IsServiceDashboardClientMissingUrlError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270015 } // IsServiceBrokerUrlBasicAuthNotSupportedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270016 // - HTTP code: 400 // - message: "User name and password fields in the broker URI are not supported" func IsServiceBrokerUrlBasicAuthNotSupportedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270016 } // IsServiceBrokerRespondedAsyncWhenNotAllowedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270017 // - HTTP code: 502 // - message: "The service broker responded asynchronously to a request, but the accepts_incomplete query parameter was false or not given." 
func IsServiceBrokerRespondedAsyncWhenNotAllowedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270017 } // IsServiceBrokerConcurrencyError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270018 // - HTTP code: 422 // - message: "The service broker could not perform this operation in parallel with other running operations" func IsServiceBrokerConcurrencyError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270018 } // IsServiceBrokerCatalogIncompatibleError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 270019 // - HTTP code: 502 // - message: "Service broker catalog is incompatible: %s" func IsServiceBrokerCatalogIncompatibleError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 270019 } // IsBuildpackNameStackTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290000 // - HTTP code: 422 // - message: "The buildpack name %s is already in use for the stack %s" func IsBuildpackNameStackTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290000 } // IsBuildpackNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290001 // - HTTP code: 400 // - message: "The buildpack name is already in use: %s" func IsBuildpackNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290001 } // IsBuildpackBitsUploadInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290002 // - HTTP code: 400 // - message: "The buildpack upload is invalid: %s" func IsBuildpackBitsUploadInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290002 } // IsBuildpackInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290003 // - HTTP code: 400 // - message: "Buildpack is invalid: %s" func IsBuildpackInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290003 } // IsCustomBuildpacksDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290004 // - HTTP code: 400 // - message: "Custom buildpacks are disabled" func IsCustomBuildpacksDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290004 } // IsBuildpackLockedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290005 // - HTTP code: 409 // - message: "The buildpack is locked" func IsBuildpackLockedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290005 } // IsJobTimeoutError returns a boolean indicating whether // 
the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290006 // - HTTP code: 524 // - message: "The job execution has timed out." func IsJobTimeoutError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290006 } // IsSpaceDeleteTimeoutError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290007 // - HTTP code: 524 // - message: "Deletion of space %s timed out before all resources within could be deleted" func IsSpaceDeleteTimeoutError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290007 } // IsSpaceDeletionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290008 // - HTTP code: 502 // - message: "Deletion of space %s failed because one or more resources within could not be deleted.\n\n%s" func IsSpaceDeletionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290008 } // IsOrganizationDeleteTimeoutError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290009 // - HTTP code: 524 // - message: "Delete of organization %s timed out before all resources within could be deleted" func IsOrganizationDeleteTimeoutError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290009 } // IsOrganizationDeletionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290010 // - HTTP code: 502 // - message: "Deletion of organization %s failed because one or more resources within could not be deleted.\n\n%s" func IsOrganizationDeletionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290010 } // IsNonrecursiveSpaceDeletionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290011 // - HTTP code: 400 // - message: "Resource inside space %s must first be deleted, or specify recursive delete." 
func IsNonrecursiveSpaceDeletionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290011 } // IsBitsServiceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290012 // - HTTP code: 500 // - message: "The bits service returned an error: %s" func IsBitsServiceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290012 } // IsSpaceRolesDeletionTimeoutError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290013 // - HTTP code: 524 // - message: "Deletion of roles for space %s timed out before all roles could be deleted" func IsSpaceRolesDeletionTimeoutError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290013 } // IsOrganizationRolesDeletionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290014 // - HTTP code: 502 // - message: "Failed to delete one or more roles for organization %s" func IsOrganizationRolesDeletionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290014 } // IsSpaceRolesDeletionFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 290016 // - HTTP code: 502 // - message: "Failed to delete one or more roles for space %s" func IsSpaceRolesDeletionFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 290016 } // IsSecurityGroupInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 300001 // - HTTP code: 400 // - message: "The security group is invalid: %s" func IsSecurityGroupInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 300001 } // IsSecurityGroupNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 300002 // - HTTP code: 404 // - message: "The security group could not be found: %s" func IsSecurityGroupNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 300002 } // IsSecurityGroupStagingDefaultInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 300003 // - HTTP code: 400 // - message: "The security group could not be found: %s" func IsSecurityGroupStagingDefaultInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 300003 } // IsSecurityGroupRunningDefaultInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 300004 // - HTTP code: 400 // - message: "The security group could not be found: %s" func IsSecurityGroupRunningDefaultInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 300004 } // 
IsSecurityGroupNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 300005 // - HTTP code: 400 // - message: "The security group name is taken: %s" func IsSecurityGroupNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 300005 } // IsSpaceQuotaDefinitionInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310001 // - HTTP code: 400 // - message: "Space Quota Definition is invalid: %s" func IsSpaceQuotaDefinitionInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310001 } // IsSpaceQuotaDefinitionNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310002 // - HTTP code: 400 // - message: "The space quota definition name is taken: %s" func IsSpaceQuotaDefinitionNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310002 } // IsSpaceQuotaMemoryLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310003 // - HTTP code: 400 // - message: "You have exceeded your space's memory limit: %s" func IsSpaceQuotaMemoryLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310003 } // IsSpaceQuotaInstanceMemoryLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310004 // - HTTP code: 400 // - message: "You have exceeded the instance memory limit for your space's quota." func IsSpaceQuotaInstanceMemoryLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310004 } // IsSpaceQuotaTotalRoutesExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310005 // - HTTP code: 400 // - message: "You have exceeded the total routes for your space's quota." func IsSpaceQuotaTotalRoutesExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310005 } // IsOrgQuotaTotalRoutesExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310006 // - HTTP code: 400 // - message: "You have exceeded the total routes for your organization's quota." 
func IsOrgQuotaTotalRoutesExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310006 } // IsSpaceQuotaDefinitionNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310007 // - HTTP code: 404 // - message: "Space Quota Definition could not be found: %s" func IsSpaceQuotaDefinitionNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310007 } // IsSpaceQuotaInstanceLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310008 // - HTTP code: 400 // - message: "You have exceeded the instance limit for your space's quota." func IsSpaceQuotaInstanceLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310008 } // IsOrgQuotaTotalReservedRoutePortsExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310009 // - HTTP code: 400 // - message: "You have exceeded the total reserved route ports for your organization's quota." func IsOrgQuotaTotalReservedRoutePortsExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310009 } // IsSpaceQuotaTotalReservedRoutePortsExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 310010 // - HTTP code: 400 // - message: "You have exceeded the total reserved route ports for your space's quota." func IsSpaceQuotaTotalReservedRoutePortsExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 310010 } // IsDiegoDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 320001 // - HTTP code: 400 // - message: "Diego has not been enabled." func IsDiegoDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 320001 } // IsDiegoDockerBuildpackConflictError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 320002 // - HTTP code: 400 // - message: "You cannot specify a custom buildpack and a docker image at the same time." func IsDiegoDockerBuildpackConflictError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 320002 } // IsDockerDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 320003 // - HTTP code: 400 // - message: "Docker support has not been enabled." func IsDockerDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 320003 } // IsStagingBackendInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 320004 // - HTTP code: 403 // - message: "The request staging completion endpoint only handles apps desired to stage on the Diego backend." 
func IsStagingBackendInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 320004 } // IsBackendSelectionNotAuthorizedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 320005 // - HTTP code: 403 // - message: "You cannot select the backend on which to run this application" func IsBackendSelectionNotAuthorizedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 320005 } // IsRevisionsEnabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 320006 // - HTTP code: 400 // - message: "V2 restaging is disabled when your app has revisions enabled" func IsRevisionsEnabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 320006 } // IsFeatureFlagNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 330000 // - HTTP code: 404 // - message: "The feature flag could not be found: %s" func IsFeatureFlagNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 330000 } // IsFeatureFlagInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 330001 // - HTTP code: 400 // - message: "The feature flag is invalid: %s" func IsFeatureFlagInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 330001 } // IsFeatureDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 330002 // - HTTP code: 403 // - message: "Feature Disabled: %s" func IsFeatureDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 330002 } // IsUserProvidedServiceInstanceNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 340001 // - HTTP code: 404 // - message: "The service instance could not be found: %s" func IsUserProvidedServiceInstanceNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 340001 } // IsUserProvidedServiceInstanceHandlerNeededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 340002 // - HTTP code: 400 // - message: "Please use the User Provided Services API to manage this resource." 
func IsUserProvidedServiceInstanceHandlerNeededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 340002 } // IsProcessInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 350001 // - HTTP code: 400 // - message: "The process is invalid: %s" func IsProcessInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 350001 } // IsUnableToDeleteError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 350002 // - HTTP code: 400 // - message: "Unable to perform delete action: %s" func IsUnableToDeleteError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 350002 } // IsProcessNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 350003 // - HTTP code: 404 // - message: "The process could not be found: %s" func IsProcessNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 350003 } // IsServiceKeyNameTakenError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 360001 // - HTTP code: 400 // - message: "The service key name is taken: %s" func IsServiceKeyNameTakenError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 360001 } // IsServiceKeyInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 360002 // - HTTP code: 400 // - message: "The service key is invalid: %s" func IsServiceKeyInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 360002 } // IsServiceKeyNotFoundError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 360003 // - HTTP code: 404 // - message: "The service key could not be found: %s" func IsServiceKeyNotFoundError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 360003 } // IsServiceKeyNotSupportedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 360004 // - HTTP code: 400 // - message: "%s" func IsServiceKeyNotSupportedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 360004 } // IsServiceKeyCredentialStoreUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 360005 // - HTTP code: 503 // - message: "Credential store is unavailable" func IsServiceKeyCredentialStoreUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 360005 } // IsRoutingApiUnavailableError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 370001 // - HTTP code: 503 // - message: "The Routing API is currently 
unavailable" func IsRoutingApiUnavailableError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 370001 } // IsRoutingApiDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 370003 // - HTTP code: 403 // - message: "Routing API is disabled" func IsRoutingApiDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 370003 } // IsEnvironmentVariableGroupInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 380001 // - HTTP code: 400 // - message: "The Environment Variable Group is invalid: %s" func IsEnvironmentVariableGroupInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 380001 } // IsDropletUploadInvalidError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 380002 // - HTTP code: 400 // - message: "The droplet upload is invalid: %s" func IsDropletUploadInvalidError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 380002 } // IsServiceInstanceUnshareFailedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390001 // - HTTP code: 502 // - message: "Unshare of service instance failed: \n\n%s" func IsServiceInstanceUnshareFailedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390001 } // IsServiceInstanceDeletionSharesExistsError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390002 // - HTTP code: 422 // - message: "Service instances must be unshared before they can be deleted. Unsharing %s will automatically delete any bindings that have been made to applications in other spaces." 
func IsServiceInstanceDeletionSharesExistsError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390002 } // IsSharedServiceInstanceCannotBeRenamedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390003 // - HTTP code: 422 // - message: "Service instances that have been shared cannot be renamed" func IsSharedServiceInstanceCannotBeRenamedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390003 } // IsSharedServiceInstanceNotUpdatableInTargetSpaceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390004 // - HTTP code: 403 // - message: "You cannot update service instances that have been shared with you" func IsSharedServiceInstanceNotUpdatableInTargetSpaceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390004 } // IsSharedServiceInstanceNotDeletableInTargetSpaceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390005 // - HTTP code: 403 // - message: "You cannot delete service instances that have been shared with you" func IsSharedServiceInstanceNotDeletableInTargetSpaceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390005 } // IsMaintenanceInfoNotSupportedError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390006 // - HTTP code: 422 // - message: "The service broker does not support upgrades for service instances created from this plan." func IsMaintenanceInfoNotSupportedError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390006 } // IsMaintenanceInfoNotSemverError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390007 // - HTTP code: 422 // - message: "maintenance_info.version should be a semantic version." func IsMaintenanceInfoNotSemverError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390007 } // IsMaintenanceInfoNotUpdatableWhenChangingPlanError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390008 // - HTTP code: 422 // - message: "maintenance_info should not be changed when switching to different plan." func IsMaintenanceInfoNotUpdatableWhenChangingPlanError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390008 } // IsMaintenanceInfoConflictError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390009 // - HTTP code: 422 // - message: "maintenance_info.version requested is invalid. Please ensure the catalog is up to date and you are providing a version supported by this service plan." 
func IsMaintenanceInfoConflictError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390009 } // IsBuildpackStacksDontMatchError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390011 // - HTTP code: 422 // - message: "Uploaded buildpack stack (%s) does not match %s" func IsBuildpackStacksDontMatchError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390011 } // IsBuildpackStackDoesNotExistError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390012 // - HTTP code: 422 // - message: "Uploaded buildpack stack (%s) does not exist" func IsBuildpackStackDoesNotExistError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390012 } // IsBuildpackZipError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390013 // - HTTP code: 422 // - message: "Buildpack zip error: %s" func IsBuildpackZipError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390013 } // IsDeploymentsDisabledError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390014 // - HTTP code: 403 // - message: "Deployments cannot be created due to manifest property 'temporary_disable_deployments'" func IsDeploymentsDisabledError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390014 } // IsNoCurrentEncryptionKeyError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390015 // - HTTP code: 422 // - message: "Please set the desired encryption key in the manifest at ‘cc.database_encryption.current_key_label’" func IsNoCurrentEncryptionKeyError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390015 } // IsScaleDisabledDuringDeploymentError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390016 // - HTTP code: 422 // - message: "Cannot scale this process while a deployment is in flight." func IsScaleDisabledDuringDeploymentError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390016 } // IsProcessUpdateDisabledDuringDeploymentError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390017 // - HTTP code: 422 // - message: "Cannot update this process while a deployment is in flight." 
func IsProcessUpdateDisabledDuringDeploymentError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390017 } // IsLabelLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390020 // - HTTP code: 422 // - message: "Failed to add %d labels because it would exceed maximum of %d" func IsLabelLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390020 } // IsAnnotationLimitExceededError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390023 // - HTTP code: 422 // - message: "Failed to add %d annotations because it would exceed maximum of %d" func IsAnnotationLimitExceededError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390023 } // IsStopDisabledDuringDeploymentError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 390024 // - HTTP code: 422 // - message: "Cannot stop the app while it is deploying, please cancel the deployment before stopping the app." func IsStopDisabledDuringDeploymentError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 390024 } // IsKubernetesRouteResourceError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 400001 // - HTTP code: 422 // - message: "Failed to create/update/delete Route resource with guid '%s' on Kubernetes" func IsKubernetesRouteResourceError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 400001 } // IsKpackImageError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 400002 // - HTTP code: 422 // - message: "Failed to %s Image resource for staging: '%s'" func IsKpackImageError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 400002 } // IsKpackBuilderError returns a boolean indicating whether // the error is known to report the Cloud Foundry error: // - Cloud Foundry code: 400003 // - HTTP code: 422 // - message: "Failed to %s Builder resource: '%s'" func IsKpackBuilderError(err error) bool { cause := errors.Cause(err) cferr, ok := cause.(CloudFoundryError) if !ok { return false } return cferr.Code == 400003 }
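// A minimal usage sketch, assuming it lives in the same package as the
// predicates above: every predicate unwraps the cause via errors.Cause and
// compares the CloudFoundryError code, so callers can branch on specific
// Cloud Foundry failures instead of matching error strings. The pushApp
// callback below is a hypothetical stand-in, not part of this package.
func retryOnceIfPlatformUnavailable(pushApp func() error) error {
	err := pushApp()
	if err == nil {
		return nil
	}
	// 503-class errors (stager or runner unavailable) are transient and worth
	// a single retry; everything else is returned to the caller unchanged.
	if IsStagerUnavailableError(err) || IsRunnerUnavailableError(err) {
		return pushApp()
	}
	return err
}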
/**
 * @author Oleg Marchenko
 */
public class ParameterMetadata extends NativeElement<VariableElement> {

    public ParameterMetadata(VariableElement element) {
        super(element);
    }
}
package testhotelapp;

public class Klant {

    private String naam;

    public Klant(String naam) {
        this.naam = naam;
    }
}
import { KV } from 'worktop/kv'
import { User } from './user'
import type { OpenAPI } from 'openapi-types'
import { isValidSchema } from './validation'

declare const KV_SCHEMAS: KV.Namespace

interface Metadata {
  owner: string
  key: string
  id: string
  namespaceId: string
  name: string
  version: string
  storedAt: number
  path: string
  access?: 'PUBLIC' | 'PRIVATE'
}

export const SCHEMA_METADATA_HEADER_NAME = 'oasis-schema'

const getSchemaKey = (
  userName: string,
  namespaceId?: string,
  schemaId?: string,
) => [userName, namespaceId, schemaId].filter(Boolean).join(':')

const getSchemaPath = (key: string) => key.replaceAll(':', '/')

export const createSchema = async (
  user: User,
  namespaceId: string,
  schemaId: string,
  schemaData: unknown,
): Promise<{ schema: OpenAPI.Document; metadata: Metadata }> => {
  if (!isValidSchema(schemaData)) {
    throw new Error('Data is not a valid Swagger or OpenAPI schema')
  }
  const key = getSchemaKey(user.login, namespaceId, schemaId)
  const metadata = {
    key,
    id: schemaId,
    owner: user.id,
    namespaceId: namespaceId,
    name: schemaData.info.title,
    version: schemaData.info.version,
    storedAt: Date.now(),
    path: getSchemaPath(key),
    access: 'PRIVATE' as const,
  }
  await KV_SCHEMAS.put<Metadata>(key, JSON.stringify(schemaData), { metadata })
  return { schema: schemaData, metadata }
}

export const getSchema = async (
  userName: string,
  namespaceId: string,
  schemaId: string,
): Promise<{ schema: OpenAPI.Document | null; metadata: Metadata | null }> => {
  const key = getSchemaKey(userName, namespaceId, schemaId)
  const data = await KV_SCHEMAS.getWithMetadata<OpenAPI.Document, Metadata>(
    key,
    { type: 'json' },
  )
  return { schema: data?.value ?? null, metadata: data?.metadata ?? null }
}

export const getSchemaList = async (
  user: User,
  namespaceId?: string,
): Promise<KV.KeyInfo<Metadata>[]> => {
  const schemaList = await KV_SCHEMAS.list<Metadata>({
    prefix: getSchemaKey(user.login, namespaceId) + ':',
  })
  return schemaList.keys
}

export const deleteSchemaRaw = (key: string): Promise<void> =>
  KV_SCHEMAS.delete(key)

export const deleteSchema = async (
  user: User,
  namespaceId: string,
  schemaId: string,
): Promise<boolean> => {
  const { schema } = await getSchema(user.login, namespaceId, schemaId)
  if (schema) {
    await deleteSchemaRaw(getSchemaKey(user.login, namespaceId, schemaId))
    return true
  }
  return false
}

export const modifySchemaAccess = async (
  user: User,
  namespaceId: string,
  schemaId: string,
  access: Metadata['access'],
): Promise<{ schema: OpenAPI.Document | null; metadata: Metadata | null }> => {
  const { schema, metadata } = await getSchema(
    user.login,
    namespaceId,
    schemaId,
  )
  if (schema && metadata) {
    const newMetadata = {
      ...metadata,
      access,
    }
    await KV_SCHEMAS.put<Metadata>(metadata.key, JSON.stringify(schema), {
      metadata: newMetadata,
    })
    return { schema, metadata: newMetadata }
  }
  return { schema, metadata }
}
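// A minimal usage sketch, assuming the same module scope as the helpers above.
// The `user` argument and the inline schema document are hypothetical example
// values; only createSchema, modifySchemaAccess and getSchemaList come from
// this module.
export const exampleSchemaFlow = async (user: User): Promise<void> => {
  // Store a tiny OpenAPI document under the key <login>:petstore:v1.
  const { metadata } = await createSchema(user, 'petstore', 'v1', {
    openapi: '3.0.0',
    info: { title: 'Petstore', version: '1.0.0' },
    paths: {},
  })

  // New schemas start out PRIVATE; expose this one publicly.
  await modifySchemaAccess(user, 'petstore', 'v1', 'PUBLIC')

  // List every schema stored in this user's `petstore` namespace.
  const keys = await getSchemaList(user, 'petstore')
  console.log(metadata.path, keys.length)
}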
<reponame>aogburn/wildfly-elytron<filename>x500/cert/acme/src/main/java/org/wildfly/security/x500/cert/acme/CertificateAuthority.java /* * JBoss, Home of Professional Open Source. * Copyright 2019 Red Hat, Inc., and individual contributors * as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.wildfly.security.x500.cert.acme; import org.wildfly.common.Assert; /** * A class that represents an <a href="https://tools.ietf.org/html/draft-ietf-acme-acme-18.txt">Automatic Certificate * Management Environment (ACME)</a> certificate authority endpoint. * * @author <a href="mailto:<EMAIL>"><NAME></a> * @since 1.9.0 */ public class CertificateAuthority { private static final String DIRECTORY = "directory"; private static final String LETS_ENCRYPT_STAGING_URL = "https://acme-staging-v02.api.letsencrypt.org/" + DIRECTORY; private static final String LETS_ENCRYPT_URL = "https://acme-v02.api.letsencrypt.org/" + DIRECTORY; private String name; private String url; private String stagingUrl; public static final CertificateAuthority LETS_ENCRYPT = new CertificateAuthority("LetsEncrypt", LETS_ENCRYPT_URL, LETS_ENCRYPT_STAGING_URL); public CertificateAuthority(String name, String url, String stagingUrl) { this.name = name; this.url = url; this.stagingUrl = stagingUrl; } /** * Get the default certificate authority endpoint. * * @return LETS_ENCRYPT certificate authority holding Let's Encrypt URLs */ public static CertificateAuthority getDefault() { return LETS_ENCRYPT; } /** * Get the name of certificate authority. * * @return name of the certificate authority */ public String getName() { return name; } /** * Get the certificate authority URL * * @return certificate authority URL */ public String getUrl() { return url; } /** * Get the certificate authority staging URL * * @return certificate authority staging URL */ public String getStagingUrl() { return stagingUrl; } /** * Set the name of certificate authority. * * @param name the name of certificate authority (must not be {@code null}) */ public void setName(String name) { Assert.checkNotNullParam("name", name); this.name = name; } /** * Set the URL of certificate authority. * * @param url URL of certificate authority (must not be {@code null}) */ public void setUrl(String url) { Assert.checkNotNullParam("url", url); this.url = url; } /** * Set the staging URL of certificate authority. * * @param stagingUrl staging URL of certificate authority */ public void setStagingUrl(String stagingUrl) { this.stagingUrl = stagingUrl; } }
def run(self):
    if self._show_help():
        self._print_help()
        return

    correlation_id = self._info.name
    path = self._get_config_path()
    parameters = self._get_parameters()
    self.read_config_from_file(correlation_id, path, parameters)

    self._capture_errors(correlation_id)
    self.open(correlation_id)
    self._capture_exit(correlation_id)
    self.close(correlation_id)
// // Created by xetql on 13.12.17. // #ifndef NBMPI_UTILS_HPP #define NBMPI_UTILS_HPP #include <ctime> #include <vector> #include <stdexcept> #include <iomanip> #include <functional> #include <cassert> #include <cmath> #include <limits> #include <iostream> #include <fstream> #include <random> #include <cstring> #include <variant> #include "type.hpp" #ifdef DEBUG #define print(x) std::cout << (#x) <<" in "<< __FILE__ << ":"<< __LINE__ << "("<< __PRETTY_FUNCTION__<< ") = " << (x) << std::endl; #endif template<typename T> constexpr auto convert(T&& t) { if constexpr (std::is_same<std::remove_cv_t<std::remove_reference_t<T>>, std::string>::value) { return std::forward<T>(t).c_str(); } else { return std::forward<T>(t); } } /** * printf like formatting for C++ with std::string * Original source: https://stackoverflow.com/a/26221725/11722 */ template<typename ... Args> std::string stringFormatInternal(const std::string& format, Args&& ... args) { size_t size = snprintf(nullptr, 0, format.c_str(), std::forward<Args>(args) ...) + 1; if( size <= 0 ){ throw std::runtime_error( "Error during formatting." ); } std::unique_ptr<char[]> buf(new char[size]); snprintf(buf.get(), size, format.c_str(), args ...); return std::string(buf.get(), buf.get() + size - 1); } template<typename ... Args> std::string fmt(std::string fmt, Args&& ... args) { return stringFormatInternal(fmt, convert(std::forward<Args>(args))...); } inline std::string get_date_as_string(); bool file_exists(const std::string fileName); std::vector<std::string> split(const std::string &s, char delimiter); template <typename T, typename... Args> struct concatenator; template <typename... Args0, typename... Args1> struct concatenator<std::variant<Args0...>, Args1...> { using type = std::variant<Args0..., Args1...>; }; template<class T> struct MESH_DATA { std::vector<T> els; }; const double CUTOFF_RADIUS_FACTOR = 4.0; template<unsigned N> std::array<Real, 2*N> get_simbox(Real simwidth){ if constexpr (N==2) return {0,simwidth, 0,simwidth}; if constexpr (N==3) return {0,simwidth, 0,simwidth, 0,simwidth}; } template<unsigned N> std::array<Real, N> get_box_center(const std::array<Real, 2*N>& box){ std::array<Real, N> center{}; for(unsigned i = 0; i < N; ++i) { center[i] = (box.at(2*i) + box.at(2*i+1)) / static_cast<Real>(2.0); } return center; } template<unsigned N> std::array<Real, N> get_box_width(const std::array<Real, 2*N>& box){ std::array<Real, N> widths{}; for(unsigned i = 0; i < N; ++i){ widths[i] = (box.at(2*i+1) - box.at(2*i)); } return widths; } template<int N> std::array<double, N> get_as_double_array(const std::array<Real, N>& real_array){ if constexpr(N==2) return {(double) real_array[0], (double) real_array[1]}; else return {(double) real_array[0], (double) real_array[1], (double) real_array[2]}; } template<int N> inline void put_in_double_array(std::array<double, N>& double_array, const std::array<Real, N>& real_array){ double_array[0] = real_array[0]; double_array[1] = real_array[1]; if constexpr (N==3) double_array[2] = real_array[2]; } template<int N> inline void put_in_3d_double_array(std::array<double, 3>& double_array, const std::array<Real, N>& real_array){ double_array[0] = real_array[0]; double_array[1] = real_array[1]; if constexpr (N==3) double_array[2] = real_array[2]; else double_array[2] = 0.0; } template<int N, class F> inline void map(std::array<double, N>& double_array, F f){ double_array[0] = f(double_array[0]); double_array[1] = f(double_array[1]); if constexpr (N==3) double_array[2] = f(double_array[2]); } 
template<class T> inline void update_local_ids(std::vector<T>& els, std::function<void (T&, Integer)> setLidF) { Integer i = 0; for(auto& el : els) setLidF(els->at(i), i++); } template<class IntegerType, typename = std::enable_if<std::numeric_limits<IntegerType>::is_integer>> inline IntegerType bitselect(IntegerType condition, IntegerType truereturnvalue, IntegerType falsereturnvalue) { return (truereturnvalue & -condition) | (falsereturnvalue & ~(-condition)); //a when TRUE } // C++ template to print vector container elements template <typename T, size_t N> std::ostream& operator<<(std::ostream& os, const std::array<T, N>& v) { for (int i = 0; i < N; ++i) { os << v[i]; if (i != v.size() - 1) os << ","; } return os; } template <typename T> std::ostream& operator<<(std::ostream& os, const std::vector<T>& v) { const auto s = v.size(); for (int i = 0; i < s; ++i) { os << v[i]; if (i != s - 1) os << " "; } return os; } template<typename T> T dto(double v) { T ret = (T) v; if(std::isinf(ret)){ if(ret == -INFINITY){ ret = std::numeric_limits<T>::lowest(); } else { ret = std::numeric_limits<T>::max(); } } return ret; } template<unsigned N> using BoundingBox = std::array<Real, 2*N>; template<unsigned D, unsigned N> Real get_size(const BoundingBox<N>& bbox) { return std::max(static_cast<Real>(0.0), bbox.at(2*D+1) - bbox.at(2*D)); } template<unsigned D, unsigned N> constexpr Real get_min_dim(const BoundingBox<N>& bbox) { return bbox.at(2*D); } template<unsigned D, unsigned N> constexpr Real get_max_dim(const BoundingBox<N>& bbox) { return bbox.at(2*D+1); } template<int N> bool is_within(const BoundingBox<N>& bbox, std::array<Real, N>& xyz){ bool within = true; for(int i = 0; i < N; i++){ within = within && (bbox[2*i] <= xyz[i]) && (xyz[i] < bbox[2*i+1]); } return within; } template<int N, class GetPosFunc> void update_bbox_for_container(BoundingBox<N>& new_bbox, GetPosFunc getPosFunc) {} template <int N, class GetPosFunc, class First, class... Rest> void update_bbox_for_container(BoundingBox<N>& new_bbox, GetPosFunc getPosFunc, First& first, Rest&... rest) { Real pos; for(int i = 0; i < N; ++i) { for (auto &el : first) { pos = (getPosFunc(&el))->at(i); new_bbox.at(2*i) = std::min(new_bbox.at(2*i), pos); new_bbox.at(2*i+1) = std::max(new_bbox.at(2*i+1), pos); } } update_bbox_for_container<N>(new_bbox, getPosFunc, rest...); } template<class T> void apply_resize_strategy(std::vector<T>* vec, size_t required_size) { /*auto current_size = vec->size(); auto current_capacity = vec->capacity(); if(current_size < required_size) { vec->reserve(2 * required_size); // the capacity is twice the req size } else if(current_capacity >= 4.0 * required_size) { vec->resize(current_capacity / 2); // resize to 2*req size vec->shrink_to_fit(); // shrink to fit to 2*req size }*/ vec->resize(required_size); // the size is the one we needed } template<int N, class GetPosFunc, class... T> BoundingBox<N> get_bounding_box(Real rc, GetPosFunc getPosFunc, T&... 
elementContainers){ BoundingBox<N> new_bbox; if constexpr (N==3) { new_bbox = {std::numeric_limits<Real>::max(), std::numeric_limits<Real>::lowest(), std::numeric_limits<Real>::max(), std::numeric_limits<Real>::lowest(), std::numeric_limits<Real>::max(), std::numeric_limits<Real>::lowest()}; } else { new_bbox = {std::numeric_limits<Real>::max(), std::numeric_limits<Real>::lowest(), std::numeric_limits<Real>::max(), std::numeric_limits<Real>::lowest()}; } update_bbox_for_container<N>(new_bbox, getPosFunc, elementContainers...); /* hook to grid, resulting bbox is divisible by lc[i] forall i */ Real radius = rc; for(int i = 0; i < N; ++i) { new_bbox.at(2*i) = std::floor((new_bbox.at(2*i)) / rc) * rc - radius; new_bbox.at(2*i+1) = std::ceil((new_bbox.at(2*i+1)) / rc) * rc + radius; } return new_bbox; } template<int N, class GetPosFunc, class... T> BoundingBox<N> update_bounding_box(BoundingBox<N>& bbox, Real rc, GetPosFunc getPosFunc, T&... elementContainers){ update_bbox_for_container<N>(bbox, getPosFunc, elementContainers...); /* hook to grid, resulting bbox is divisible by lc[i] forall i */ Real radius = rc; for(int i = 0; i < N; ++i) { bbox.at(2*i) = std::floor((bbox.at(2*i)) / rc) * rc - radius; bbox.at(2*i+1) = std::ceil((bbox.at(2*i+1)) / rc) * rc + radius; } return bbox; } template<int N> inline std::array<Integer, N> get_cell_number_by_dimension(const BoundingBox<N>& bbox, Real rc) { std::array<Integer, N> lc; lc [0] = std::ceil(get_size<0, N>(bbox) / rc); lc [1] = std::ceil(get_size<1, N>(bbox) / rc); if constexpr(N==3) lc [2] = std::ceil(get_size<2, N>(bbox) / rc); return lc; } template<int N> Integer get_total_cell_number(const BoundingBox<N>& bbox, Real rc){ auto lc = get_cell_number_by_dimension<N>(bbox, rc); return std::accumulate(lc.begin(), lc.end(), (Integer) 1, [](auto prev, auto v){return prev * v;}); } template<int N> Integer position_to_local_cell_index(std::array<Real, N> const &position, Real rc, const BoundingBox<N>& bbox, const Integer c, const Integer r){ if constexpr(N==3) { return ((position.at(0) - bbox[0]) / rc) + c * ((Integer) ((position.at(1) - bbox[2]) / rc)) + c * r * ((Integer) std::floor((position.at(2) - bbox[4]) / rc)); } else { return ((position.at(0) - bbox[0]) / rc) + c * ((Integer) ((position.at(1) - bbox[2]) / rc)); } } template<int N> inline Integer position_to_cell(std::array<Real, N> const &position, const Real lsub, const Integer c, const Integer r = 0) { Integer idx = (Integer) std::floor(position.at(0) / lsub); idx += c * (Integer) std::floor(position.at(1) / lsub); if constexpr(N==3) idx += c * r * (Integer) std::floor(position.at(2) / lsub); return idx; } namespace math{ template<class InputIt> typename InputIt::value_type median(InputIt beg, InputIt end) { using T = typename InputIt::value_type; if(beg == end) return (T) 0.0; const auto N = std::distance(beg, end); std::vector<T> values(beg, end); std::nth_element(values.begin(), values.begin() + N / 2, values.end()); return values.at(N/2); } template<class InputIt> typename InputIt::value_type mean(InputIt beg, InputIt end) { using T = typename InputIt::value_type; const auto N = std::distance(beg, end); return N > 0 ? std::accumulate(beg, end, (T) 0.0) / N : 0.0; } } namespace functional { template<typename R> inline R slice(R const &v, size_t slice_start, size_t slice_size) { size_t slice_max_size = v.size(); slice_size = slice_size > slice_max_size ? 
slice_max_size : slice_size + 1; R s(v.begin() + slice_start, v.begin() + slice_size); return s; } template<typename To, template<typename...> class R=std::vector, typename StlFrom, typename F> R<To> map(StlFrom const &all, F const &map_func) { using std::begin; using std::end; R<To> accum; for (typename StlFrom::const_iterator it = begin(all); it != end(all); ++it) { std::back_insert_iterator<R<To> > back_it(accum); back_it = map_func(*it); } return accum; } template<typename To, template<typename...> class R=std::vector, typename StlFrom, typename ScanRightFunc> R<To> scan_left(StlFrom const &all, ScanRightFunc const &scan_func, To init_value) { using std::begin; using std::end; R<To> accum; std::back_insert_iterator<R<To> > back_it(accum); back_it = init_value; for (typename StlFrom::const_iterator it = begin(all); it != end(all); ++it) { std::back_insert_iterator<R<To> > back_it(accum); back_it = scan_func(*(end(accum) - 1), *it); } return accum; } template<class InputIt, class OutputIt, class To, class BinaryFunc> void scan_left(InputIt beg, InputIt end, OutputIt out, To init_value, BinaryFunc f) { *out = init_value; while(beg != end) { *(out+1) = f(*out, *beg); beg++; out++; } } template<class InputIt, class To, class BinaryFunc> To reduce(InputIt beg, InputIt end, To init_value, BinaryFunc f) { To accum = init_value; while(beg != end){ accum = f(accum, *beg); beg++; } return accum; } template<typename A, typename B> std::vector<std::pair<A, B>> zip(const std::vector<A> &a, const std::vector<B> &b) { std::vector<std::pair<A, B>> zipAB; int sizeAB = a.size(); for (int i = 0; i < sizeAB; ++i) zipAB.push_back(std::make_pair(a.at(i), b.at(i))); return zipAB; } template<typename A, typename B, template<typename...> class I1=std::vector, template<typename...> class R1=std::vector, template<typename...> class R2=std::vector> std::pair<R1<A>, R2<B>> unzip(const I1<std::pair<A, B>> &ab) { R1<A> left; R2<B> right; int sizeAB = ab.size(); for (int i = 0; i < sizeAB; ++i) { auto pair = ab.at(i); left.push_back(pair.first); right.push_back(pair.second); } return std::make_pair(left, right); } /** * Copied from https://stackoverflow.com/questions/17294629/merging-flattening-sub-vectors-into-a-single-vector-c-converting-2d-to-1d * @tparam R Return Container class * @tparam Top Top container class from the container * @tparam Sub Sub class deduced from the original container * @param all Container that contains the sub containers * @return flattened container */ template<template<typename...> class R=std::vector, typename Top, typename Sub = typename Top::value_type> R<typename Sub::value_type> flatten(Top const &all) { using std::begin; using std::end; R<typename Sub::value_type> accum; for (auto &sub : all) std::copy(begin(sub), end(sub), std::inserter(accum, end(accum))); return accum; } } namespace statistic { template<class RealType> std::tuple<RealType, RealType, RealType> sph2cart(RealType azimuth, RealType elevation, RealType r) { RealType x = r * std::cos(elevation) * std::cos(azimuth); RealType y = r * std::cos(elevation) * std::sin(azimuth); RealType z = r * std::sin(elevation); return std::make_tuple(x, y, z); } template<int N, class RealType> class UniformSphericalDistribution { const RealType sphere_radius, spherex, spherey, spherez; public: UniformSphericalDistribution(RealType sphere_radius, RealType spherex, RealType spherey, RealType spherez) : sphere_radius(sphere_radius), spherex(spherex), spherey(spherey), spherez(spherez) {} UniformSphericalDistribution(RealType sphere_radius, 
std::array<RealType, N> center) : sphere_radius(sphere_radius), spherex(center.at(0)), spherey(center.at(1)), spherez(N==3 ? center.at(2) : 0) {} std::array<RealType, N> operator()(std::mt19937 &gen) { RealType a = sphere_radius, b = 0.0; std::uniform_real_distribution<RealType> udist(0.0, 1.0); RealType r1 = std::pow((udist(gen) * (std::pow(b, 3) - std::pow(a, 3)) + std::pow(a, 3)), 1.0 / 3.0); RealType ph1 = std::acos(-1.0 + 2.0 * udist(gen)); RealType th1 = 2.0 * M_PI * udist(gen); auto p = std::make_tuple<RealType, RealType, RealType> ( r1 * std::sin(ph1) * std::sin(th1), r1 * std::sin(ph1) * std::cos(th1), r1 * std::cos(ph1) ); if constexpr (N == 3) return {(std::get<0>(p)) + spherex, std::get<1>(p) + spherey, std::get<2>(p) + spherez}; else return {std::get<0>(p) + spherex, std::get<1>(p) + spherey}; } }; template<int N, class RealType> class UniformOnSphereEdgeDistribution { const RealType sphere_radius, spherex, spherey, spherez; public: UniformOnSphereEdgeDistribution(RealType sphere_radius, RealType spherex, RealType spherey, RealType spherez) : sphere_radius(sphere_radius), spherex(spherex), spherey(spherey), spherez(spherez) {} UniformOnSphereEdgeDistribution(RealType sphere_radius, std::array<RealType, N> center) : sphere_radius(sphere_radius), spherex(center.at(0)), spherey(center.at(1)), spherez(N==3 ? center.at(2) : 0) {} std::array<RealType, N> operator()(std::mt19937 &gen) { RealType a = sphere_radius, b = 0.0; std::uniform_real_distribution<RealType> udist(0.0, 1.0); RealType r1 = sphere_radius; RealType ph1 = std::acos(-1.0 + 2.0 * udist(gen)); RealType th1 = 2.0 * M_PI * udist(gen); auto p = std::make_tuple<RealType, RealType, RealType> ( r1 * std::sin(ph1) * std::sin(th1), r1 * std::sin(ph1) * std::cos(th1), r1 * std::cos(ph1) ); if constexpr (N == 3) return {(std::get<0>(p)) + spherex, std::get<1>(p) + spherey, std::get<2>(p) + spherez}; else return {std::get<0>(p) + spherex, std::get<1>(p) + spherey}; } }; template<int N, class RealType> class NormalSphericalDistribution { const RealType sphere_size, spherex, spherey, spherez; public: NormalSphericalDistribution(RealType sphere_size, RealType spherex, RealType spherey, RealType spherez) : sphere_size(sphere_size), spherex(spherex), spherey(spherey), spherez(spherez) {} std::array<RealType, N> operator()(std::mt19937 &gen) { std::array<RealType, N> res; std::normal_distribution<RealType> ndistx(spherex, sphere_size / 2.0); // could do better std::normal_distribution<RealType> ndisty(spherey, sphere_size / 2.0); // could do better if (N == 3) { RealType x, y, z; do { std::normal_distribution<RealType> ndistz(spherez, sphere_size / 2.0); // could do better x = ndistx(gen); y = ndisty(gen); z = ndistz(gen); res[0] = x; res[1] = y; res[2] = z; } while ( (spherex - x) * (spherex - x) + (spherey - y) * (spherey - y) + (spherez - z) * (spherez - z) <= (sphere_size * sphere_size / 4.0)); } else { RealType x, y; do { x = ndistx(gen); y = ndisty(gen); res[0] = x; res[1] = y; } while ((spherex - x) * (spherex - x) + (spherey - y) * (spherey - y) <= (sphere_size * sphere_size / 4.0)); } return res; } }; /** * From http://www.tangentex.com/RegLin.htm * @tparam ContainerA * @tparam ContainerB * @param x x data * @param y y data * @return (a,b) of ax+b */ template<typename Realtype, typename ContainerA, typename ContainerB> std::pair<Realtype, Realtype> linear_regression(const ContainerA &x, const ContainerB &y) { int i; Realtype xsomme, ysomme, xysomme, xxsomme; Realtype ai, bi; xsomme = 0.0; ysomme = 0.0; xysomme = 0.0; xxsomme = 
0.0; const int n = x.size(); for (i = 0; i < n; i++) { xsomme = xsomme + x[i]; ysomme = ysomme + y[i]; xysomme = xysomme + x[i] * y[i]; xxsomme = xxsomme + x[i] * x[i]; } ai = (n * xysomme - xsomme * ysomme) / (n * xxsomme - xsomme * xsomme); bi = (ysomme - ai * xsomme) / n; return std::make_pair(ai, bi); } } // end of namespace statistic namespace io { Real str_to_real(const std::string& str); template<int N> std::array<Real, N> load_one(std::ifstream& instream){ std::array<Real, N> pos; std::string line, token; std::getline(instream, line); std::istringstream ss(line); for(int i=0; std::getline(ss, token, ',') && i < N; ++i) { pos.at(i) = str_to_real(token); } return pos; } } #endif //NBMPI_UTILS_HPP
INVERMERE, B.C. — Hundreds of homes have burned and nearly half a billion dollars has been spent, but officials say there’s no end in sight for British Columbia’s devastating wildfire season. Kevin Skrepnek with the B.C. Wildfire Service said Wednesday that many areas of the province are still tinder dry and in desperate need of rain. The forecast is calling for showers across the province in the coming days, but Skrepnek said that won’t be enough to douse the flames, especially in the southeastern part of B.C., where a number of aggressive fires are burning. “A sprinkle of rain is only going to provide temporary relief. We need a long, sustained soaking of rain right across the province,” he said. More than 1,200 wildfires have sparked across the province since April 1, charring 11,500 square kilometres of land. Chris Duffy with Emergency Management B.C. said the flames have also burned 431 structures, including 220 homes. While some of the structures were destroyed, others have been damaged, he said. The Cariboo Regional District, which includes the City of Williams Lake in central B.C., said this week that at least 62 homes and 140 outbuildings have been destroyed by several massive wildfires that broke out following lightning storms in early July. HIKER HITS WALL OF FLAMES Wildfires have also prompted officials to close off hiking trails and parks, but the prohibitions haven’t kept all adventurers out of danger. Columbia Valley RCMP Sgt. Bob Vatamaniuk said a hiker was plucked from a trail in the southeastern corner of the province when he encountered a wall of flames. Vatamaniuk said the man had been on the trail in Mount Assiniboine Provincial Park for six days and was low on energy and supplies when he realized he was in danger and used his handheld satellite communication system to call for help. “Knowing that he couldn’t turn around and walk another six days, he initiated his SOS request,” Vatamaniuk said. The man was airlifted to safety by a helicopter crew working on the nearby wildfire before a search and rescue team could reach him, Vatamaniuk said. Federal and provincial ministers met Tuesday in Vancouver to discuss B.C.’s record-breaking wildfire season and how they can help communities rebuild. Federal Agriculture Minister Lawrence MacAulay announced $20 million to help ranchers recover, and First Nations leaders called for more funding for emergency preparedness in their communities. LUMBER INDUSTRY TO TAKE A HIT British Columbia’s Forests Minister said Wednesday the historic wildfire season will have lingering implications for the logging industry. Doug Donaldson said an estimated 53 million cubic metres of timber has burned, which calls for short-term plans to salvage any wood that can still be sold, and long-term plans for reforestation. Susan Yurkovich, president of the B.C. Council of Forest Industries, said salvaging usable timber to allow mills and the communities around them to get back to work is a priority. Although mill infrastructure hasn’t been destroyed by fires, Yurkovich said the shortage of timber and wood fibre, especially in the Interior, has left companies unable to operate. She said the industry is working closely with the provincial government on recovery efforts, and discussions about changing forest-management practices are expected to continue beyond the current fire season.
def delete_ride(ride):
    reservationsToDelete = db.session.query(models.Reserve).filter(models.Reserve.ride_no == ride.ride_no)
    for reservation in reservationsToDelete:
        db.session.delete(reservation)
    db.session.commit()

    db.session.delete(ride)
    db.session.commit()
    flash("Ride cancelled.")
export declare function foo<T extends unknown[]>(): T;
Bond-buyback miracle-nonsense flops. Shares, CoCo bonds plunge. Deutsche Bank – “the most important net contributor to systemic risks,” as the IMF put it last week after a lag of several years – is having a rough time. Shares dropped 4.2% today to close at a new three-decade low of €11.63, down 48% since July 31 last year, lower even than the low during the doom-and-gloom days of the euro debt crisis and the Global Financial Crisis. It’s not the only European bank in trouble. Credit Suisse dropped 1.7% today to CHF 9.92, another multi-decade low, down 63% since July 31. Other European banks are getting mauled too. The European Stoxx 600 banking index dropped 3% today to 117.69, approaching the Financial Crisis low of March 2009. If July 31, 2015, keeps showing up, it’s because this was the propitious day when Draghi’s harebrained experiment with negative interest rates and massive QE came unglued, when European stocks, and particularly European bank stocks began to crash. Deutsche Bank is so shaky that German Finance Minister Wolfgang Schäuble found it necessary to stick his neck out and explain to Bloomberg in February that he has “no concerns about Deutsche Bank.” Finance ministers don’t say this sort of thing about healthy banks. At the time, CEO John Cryan – whose main job these days is propping up Deutsche Bank with his rhetoric – explained ostensibly to frazzled employees that the bank’s position was “absolutely rock-solid, given our strong capital and risk position.” Days later, he followed up his rhetoric with a stunning ruse: On February 12, the bank announced that it would buy back $5.4 billion of its own bonds, including some issued only a month earlier. “The bank is using market conditions to buy back these bonds at attractive prices and to cut debt,” CFO Marcus Schenck said at the time. “By buying them back below their issuance value, the bank is making a profit. The bank is also using its financial strength to provide liquidity to bond investors in a difficult market environment.” Shares soared 12% on the spot! Its bonds rocketed higher. Even its contingent convertible bonds, the infamous CoCo bonds, though they weren’t part of the buyback plan, bounced. For example, its €1.75 billion of 6% CoCo notes soared from a record low of 70 cents on the euro on February 9 to 87 cents by March – a 24% move! The ruse had worked! During the miracle rally, short sellers got their heads handed to them. But it was one of the silliest, most desperate ways to prop up shares and bonds. And now the bond-buyback miracle-nonsense rally has collapsed, with shares at a new multi-decade low, and with bonds swooning. This is what these 6% CoCo notes did: they plunged 5.7% today to 75 cents on the euro. Nearly the entire bond-buy-back miracle-nonsense rally has re-collapsed… These CoCo bonds are a gem. To prop up Tier 1 capital, Deutsche Bank raised nearly €20 billion in 2010 and 2014, by selling shares, which diluted existing shareholders, and by issuing “contingent convertible” bonds, spread over four issues in dollars, euros, and pounds. CoCo bonds, designed to be “bailed in” so that taxpayers don’t have to foot the entire bill, are a measure of how likely investors think a bail-in is. Cocos are perpetual: they have no maturity date, and investors may never get their money back. But the bank can redeem them, usually after five years. Annual coupon payments are contingent on the bank’s ability to keep its capital above certain thresholds. 
If the bank fails to make that coupon payment, investors cannot call a default; they have to sit there and get used to it. And if the bank’s capital drops below certain thresholds, CoCos get “bailed in” by getting converted into increasingly worthless shares. In return for these risks, CoCos offer a juicy annual coupon of 6% or higher. Investors, blinded by NIRP, jumped on them at the time. For example, the 6% euro CoCo notes traded at 104 cents on the euro in early 2014 shortly after they’d been issued, and at 102 in April 2015. Investors are ruing the day they didn’t sell! The crashing shares and CoCos have a gloomy importance. Deutsche Bank will need to raise more capital to rebuild its buffer, fund more bad-loan losses, and pay more legal settlements for wrong-doing that keeps oozing from the woodwork. To raise capital, it will need to sell more shares and CoCos. With both crashing, it’s going to be tough. It’ll dilute existing shareholders, who are going to dump these shares in anticipation, which will sink them…. And issuing 6% CoCos when their brethren trade at 75 cents on the euro, or below, is going to be very expensive or perhaps impossible. Italy is in the middle of a white-hot banking crisis. Risk of contagion in Italy and far beyond is huge. Read… Investor Fears Spike as Italy (and the EU) Inch Closer to Doomsday Scenario Enjoy reading WOLF STREET and want to support it? Using ad blockers – I totally get why – but want to support the site? You can donate “beer money.” I appreciate it immensely. Click on the beer mug to find out how: Would you like to be notified via email when WOLF STREET publishes a new article? Sign up here.
package com.waes.jgu.service;

import com.waes.jgu.domain.EntryData;
import com.waes.jgu.dto.DiffResponse;
import com.waes.jgu.enums.Side;
import com.waes.jgu.exception.EntryIncompleteException;
import com.waes.jgu.exception.EntryNotFoundException;
import com.waes.jgu.exception.InmutableDataException;
import com.waes.jgu.exception.InvalidDataException;

/**
 * Interface that exposes the operations to handle storing and comparison capabilities.
 *
 * @author <NAME> jdgutierrezj
 */
public interface DiffService {

    /**
     * Saves the entry with the base64 sequence for one side of the comparison.
     *
     * @param id the unique identifier of the entry
     * @param side side of the comparison, left or right
     * @param base64Data sequence of characters that contains valid base64-encoded binary data
     *
     * @return the object just created or updated
     *
     * @throws InvalidDataException when the request tries to store the same side of the comparison twice
     * @throws InmutableDataException when the request tries to store the same side of the comparison twice
     */
    EntryData saveData(String id, Side side, String base64Data) throws InvalidDataException, InmutableDataException;

    /**
     * Gets the differences between both sides of the comparison.
     *
     * @param id the unique identifier of the entry
     *
     * @return response with the processed result
     *
     * @throws EntryNotFoundException when the entry was not found
     * @throws EntryIncompleteException when one or more sides of the comparison have not been received
     */
    DiffResponse getDiff(String id) throws EntryNotFoundException, EntryIncompleteException;
}
<filename>src/chemftr/thc/rank_reduce_thc.py """ THC rank reduction of ERIs """ import sys import time import numpy as np import uuid import h5py from chemftr.thc.utils import lbfgsb_opt_thc_l2reg, adagrad_opt_thc def thc_via_cp3(eri_full, nthc, thc_save_file=None, first_factor_thresh=1.0E-14, conv_eps=1.0E-4, perform_bfgs_opt=True, bfgs_maxiter=5000, random_start_thc=True, verify=False): """ THC-CP3 performs an SVD decomposition of the eri matrix followed by a CP decomposition via pybtas. The CP decomposition is assumes the tensor is symmetric in in the first two indices corresponding to a reshaped (and rescaled by the signular value) singular vector. Args: eri_full - (N x N x N x N) eri tensor in Mulliken (chemists) ordering nthc (int) - number of THC factors to use thc_save_file (str) - if not None, save output to the input filename (as HDF5) first_factor_thresh - SVD threshold on initial factorization of ERI conv_eps (float) - convergence threshold on CP3 ALS perform_bfgs_opt - Perform extra gradient optimization on top of CP3 decomp bfgs_maxiter - Maximum bfgs steps to take. Default 1500. random_start_thc - Perform random start for CP3. If false perform HOSVD start. verify - check eri properties. Default is False returns: eri_thc - (N x N x N x N) reconstructed ERIs from THC factorization thc_leaf - THC leaf tensor thc_central - THC central tensor info (dict) - arguments set during the THC factorization """ # fail fast if we don't have the tools to use this routine try: import pybtas except ImportError: raise ImportError("pybtas could not be imported. Is it installed and in your PYTHONPATH?") info = locals() info.pop('eri_full', None) # data too big for info dict info.pop('pybtas', None) # not needed for info dict norb = eri_full.shape[0] if verify: assert np.allclose(eri_full, eri_full.transpose(1, 0, 2, 3)) # (ij|kl) == (ji|kl) assert np.allclose(eri_full, eri_full.transpose(0, 1, 3, 2)) # (ij|kl) == (ij|lk) assert np.allclose(eri_full, eri_full.transpose(1, 0, 3, 2)) # (ij|kl) == (ji|lk) assert np.allclose(eri_full, eri_full.transpose(2, 3, 0, 1)) # (ij|kl) == (kl|ij) eri_mat = eri_full.transpose(0, 1, 3, 2).reshape((norb ** 2, norb ** 2)) if verify: assert np.allclose(eri_mat, eri_mat.T) u, sigma, vh = np.linalg.svd(eri_mat) if verify: assert np.allclose(u @ np.diag(sigma) @ vh, eri_mat) non_zero_sv = np.where(sigma >= first_factor_thresh)[0] u_chol = u[:, non_zero_sv] diag_sigma = np.diag(sigma[non_zero_sv]) u_chol = u_chol @ np.diag(np.sqrt(sigma[non_zero_sv])) if verify: test_eri_mat_mulliken = u[:, non_zero_sv] @ diag_sigma @ vh[non_zero_sv, :] assert np.allclose(test_eri_mat_mulliken, eri_mat) start_time = time.time() # timing results if requested by user beta, gamma, scale = pybtas.cp3_from_cholesky(u_chol.copy(), nthc, random_start=random_start_thc, conv_eps=conv_eps) cp3_calc_time = time.time() - start_time if verify: u_alpha = np.zeros((norb, norb, len(non_zero_sv))) for ii in range(len(non_zero_sv)): u_alpha[:, :, ii] = np.sqrt(sigma[ii]) * u[:, ii].reshape((norb, norb)) assert np.allclose(u_alpha[:, :, ii], u_alpha[:, :, ii].T) # consequence of working with Mulliken rep u_alpha_test = np.einsum("ar,br,xr,r->abx", beta, beta, gamma, scale.ravel()) print("\tu_alpha l2-norm ", np.linalg.norm(u_alpha_test - u_alpha)) thc_leaf = beta.T thc_gamma = np.einsum('xr,r->xr', gamma, scale.ravel()) thc_central = thc_gamma.T @ thc_gamma if verify: eri_thc = np.einsum("Pp,Pr,Qq,Qs,PQ->prqs", thc_leaf, thc_leaf, thc_leaf, thc_leaf, thc_central, optimize=True) print("\tERI L2 CP3-THC ", 
np.linalg.norm(eri_thc - eri_full)) print("\tCP3 timing: ", cp3_calc_time) if perform_bfgs_opt: x = np.hstack((thc_leaf.ravel(), thc_central.ravel())) lbfgs_start_time = time.time() x = lbfgsb_opt_thc_l2reg(eri_full, nthc, initial_guess=x, maxiter=bfgs_maxiter) lbfgs_calc_time = time.time() - lbfgs_start_time thc_leaf = x[:norb * nthc].reshape(nthc, norb) # leaf tensor nthc x norb thc_central = x[norb * nthc:norb * nthc + nthc * nthc].reshape(nthc, nthc) # central tensor total_calc_time = time.time() - start_time eri_thc = np.einsum("Pp,Pr,Qq,Qs,PQ->prqs", thc_leaf, thc_leaf, thc_leaf, thc_leaf, thc_central, optimize=True) if thc_save_file is not None: with h5py.File(thc_save_file+'.h5', 'w') as fid: fid.create_dataset('thc_leaf', data=thc_leaf) fid.create_dataset('thc_central', data=thc_central) fid.create_dataset('info', data=str(info)) return eri_thc, thc_leaf, thc_central, info
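A call-site sketch may make the routine above easier to follow. The snippet below is a minimal, hypothetical usage example: the import path is guessed from the file name, the toy ERI tensor is synthesised so that it carries the Mulliken symmetries the routine verifies, and the nthc value and optimizer settings are arbitrary illustrations, not recommendations. It assumes numpy and pybtas are available.

import numpy as np

from chemftr.thc.rank_reduce_thc import thc_via_cp3  # import path assumed from the file name above

# Build a tiny synthetic ERI tensor with the required symmetries:
# (pq|rs) = sum_a L[p,a] L[q,a] L[r,a] L[s,a] is symmetric under p<->q, r<->s and (pq)<->(rs).
norb, rank = 6, 4
rng = np.random.default_rng(0)
L = rng.normal(size=(norb, rank))
eri_full = np.einsum('pa,qa,ra,sa->pqrs', L, L, L, L)

# nthc and the BFGS settings below are illustrative choices only.
eri_thc, thc_leaf, thc_central, info = thc_via_cp3(
    eri_full,
    nthc=12,
    perform_bfgs_opt=True,
    bfgs_maxiter=500,
    verify=True,
)
print("THC reconstruction error:", np.linalg.norm(eri_thc - eri_full))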
/**
 * The class that contains helpful information on interfaces.
 *
 * @author SeVen
 */
public final class InterfaceConstants {

    public static final int EQUIPMENT = 1688;

    public static final int INVENTORY = 3214;

    public static final int BANK = 5292;

    /**
     * The action for removing items from the inventory container interface.
     */
    public static final int REMOVE_INVENTORY_ITEM = 3322;

    /**
     * The action for adding items from another container to the inventory container interface.
     */
    public static final int ADD_INVENTORY_ITEM = 3415;

    public static final int INVENTORY_STORE = 5064;

    public static final int WITHDRAW_BANK = 5382;

    public static final int DEPOSIT_BOX = 7423;
}
package javax.swing;

import java.awt.BorderLayout;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;

public class button_1 implements ActionListener {

    JButton button = new JButton("Change Color");
    JButton labelButton = new JButton("Trigger");
    JLabel label = new JLabel("I 'm Label");
    JFrame frame = new JFrame("<NAME>");

    public static void main(String[] args) {
        button_1 gui = new button_1();
        gui.go();
    }

    // Builds the GUI in a separate method so that main() only creates the object and starts it.
    public void go() {
        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); // exit the application when the window is closed
        button.addActionListener(new ColorListener());        // register listener for the "Change Color" button
        labelButton.addActionListener(new LabelListener());   // register listener for the "Trigger" button
        MyDrawPanel obj = new MyDrawPanel();

        // frame.getContentPane().add(BorderLayout.EAST, button);
        frame.getContentPane().add(BorderLayout.NORTH, label);
        frame.getContentPane().add(BorderLayout.WEST, button);       // position of the color button
        frame.getContentPane().add(BorderLayout.EAST, labelButton);  // position of the label button
        frame.getContentPane().add(BorderLayout.CENTER, obj);        // position of the drawing panel
        frame.setSize(300, 300);
        frame.setVisible(true);
    }

    class LabelListener implements ActionListener {
        public void actionPerformed(ActionEvent event) {
            label.setText("A CALM SEA NEVER MAKE A SKILLED SAILOR");
        }
    }

    class ColorListener implements ActionListener {
        public void actionPerformed(ActionEvent event) {
            frame.repaint();
        }
    }

    @Override
    public void actionPerformed(ActionEvent arg0) {
        // Unused: the buttons are handled by the inner listener classes above.
    }
}
Organizing Teaching Activities Through Capitalizing Educational Resources In The Online

The study "Organizing teaching activities through capitalizing educational resources in the online" aims, in its applied and practical dimension, to show that under the influence of COVID-19 the educational environment has been reorganized in virtual space, and that communication between teachers and students has, over the past year and a half, moved to cyberspace. From this point of view, the organization and delivery of teaching activities through technology and the Internet have been regulated by changes to the legislative, educational and curricular framework and by the approval of a framework methodology. Teachers engaged in digital teaching, learning and assessment were a key factor in making teaching activities delivered through technology and the Internet more effective. Reorganizing the educational approach has become one of the major current objectives, because new technologies do not merely provide the environment for the educational act: they stimulate learning, help shape cognitive and metacognitive learning strategies, generate alternative behaviours and correct self-regulation deficits.
import math


class Node:
    def __init__(self, position=(), parent=()):
        (self.x, self.y) = position
        self.parent = parent
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

    def __lt__(self, other):
        return self.f < other.f


def calculate_heuristic(current, goal, heuristic):
    (x1, y1) = current
    (x2, y2) = goal
    # Manhattan distance heuristic (4 directions of movement)
    if heuristic == 0:
        return abs(x1 - x2) + abs(y1 - y2)
    # Diagonal distance heuristic (8 directions of movement)
    else:
        dx = abs(x1 - x2)
        dy = abs(y1 - y2)
        D = 1
        D2 = math.sqrt(2)
        return D * (dx + dy) + (D2 - 2 * D) * min(dx, dy)


def reconstruct_path(draw, grid, start, current):
    while current != start:
        current = current.parent
        # the shortest path nodes (PURPLE)
        grid[current.y][current.x] = 4
        draw()


def astar(draw, grid, start_position, end_position, heuristic):
    start = Node(start_position, None)
    end = Node(end_position, None)

    open = [start]  # set of unexplored nodes adjacent to explored nodes
    closed = []  # set of explored nodes

    while len(open) > 0:
        open.sort()
        current = open.pop(0)  # get node with the lowest f value
        closed.append(current)

        if current == end:
            reconstruct_path(draw, grid, start, current)
            grid[start.y][start.x] = grid[current.y][current.x] = 0
            return True

        (x, y) = (current.x, current.y)
        neighbors = []
        # 4 directions of movement
        if heuristic == 0:
            neighbors = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]
        # 8 directions of movement
        else:
            neighbors = [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
                         (x - 1, y), (x, y), (x + 1, y),
                         (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]

        for next in neighbors:
            # check if node is within search space bounds
            if next[1] > (len(grid) - 1) or next[1] < 0 or next[0] > (len(grid[len(grid) - 1]) - 1) or next[0] < 0:
                continue

            # check if node is a wall
            if grid[next[1]][next[0]] == 1:
                continue

            neighbor = Node(next, current)

            # if node has been already explored (ORANGE) - skip
            if neighbor in closed:
                grid[current.y][current.x] = 3
                continue

            neighbor.g = current.g + 1  # weight of every edge is 1
            neighbor.h = calculate_heuristic((neighbor.x, neighbor.y), (end.x, end.y), heuristic)
            neighbor.f = neighbor.g + neighbor.h

            # add current node's neighbors to the open set
            if neighbor not in open:
                open.append(neighbor)
                # nodes in open set (YELLOW)
                grid[neighbor.y][neighbor.x] = 2

        draw()

    return False
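To show how the search function above is typically driven, here is a minimal, hypothetical example. The 5x5 grid, the start and end coordinates, and the no-op draw() callback are illustrative assumptions; in the original project the callback presumably redraws a visualisation, which is not needed to run the search.

# 0 = walkable, 1 = wall; the grid is indexed as grid[y][x].
grid = [
    [0, 0, 0, 1, 0],
    [1, 1, 0, 1, 0],
    [0, 0, 0, 1, 0],
    [0, 1, 1, 1, 0],
    [0, 0, 0, 0, 0],
]

def draw():
    pass  # stand-in for the visualisation callback used by the original project

# Manhattan heuristic (heuristic=0) restricts movement to 4 directions.
found = astar(draw, grid, start_position=(0, 0), end_position=(4, 0), heuristic=0)
print("path found:", found)
# After the call, cells marked 4 in `grid` trace the recovered path; start and end are reset to 0.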
/** * Created by afalko on 10/19/17. */ public class TestCommon { private static final Logger log = LoggerFactory.getLogger(TestCommon.class); public static final List<String> ORGS = Arrays.asList( "dockerfile-image-update-itest", "dockerfile-image-update-itest-2", "dockerfile-image-update-itest-3"); public static void initializeRepos(GHOrganization org, List<String> repos, String image, List<GHRepository> createdRepos, GitHubUtil gitHubUtil) throws Exception { for (String repoName : repos) { GHRepository repo = org.createRepository(repoName) .description("Delete if this exists. If it exists, then an integration test crashed somewhere.") .private_(false) .create(); // Ensure that repository exists for (int attempts = 0; attempts < 5; attempts++) { try { repo = gitHubUtil.getRepo(repo.getFullName()); break; } catch (Exception e) { log.info("Waiting for {} to be created", repo.getFullName()); Thread.sleep(TimeUnit.SECONDS.toMillis(1)); } } repo.createContent("FROM " + image + ":test", "Integration Testing", "Dockerfile"); createdRepos.add(repo); log.info("Initializing {}/{}", org.getLogin(), repoName); gitHubUtil.tryRetrievingContent(repo, "Dockerfile", repo.getDefaultBranch()); } } public static void printCollectedExceptionsAndFail(List<Exception> exceptions, boolean exitWithFail) { for (int i = 0; i < exceptions.size(); i++) { log.error("Hit exception {}/{} while cleaning up.", i+1, exceptions.size()); log.error("", exceptions.get(i)); } if (exitWithFail && exceptions.size() > 0) { throw new RuntimeException(exceptions.get(0)); } } public static void cleanAllRepos(List<GHRepository> createdRepos, boolean exitWithFail) throws Exception { List<Exception> exceptions = new ArrayList<>(); exceptions.addAll(checkAndDelete(createdRepos)); TestCommon.printCollectedExceptionsAndFail(exceptions, false); } private static Exception checkAndDelete(GHRepository repo) { log.info("deleting {}", repo.getFullName()); try { repo.delete(); } catch (Exception e) { return e; } return null; } private static List<Exception> checkAndDelete(List<GHRepository> repos) throws IOException { List<Exception> exceptions = new ArrayList<>(); for (GHRepository repo : repos) { PagedIterable<GHRepository> forks; try { forks = repo.listForks(); for (GHRepository fork : forks) { Exception forkDeleteException = checkAndDelete(fork); if (forkDeleteException != null) { exceptions.add(forkDeleteException); } } } catch (Exception getForksException) { log.error("Could not get forks for repo: ", repo.getFullName()); exceptions.add(getForksException); } Exception repoDeleteException = checkAndDelete(repo); if (repoDeleteException != null) { exceptions.add(repoDeleteException); } } return exceptions; } public static void cleanBefore(List<String> repos, List<String> duplicatesCreatedByGithub, String storeName, GitHub github) throws Exception { checkAndDeleteBefore(repos, storeName, github); checkAndDeleteBefore(duplicatesCreatedByGithub, storeName, github); } private static void checkAndDeleteBefore(List<String> repoNames, String storeName, GitHub github) throws IOException, InterruptedException { String user = github.getMyself().getLogin(); for (String repoName : repoNames) { for (String org : ORGS) { checkAndDeleteBefore(Paths.get(user, repoName).toString(), github); checkAndDeleteBefore(Paths.get(org, repoName).toString(), github); } } checkAndDeleteBefore(Paths.get(user, storeName).toString(), github); } public static void checkAndDeleteBefore(String repoName, GitHub github) throws IOException, InterruptedException { GHRepository 
repo; try { repo = github.getRepository(repoName); } catch (FileNotFoundException fileNotFoundException) { return; } repo.delete(); // Make sure the repo is actually deleted for (int attempts = 0; attempts < 60; attempts++) { try { github.getRepository(repoName); } catch (FileNotFoundException fileNotFoundException) { return; } log.info("Waiting for {} to fully delete...", repoName); Thread.sleep(TimeUnit.SECONDS.toMillis(1)); } throw new FileNotFoundException(String.format("Unable to pre-delete repository %s during pre-test cleanup", repoName)); } public static void addVersionStoreRepo(GitHub github, List<GHRepository> createdRepos, String storeName) throws IOException { String login = github.getMyself().getLogin(); GHRepository storeRepo = github.getRepository(Paths.get(login, storeName).toString()); createdRepos.add(storeRepo); } }
<reponame>e23z/papyro import React, { FunctionComponent } from 'react'; import { FormGroup, Label } from 'reactstrap'; import RichTextEditor, { EditorValue } from 'react-rte'; import '../../styles/css/wysiwyg.css'; import { ContentBlock, EditorState, RichUtils, ContentState } from 'draft-js'; import { FontAwesomeIcon } from '@fortawesome/react-fontawesome'; import { faAlignCenter, faAlignRight, faAlignLeft, faAlignJustify, faParagraph } from '@fortawesome/free-solid-svg-icons'; type GetControlState = (key: string) => string | undefined; type SetControlState = (key: string, value: string) => void; /** * @const * @description The default configuration for all wysiwyg editors. */ const EditorConfigs: any = { display: [ 'INLINE_STYLE_BUTTONS', 'BLOCK_TYPE_BUTTONS', 'LINK_BUTTONS', 'IMAGE_BUTTON', 'BLOCK_TYPE_DROPDOWN', 'HISTORY_BUTTONS' ], INLINE_STYLE_BUTTONS: [ { label: 'Bold', style: 'BOLD' }, { label: 'Italic', style: 'ITALIC' }, { label: 'Underline', style: 'UNDERLINE' }, { label: 'StrikeThrough', style: 'STRIKETHROUGH' } ], BLOCK_TYPE_DROPDOWN: [ { label: 'Normal', style: 'unstyled' }, { label: 'Heading Large', style: 'header-one' }, { label: 'Heading Medium', style: 'header-two' }, { label: 'Heading Small', style: 'header-three' } ], BLOCK_TYPE_BUTTONS: [ { label: 'UL', style: 'unordered-list-item' }, { label: 'OL', style: 'ordered-list-item' }, { label: 'Blockquote', style: 'blockquote' } ] }; const EditorBasicConfigs: any = { display: [ 'INLINE_STYLE_BUTTONS', 'LINK_BUTTONS', 'HISTORY_BUTTONS' ], INLINE_STYLE_BUTTONS: [ { label: 'Bold', style: 'BOLD' }, { label: 'Italic', style: 'ITALIC' }, { label: 'Underline', style: 'UNDERLINE' }, { label: 'StrikeThrough', style: 'STRIKETHROUGH' } ] }; const textCenterRef = React.createRef() as any; const textRightRef = React.createRef() as any; const textLeftRef = React.createRef() as any; export class RichEditor { static createEmpty() { return RichTextEditor.createEmptyValue(); } static createFromHtmlString(htmlString: string): EditorValue { return RichTextEditor.createValueFromString(htmlString, 'html', { customBlockFn: (el: Element): any => { switch ((el as HTMLElement).style.textAlign) { case 'left': return { data: { textAlign: 'left' } }; case 'center': return { data: { textAlign: 'center' } }; case 'right': return { data: { textAlign: 'right' } }; case 'justify': return { data: { textAlign: 'justify' } }; } } }); } static valueToHtml(value: EditorValue): string { return value.toString('html', { // blockRenderers: { // 'TEXTCENTER': (block: ContentBlock) => { // return '<div class="text-center">' + block.getText() + '</div>'; // } // }, blockStyleFn: (block: ContentBlock): any => { const align = block.getData().get('textAlign'); switch (align) { case 'left': return { style: { textAlign: 'left' } }; case 'center': return { style: { textAlign: 'center' } }; case 'right': return { style: { textAlign: 'right' } }; case 'justify': return { style: { textAlign: 'justify' } }; } } }); } } const WYSIWYGEnhancer = (props: any) => { const children = React.cloneElement(props.children, { // blockRendererFn: (block: any): any => { // const type = block.getType(); // if (type === 'TEXTCENTER') { // return { // component: (props: any) => { // return <div className='text-center'>{block.getText()}</div>; // } // }; // } // } }); return <>{children}</>; } const blockStyleFn = (block: ContentBlock): string | undefined => { const align = block.getData().get('textAlign'); switch (align) { case 'left': return 'text-left'; case 'center': return 'text-center'; case 
'right': return 'text-right'; case 'justify': return 'text-justify'; } return ''; } const toggleBtnStates = (align: string | undefined) => { if (textCenterRef.current) { if (align === 'center') textCenterRef.current.classList.add('active'); else textCenterRef.current.classList.remove('active'); } if (textRightRef.current) { if (align === 'right') textRightRef.current.classList.add('active'); else textRightRef.current.classList.remove('active'); } if (textLeftRef.current) { if (align === 'left' || !align) textLeftRef.current.classList.add('active'); else textLeftRef.current.classList.remove('active'); } }; const btnApplyStyle = ( e: React.MouseEvent<HTMLButtonElement, MouseEvent>, align: string, state: EditorState, onChange: (value: EditorValue) => any ): void => { const content = state.getCurrentContent(); const blockKey = state.getSelection().getStartKey(); const block = content.getBlockForKey(blockKey); const blockData = block.getData(); let newBlockData; if (blockData.get('textAlign') === align) newBlockData = blockData.remove('textAlign'); else newBlockData = blockData.set('textAlign', align); const newBlock = block.set('data', newBlockData) as ContentBlock; const newContent = content.merge({ blockMap: content.getBlockMap().set(blockKey, newBlock) }) as ContentState; const newState = EditorState.push(state, newContent, 'change-block-data'); toggleBtnStates(align); onChange(EditorValue.createFromState(newState)); e.stopPropagation(); e.preventDefault(); }; const BREAKPOINT = (set: SetControlState, get: GetControlState, state: EditorState) => ( <div key={4}></div> ); const TEXTCENTER = (onChange: (value: EditorValue) => any) => (set: SetControlState, get: GetControlState, state: EditorState) => { return <button ref={textCenterRef} className='custom-btn text-align-btn' key={0} onClick={e => btnApplyStyle(e, 'center', state, onChange)}> <FontAwesomeIcon icon={faAlignCenter} /> </button>; }; const TEXTRIGHT = (onChange: (value: EditorValue) => any) => (set: SetControlState, get: GetControlState, state: EditorState) => { return <button ref={textRightRef} className='custom-btn text-align-btn text-align-btn-last' key={1} onClick={e => btnApplyStyle(e, 'right', state, onChange)}> <FontAwesomeIcon icon={faAlignRight} /> </button>; }; const TEXTLEFT = (onChange: (value: EditorValue) => any) => (set: SetControlState, get: GetControlState, state: EditorState) => { return <button ref={textLeftRef} className='custom-btn text-align-btn text-align-btn-first' key={2} onClick={e => btnApplyStyle(e, 'left', state, onChange)}> <FontAwesomeIcon icon={faAlignLeft} /> </button>; }; const TEXTJUSTIFY = (onChange: (value: EditorValue) => any) => (set: SetControlState, get: GetControlState, state: EditorState) => { return <button className='custom-btn text-align-btn text-align-btn-last' key={3} onClick={e => btnApplyStyle(e, 'justify', state, onChange)}> <FontAwesomeIcon icon={faAlignJustify} /> </button>; }; const BREAKLINE = (onChange: (value: EditorValue) => any) => (set: SetControlState, get: GetControlState, state: EditorState) => { return <button className='custom-btn' key={5} onClick={e => { onChange(EditorValue.createFromState(RichUtils.insertSoftNewline(state))); e.stopPropagation(); e.preventDefault(); }}> <FontAwesomeIcon icon={faParagraph} /> </button>; }; /** * @interface WYSIWYGProps * @description WYSIWYG component props */ interface WYSIWYGProps { value: EditorValue; onChange: (value: EditorValue) => any; basicControls?: boolean; title: string; required?: boolean; } /** * @function WYSIWYG * 
@description Creates a WYSIWYG editor. */ const WYSIWYG: FunctionComponent<WYSIWYGProps> = props => ( <FormGroup className='flex-fill'> <Label for='content' className={props.required ? 'required' : ''}>{props.title}</Label> <WYSIWYGEnhancer> <RichTextEditor customControls={ props.basicControls ? [] : [ BREAKPOINT, TEXTLEFT(props.onChange), TEXTCENTER(props.onChange), TEXTRIGHT(props.onChange), // TEXTJUSTIFY(props.onChange) BREAKLINE(props.onChange) ]} blockStyleFn={blockStyleFn} toolbarConfig={props.basicControls ? EditorBasicConfigs : EditorConfigs} editorClassName='wysiwyg-editor' value={props.value} onChange={value => { const state = value.getEditorState(); const block = state.getCurrentContent().getBlockForKey(state.getSelection().getStartKey()); const textAlign = block.getData().get('textAlign'); toggleBtnStates(textAlign); // console.log(value.toString('html')); // console.log(RichEditor.valueToHtml(value)); props.onChange(value); }} /> </WYSIWYGEnhancer> </FormGroup> ); export default WYSIWYG;
/**
 * Refreshes the string that holds the serialised JSON object of the function description. This method should be called after
 * every manipulation of the function description (to keep the states consistent).
 */
private void refreshFunctionDescriptionString() {
    if (functionDescription == null) {
        functionDescriptionString = null;
        return;
    }
    functionDescriptionString = functionDescription.toString().getBytes(Charsets.UTF_8);
}
""" Advent of code 2021 day 18 / 2 """

import unittest

from solution import solution


class MyTest(unittest.TestCase):
    """Unit tests for actual day"""

    def test_basic(self):
        """ Test from the task """
        self.assertEqual(solution("""[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""), 3993)


if __name__ == '__main__':
    unittest.main()
<gh_stars>0 /***********************************/ /* SPICE Modeling for VPR */ /* <NAME>, EPFL/LSI */ /***********************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <assert.h> #include <sys/stat.h> #include <unistd.h> /* Include vpr structs*/ #include "util.h" #include "physical_types.h" #include "vpr_types.h" #include "globals.h" #include "rr_graph_util.h" #include "rr_graph.h" #include "rr_graph2.h" #include "vpr_utils.h" /* Include spice support headers*/ #include "linkedlist.h" #include "fpga_spice_globals.h" #include "spice_globals.h" #include "fpga_spice_utils.h" #include "spice_utils.h" #include "spice_routing.h" #include "spice_subckt.h" /* Global parameters */ static int num_segments; static t_segment_inf* segments; static int testbench_load_cnt = 0; static int upbound_sim_num_clock_cycles = 2; static int max_sim_num_clock_cycles = 2; static int auto_select_max_sim_num_clock_cycles = TRUE; static void init_spice_routing_testbench_globals(t_spice spice) { auto_select_max_sim_num_clock_cycles = spice.spice_params.meas_params.auto_select_sim_num_clk_cycle; upbound_sim_num_clock_cycles = spice.spice_params.meas_params.sim_num_clock_cycle + 1; if (FALSE == auto_select_max_sim_num_clock_cycles) { max_sim_num_clock_cycles = spice.spice_params.meas_params.sim_num_clock_cycle + 1; } else { max_sim_num_clock_cycles = 2; } } static void fprint_spice_cb_testbench_global_ports(FILE* fp, t_spice spice) { /* Declare the global SRAM ports */ fprintf(fp, ".global %s\n", spice_tb_global_vdd_cb_sram_port_name); return; } static void fprint_spice_sb_testbench_global_ports(FILE* fp, t_spice spice) { /* Declare the global SRAM ports */ fprintf(fp, ".global %s\n", spice_tb_global_vdd_sb_sram_port_name); return; } static void fprint_spice_routing_testbench_global_ports(FILE* fp, t_spice spice) { /* A valid file handler*/ if (NULL == fp) { vpr_printf(TIO_MESSAGE_ERROR,"(FILE:%s,LINE[%d])Invalid File Handler!\n",__FILE__, __LINE__); exit(1); } /* Print generic global ports*/ fprint_spice_generic_testbench_global_ports(fp, sram_spice_orgz_info, global_ports_head); return; } static void fprintf_spice_routing_testbench_generic_stimuli(FILE* fp, int num_clocks) { /* Give global vdd, gnd, voltage sources*/ /* A valid file handler */ if (NULL == fp) { vpr_printf(TIO_MESSAGE_ERROR, "(File:%s, [LINE%d])Invalid File Handler!\n", __FILE__, __LINE__); exit(1); } /* Print generic stimuli */ fprint_spice_testbench_generic_global_ports_stimuli(fp, num_clocks); /* Generate global ports stimuli */ fprint_spice_testbench_global_ports_stimuli(fp, global_ports_head); /* SRAM ports */ fprintf(fp, "***** Global Inputs for SRAMs *****\n"); fprint_spice_testbench_global_sram_inport_stimuli(fp, sram_spice_orgz_info); fprintf(fp, "***** Global VDD for SRAMs *****\n"); fprint_spice_testbench_global_vdd_port_stimuli(fp, spice_tb_global_vdd_sram_port_name, "vsp"); fprintf(fp, "***** Global VDD for load inverters *****\n"); fprint_spice_testbench_global_vdd_port_stimuli(fp, spice_tb_global_vdd_load_port_name, "vsp"); return; } /** In a testbench, we call the subckt of defined connection box (cbx[x][y] or cby[x][y]) * For each input of connection box (channel track rr_nodes), * we find their activities and generate input voltage pulses. * For each output of connection box, we add all the non-inverter downstream components as load. 
*/ static int fprint_spice_routing_testbench_call_one_cb_tb(FILE* fp, t_spice spice, t_rr_type chan_type, int x, int y, t_ivec*** LL_rr_node_indices) { int itrack, inode, side, ipin_height; int side_cnt = 0; int used = 0; t_cb cur_cb_info; float input_density; float input_probability; int input_init_value; float average_cb_input_density = 0.; int avg_density_cnt = 0; int num_sim_clock_cycles = 0; /* Check the file handler*/ if (NULL == fp) { vpr_printf(TIO_MESSAGE_ERROR,"(File:%s,[LINE%d])Invalid file handler.\n", __FILE__, __LINE__); exit(1); } /* Check */ assert((!(0 > x))&&(!(x > (nx + 1)))); assert((!(0 > y))&&(!(y > (ny + 1)))); /* call the defined switch block sb[x][y]*/ fprintf(fp, "***** Call defined Connection Box[%d][%d] *****\n", x, y); switch(chan_type) { case CHANX: cur_cb_info = cbx_info[x][y]; break; case CHANY: cur_cb_info = cby_info[x][y]; break; default: vpr_printf(TIO_MESSAGE_ERROR, "(File:%s, [LINE%d])Invalid type of channel!\n", __FILE__, __LINE__); exit(1); } fprint_call_defined_one_connection_box(fp, cur_cb_info); /* Print input voltage pulses */ /* connect to the mid point of a track*/ side_cnt = 0; for (side = 0; side < cur_cb_info.num_sides; side++) { /* Bypass side with zero channel width */ if (0 == cur_cb_info.chan_width[side]) { continue; } assert (0 < cur_cb_info.chan_width[side]); side_cnt++; for (itrack = 0; itrack < cur_cb_info.chan_width[side]; itrack++) { /* Add input voltage pulses*/ input_density = get_rr_node_net_density(*cur_cb_info.chan_rr_node[side][itrack]); input_probability = get_rr_node_net_probability(*cur_cb_info.chan_rr_node[side][itrack]); input_init_value = get_rr_node_net_init_value(*cur_cb_info.chan_rr_node[side][itrack]); fprintf(fp, "***** Signal %s[%d][%d]_midout[%d] density = %g, probability=%g.*****\n", convert_chan_type_to_string(cur_cb_info.type), cur_cb_info.x, cur_cb_info.y, itrack, input_density, input_probability); fprintf(fp, "V%s[%d][%d]_midout[%d] %s[%d][%d]_midout[%d] 0 \n", convert_chan_type_to_string(cur_cb_info.type), cur_cb_info.x, cur_cb_info.y, itrack, convert_chan_type_to_string(cur_cb_info.type), cur_cb_info.x, cur_cb_info.y, itrack); fprint_voltage_pulse_params(fp, input_init_value, input_density, input_probability); /* Update statistics */ average_cb_input_density += input_density; if (0. < input_density) { avg_density_cnt++; } } } /*check side_cnt */ assert(1 == side_cnt); /* Add loads */ side_cnt = 0; /* Print the ports of grids*/ /* only check ipin_rr_nodes of cur_cb_info */ for (side = 0; side < cur_cb_info.num_sides; side++) { /* Bypass side with zero IPINs*/ if (0 == cur_cb_info.num_ipin_rr_nodes[side]) { continue; } side_cnt++; assert(0 < cur_cb_info.num_ipin_rr_nodes[side]); assert(NULL != cur_cb_info.ipin_rr_node[side]); for (inode = 0; inode < cur_cb_info.num_ipin_rr_nodes[side]; inode++) { /* Print each INPUT Pins of a grid */ ipin_height = get_grid_pin_height(cur_cb_info.ipin_rr_node[side][inode]->xlow, cur_cb_info.ipin_rr_node[side][inode]->ylow, cur_cb_info.ipin_rr_node[side][inode]->ptc_num); if (TRUE == run_testbench_load_extraction) { /* Additional switch, default on! 
*/ fprint_spice_testbench_one_grid_pin_loads(fp, cur_cb_info.ipin_rr_node[side][inode]->xlow, cur_cb_info.ipin_rr_node[side][inode]->ylow, ipin_height, cur_cb_info.ipin_rr_node_grid_side[side][inode], cur_cb_info.ipin_rr_node[side][inode]->ptc_num, &testbench_load_cnt, LL_rr_node_indices); } fprintf(fp, "\n"); /* Get signal activity */ input_density = get_rr_node_net_density(*cur_cb_info.ipin_rr_node[side][inode]); input_probability = get_rr_node_net_probability(*cur_cb_info.ipin_rr_node[side][inode]); input_init_value = get_rr_node_net_init_value(*cur_cb_info.ipin_rr_node[side][inode]); /* Update statistics */ average_cb_input_density += input_density; if (0. < input_density) { avg_density_cnt++; } } } /* Make sure only 2 sides of IPINs are printed */ assert((1== side_cnt)||(2 == side_cnt)); /* Voltage stilumli */ /* Connect to VDD supply */ fprintf(fp, "***** Voltage supplies *****\n"); switch(chan_type) { case CHANX: /* Connect to VDD supply */ fprintf(fp, "***** Voltage supplies *****\n"); fprintf(fp, "Vgvdd_cb[%d][%d] gvdd_cbx[%d][%d] 0 vsp\n", x, y, x, y); break; case CHANY: /* Connect to VDD supply */ fprintf(fp, "***** Voltage supplies *****\n"); fprintf(fp, "Vgvdd_cb[%d][%d] gvdd_cby[%d][%d] 0 vsp\n", x, y, x, y); break; default: vpr_printf(TIO_MESSAGE_ERROR, "(File:%s, [LINE%d])Invalid type of channel!\n", __FILE__, __LINE__); exit(1); } /* SRAM Voltage stimulit */ fprintf(fp, "V%s %s 0 vsp\n", spice_tb_global_vdd_cb_sram_port_name, spice_tb_global_vdd_cb_sram_port_name); /* Calculate the num_sim_clock_cycle for this MUX, update global max_sim_clock_cycle in this testbench */ if (0 < avg_density_cnt) { average_cb_input_density = average_cb_input_density/avg_density_cnt; num_sim_clock_cycles = (int)(1/average_cb_input_density) + 1; used = 1; } else { assert(0 == avg_density_cnt); average_cb_input_density = 0.; num_sim_clock_cycles = 2; used = 0; } if (TRUE == auto_select_max_sim_num_clock_cycles) { /* for idle blocks, 2 clock cycle is well enough... 
*/ if (2 < num_sim_clock_cycles) { num_sim_clock_cycles = upbound_sim_num_clock_cycles; } else { num_sim_clock_cycles = 2; } if (max_sim_num_clock_cycles < num_sim_clock_cycles) { max_sim_num_clock_cycles = num_sim_clock_cycles; } } else { num_sim_clock_cycles = max_sim_num_clock_cycles; } /* Measurements */ fprint_spice_netlist_transient_setting(fp, spice, num_sim_clock_cycles, FALSE); fprint_spice_netlist_generic_measurements(fp, spice.spice_params.mc_params, spice.num_spice_model, spice.spice_models); /* Measure the delay of MUX */ fprintf(fp, "***** Measurements *****\n"); /* Measure the leakage power of MUX */ fprintf(fp, "***** Leakage Power Measurement *****\n"); fprintf(fp, ".meas tran leakage_power_cb avg p(Vgvdd_cb[%d][%d]) from=0 to='clock_period'\n", x, y); /* Measure the leakage power of SRAMs */ fprintf(fp, ".meas tran leakage_power_sram_cb avg p(V%s) from=0 to='clock_period'\n", spice_tb_global_vdd_cb_sram_port_name); /* Measure the dynamic power of MUX */ fprintf(fp, "***** Dynamic Power Measurement *****\n"); fprintf(fp, ".meas tran dynamic_power_cb avg p(Vgvdd_cb[%d][%d]) from='clock_period' to='%d*clock_period'\n", x, y, num_sim_clock_cycles); fprintf(fp, ".meas tran energy_per_cycle_cb param='dynamic_power_cb*clock_period'\n"); /* Measure the dynamic power of SRAMs */ fprintf(fp, ".meas tran dynamic_power_sram_cb avg p(V%s) from='clock_period' to='%d*clock_period'\n", spice_tb_global_vdd_cb_sram_port_name, num_sim_clock_cycles); fprintf(fp, ".meas tran energy_per_cycle_sram_cb param='dynamic_power_sram_cb*clock_period'\n"); /* print average cb input density */ switch(chan_type) { case CHANX: /* vpr_printf(TIO_MESSAGE_INFO,"Average density of CBX[%d][%d] inputs is %.2g.\n", x, y, average_cb_input_density); */ break; case CHANY: /* vpr_printf(TIO_MESSAGE_INFO,"Average density of CBY[%d][%d] inputs is %.2g.\n", x, y, average_cb_input_density); */ break; default: vpr_printf(TIO_MESSAGE_ERROR, "(File:%s, [LINE%d])Invalid type of channel!\n", __FILE__, __LINE__); exit(1); } return used; } /** In a testbench, we call the subckt of a defined switch block (sb[x][y]) * For each input of switch block, we find their activities and generate input voltage pulses. * For each output of switch block, we add all the non-inverter downstream components as load. 
*/ static int fprint_spice_routing_testbench_call_one_sb_tb(FILE* fp, t_spice spice, int x, int y, t_ivec*** LL_rr_node_indices) { int itrack, inode, side, ipin_height, ix, iy; int used = 0; t_sb cur_sb_info; char* outport_name = NULL; char* rr_node_outport_name = NULL; float input_density; float input_probability; int input_init_value; float average_sb_input_density = 0.; int avg_density_cnt = 0; int num_sim_clock_cycles = 0; /* Check the file handler*/ if (NULL == fp) { vpr_printf(TIO_MESSAGE_ERROR,"(File:%s,[LINE%d])Invalid file handler.\n", __FILE__, __LINE__); exit(1); } /* Check */ assert((!(0 > x))&&(!(x > (nx + 1)))); assert((!(0 > y))&&(!(y > (ny + 1)))); /* call the defined switch block sb[x][y]*/ fprintf(fp, "***** Call defined Switch Box[%d][%d] *****\n", x, y); fprint_call_defined_one_switch_box(fp, sb_info[x][y]); cur_sb_info = sb_info[x][y]; /* For each input of switch block, we generate a input voltage pulse * For each output of switch block, we generate downstream loads */ /* Find all rr_nodes of channels */ for (side = 0; side < cur_sb_info.num_sides; side++) { determine_sb_port_coordinator(cur_sb_info, side, &ix, &iy); for (itrack = 0; itrack < cur_sb_info.chan_width[side]; itrack++) { /* Print voltage stimuli and loads */ switch (cur_sb_info.chan_rr_node_direction[side][itrack]) { case OUT_PORT: /* Output port requires loads*/ /* We should not add any loads to those outputs that are driven simply by a wire in this switch box! if (1 == is_sb_interc_between_segments(cur_sb_info.x, cur_sb_info.y, cur_sb_info.chan_rr_node[side][itrack], side)) { break; } */ /* Only consider the outputs that are driven by a multiplexer */ outport_name = (char*)my_malloc(sizeof(char)*( strlen(convert_chan_type_to_string(cur_sb_info.chan_rr_node[side][itrack]->type)) + 1 + strlen(my_itoa(cur_sb_info.x)) + 2 + strlen(my_itoa(cur_sb_info.y)) + 6 + strlen(my_itoa(itrack)) + 1 + 1)); sprintf(outport_name, "%s[%d][%d]_out[%d]", convert_chan_type_to_string(cur_sb_info.chan_rr_node[side][itrack]->type), ix, iy, itrack); if (TRUE == run_testbench_load_extraction) { /* Additional switch, default on! */ fprintf(fp, "**** Load for rr_node[%ld] *****\n", cur_sb_info.chan_rr_node[side][itrack] - rr_node); rr_node_outport_name = fprint_spice_testbench_rr_node_load_version(fp, &testbench_load_cnt, num_segments, segments, 0, *cur_sb_info.chan_rr_node[side][itrack], outport_name); } /* Free */ my_free(rr_node_outport_name); break; case IN_PORT: /* Get signal activity */ input_density = get_rr_node_net_density(*cur_sb_info.chan_rr_node[side][itrack]); input_probability = get_rr_node_net_probability(*cur_sb_info.chan_rr_node[side][itrack]); input_init_value = get_rr_node_net_init_value(*cur_sb_info.chan_rr_node[side][itrack]); /* Update statistics */ average_sb_input_density += input_density; if (0. 
< input_density) { avg_density_cnt++; } /* Input port requires a voltage stimuli */ /* Add input voltage pulses*/ fprintf(fp, "***** Signal %s[%d][%d]_in[%d] density = %g, probability=%g.*****\n", convert_chan_type_to_string(cur_sb_info.chan_rr_node[side][itrack]->type), ix, iy, itrack, input_density, input_probability); fprintf(fp, "V%s[%d][%d]_in[%d] %s[%d][%d]_in[%d] 0 \n", convert_chan_type_to_string(cur_sb_info.chan_rr_node[side][itrack]->type), ix, iy, itrack, convert_chan_type_to_string(cur_sb_info.chan_rr_node[side][itrack]->type), ix, iy, itrack); fprint_voltage_pulse_params(fp, input_init_value, input_density, input_probability); break; default: vpr_printf(TIO_MESSAGE_ERROR, "(File: %s [LINE%d]) Invalid direction of sb[%d][%d] side[%d] track[%d]!\n", __FILE__, __LINE__, cur_sb_info.x, cur_sb_info.y, side, itrack); exit(1); } } /* OPINs of adjacent CLBs are inputs and requires a voltage stimuli */ /* Input port requires a voltage stimuli */ for (inode = 0; inode < cur_sb_info.num_opin_rr_nodes[side]; inode++) { /* Print voltage stimuli of each OPIN */ ipin_height = get_grid_pin_height(cur_sb_info.opin_rr_node[side][inode]->xlow, cur_sb_info.opin_rr_node[side][inode]->ylow, cur_sb_info.opin_rr_node[side][inode]->ptc_num); fprint_spice_testbench_one_grid_pin_stimulation(fp, cur_sb_info.opin_rr_node[side][inode]->xlow, cur_sb_info.opin_rr_node[side][inode]->ylow, ipin_height, cur_sb_info.opin_rr_node_grid_side[side][inode], cur_sb_info.opin_rr_node[side][inode]->ptc_num, LL_rr_node_indices); /* Get signal activity */ input_density = get_rr_node_net_density(*cur_sb_info.opin_rr_node[side][inode]); input_probability = get_rr_node_net_probability(*cur_sb_info.opin_rr_node[side][inode]); input_init_value = get_rr_node_net_init_value(*cur_sb_info.opin_rr_node[side][inode]); /* Update statistics */ average_sb_input_density += input_density; if (0. < input_density) { avg_density_cnt++; } } fprintf(fp, "\n"); } /* Connect to VDD supply */ fprintf(fp, "***** Voltage supplies *****\n"); fprintf(fp, "Vgvdd_sb[%d][%d] gvdd_sb[%d][%d] 0 vsp\n", x, y, x, y); /* SRAM Voltage stimulit */ fprintf(fp, "V%s %s 0 vsp\n", spice_tb_global_vdd_sb_sram_port_name, spice_tb_global_vdd_sb_sram_port_name); /* Calculate the num_sim_clock_cycle for this MUX, update global max_sim_clock_cycle in this testbench */ if (0 < avg_density_cnt) { average_sb_input_density = average_sb_input_density/avg_density_cnt; num_sim_clock_cycles = (int)(1/average_sb_input_density) + 1; used = 1; } else { assert(0 == avg_density_cnt); average_sb_input_density = 0.; num_sim_clock_cycles = 2; used = 0; } if (TRUE == auto_select_max_sim_num_clock_cycles) { /* for idle blocks, 2 clock cycle is well enough... 
*/ if (2 < num_sim_clock_cycles) { num_sim_clock_cycles = upbound_sim_num_clock_cycles; } else { num_sim_clock_cycles = 2; } if (max_sim_num_clock_cycles < num_sim_clock_cycles) { max_sim_num_clock_cycles = num_sim_clock_cycles; } } else { num_sim_clock_cycles = max_sim_num_clock_cycles; } /* Measurements */ fprint_spice_netlist_transient_setting(fp, spice, num_sim_clock_cycles, FALSE); fprint_spice_netlist_generic_measurements(fp, spice.spice_params.mc_params, spice.num_spice_model, spice.spice_models); /* Measure the delay of MUX */ fprintf(fp, "***** Measurements *****\n"); /* Measure the leakage power of MUX */ fprintf(fp, "***** Leakage Power Measurement *****\n"); fprintf(fp, ".meas tran leakage_power_sb avg p(Vgvdd_sb[%d][%d]) from=0 to='clock_period'\n", x, y); /* Measure the leakage power of SRAMs */ fprintf(fp, ".meas tran leakage_power_sram_sb avg p(V%s) from=0 to='clock_period'\n", spice_tb_global_vdd_sb_sram_port_name); /* Measure the dynamic power of MUX */ fprintf(fp, "***** Dynamic Power Measurement *****\n"); fprintf(fp, ".meas tran dynamic_power_sb avg p(Vgvdd_sb[%d][%d]) from='clock_period' to='%d*clock_period'\n", x, y, num_sim_clock_cycles); fprintf(fp, ".meas tran energy_per_cycle_sb param='dynamic_power_sb*clock_period'\n"); /* Measure the dynamic power of SRAMs */ fprintf(fp, ".meas tran dynamic_power_sram_sb avg p(V%s) from='clock_period' to='%d*clock_period'\n", spice_tb_global_vdd_sb_sram_port_name, num_sim_clock_cycles); fprintf(fp, ".meas tran energy_per_cycle_sram_sb param='dynamic_power_sram_sb*clock_period'\n"); /* print average sb input density */ /* vpr_printf(TIO_MESSAGE_INFO,"Average density of SB[%d][%d] inputs is %.2g.\n", x, y, average_sb_input_density); */ /* Free */ return used; } int fprint_spice_one_cb_testbench(char* formatted_spice_dir, char* circuit_name, char* cb_testbench_name, char* include_dir_path, char* subckt_dir_path, t_ivec*** LL_rr_node_indices, int num_clocks, t_arch arch, int grid_x, int grid_y, t_rr_type cb_type, boolean leakage_only) { FILE* fp = NULL; char* formatted_subckt_dir_path = format_dir_path(subckt_dir_path); char* title = my_strcat("FPGA SPICE Connection Box Testbench Bench for Design: ", circuit_name); char* cb_testbench_file_path = my_strcat(formatted_spice_dir, cb_testbench_name); char* cb_tb_name = NULL; int used = 0; char* temp_include_file_path = NULL; /* one cbx, one cby*/ switch (cb_type) { case CHANX: cb_tb_name = "Connection Box X-channel "; temp_include_file_path = fpga_spice_create_one_subckt_filename(cbx_spice_file_name_prefix, grid_x, grid_y, spice_netlist_file_postfix); break; case CHANY: cb_tb_name = "Connection Box Y-channel "; temp_include_file_path = fpga_spice_create_one_subckt_filename(cby_spice_file_name_prefix, grid_x, grid_y, spice_netlist_file_postfix); break; default: vpr_printf(TIO_MESSAGE_ERROR, "(File:%s, [LINE%d]) Invalid connection_box_type!\n", __FILE__, __LINE__); exit(1); } /* Check if the path exists*/ fp = fopen(cb_testbench_file_path,"w"); if (NULL == fp) { vpr_printf(TIO_MESSAGE_ERROR,"(FILE:%s,LINE[%d])Failure in create SPICE %s Test bench netlist %s!\n", __FILE__, __LINE__, cb_tb_name, cb_testbench_file_path); exit(1); } /* Load global vars in this source file */ num_segments = arch.num_segments; segments = arch.Segments; testbench_load_cnt = 0; /* Print the title */ fprint_spice_head(fp, title); my_free(title); /* print technology library and design parameters*/ /* fprint_tech_lib(fp, arch.spice->tech_lib); */ /* Include parameter header files */ 
fprint_spice_include_param_headers(fp, include_dir_path); /* Include Key subckts */ fprint_spice_include_key_subckts(fp, formatted_subckt_dir_path); /* Include user-defined sub-circuit netlist */ init_include_user_defined_netlists(*(arch.spice)); fprint_include_user_defined_netlists(fp, *(arch.spice)); /* Print simulation temperature and other options for SPICE */ fprint_spice_options(fp, arch.spice->spice_params); /* Global nodes: Vdd for SRAMs, Logic Blocks(Include IO), Switch Boxes, Connection Boxes */ fprint_spice_routing_testbench_global_ports(fp, *(arch.spice)); fprint_spice_cb_testbench_global_ports(fp, *(arch.spice)); /* Quote defined Logic blocks subckts (Grids) */ init_spice_routing_testbench_globals(*(arch.spice)); /* one cbx, one cby*/ switch (cb_type) { case CHANX: case CHANY: /* Generate filename */ fprintf(fp, "****** Include subckt netlists: %s [%d][%d] *****\n", cb_tb_name, grid_x, grid_y); /* Check if we include an existing file! */ if (FALSE == check_subckt_file_exist_in_llist(routing_spice_subckt_file_path_head, my_strcat(formatted_subckt_dir_path, temp_include_file_path))) { vpr_printf(TIO_MESSAGE_ERROR,"(FILE:%s,LINE[%d])Intend to include a non-existed SPICE netlist %s!", __FILE__, __LINE__, temp_include_file_path); exit(1); } spice_print_one_include_subckt_line(fp, formatted_subckt_dir_path, temp_include_file_path); used = fprint_spice_routing_testbench_call_one_cb_tb(fp, *(arch.spice), cb_type, grid_x, grid_y, LL_rr_node_indices); break; default: vpr_printf(TIO_MESSAGE_ERROR, "(File:%s, [LINE%d]) Invalid connection_box_type!\n", __FILE__, __LINE__); exit(1); } /* Generate SPICE routing testbench generic stimuli*/ fprintf_spice_routing_testbench_generic_stimuli(fp, num_clocks); /* SPICE ends*/ fprintf(fp, ".end\n"); /* Close the file*/ fclose(fp); /* Push the testbench to the linked list */ tb_head = add_one_spice_tb_info_to_llist(tb_head, cb_testbench_file_path, max_sim_num_clock_cycles); used = 1; /* Free */ my_free(temp_include_file_path); return used; } int fprint_spice_one_sb_testbench(char* formatted_spice_dir, char* circuit_name, char* sb_testbench_name, char* include_dir_path, char* subckt_dir_path, t_ivec*** LL_rr_node_indices, int num_clocks, t_arch arch, int grid_x, int grid_y, boolean leakage_only) { FILE* fp = NULL; char* formatted_subckt_dir_path = format_dir_path(subckt_dir_path); char* title = my_strcat("FPGA SPICE Switch Block Testbench Bench for Design: ", circuit_name); char* sb_testbench_file_path = my_strcat(formatted_spice_dir, sb_testbench_name); char* sb_tb_name = NULL; int used = 0; char* temp_include_file_path = NULL; sb_tb_name = "Switch Block "; /* Check if the path exists*/ fp = fopen(sb_testbench_file_path,"w"); if (NULL == fp) { vpr_printf(TIO_MESSAGE_ERROR,"(FILE:%s,LINE[%d])Failure in create SPICE %s Test bench netlist %s!\n", __FILE__, __LINE__, sb_tb_name, sb_testbench_file_path); exit(1); } /* Load global vars in this source file */ num_segments = arch.num_segments; segments = arch.Segments; testbench_load_cnt = 0; /* Print the title */ fprint_spice_head(fp, title); my_free(title); /* print technology library and design parameters*/ /* Include parameter header files */ fprint_spice_include_param_headers(fp, include_dir_path); /* Include Key subckts */ fprint_spice_include_key_subckts(fp, formatted_subckt_dir_path); /* Include user-defined sub-circuit netlist */ init_include_user_defined_netlists(*(arch.spice)); fprint_include_user_defined_netlists(fp, *(arch.spice)); /* Print simulation temperature and other options for SPICE */ 
fprint_spice_options(fp, arch.spice->spice_params); /* Global nodes: Vdd for SRAMs, Logic Blocks(Include IO), Switch Boxes, Connection Boxes */ fprint_spice_routing_testbench_global_ports(fp, *(arch.spice)); fprint_spice_sb_testbench_global_ports(fp, *(arch.spice)); /* Quote defined Logic blocks subckts (Grids) */ init_spice_routing_testbench_globals(*(arch.spice)); /* Generate filename */ fprintf(fp, "****** Include subckt netlists: Switch Block[%d][%d] *****\n", grid_x, grid_y); temp_include_file_path = fpga_spice_create_one_subckt_filename(sb_spice_file_name_prefix, grid_x, grid_y, spice_netlist_file_postfix); /* Check if we include an existing file! */ if (FALSE == check_subckt_file_exist_in_llist(routing_spice_subckt_file_path_head, my_strcat(formatted_subckt_dir_path, temp_include_file_path))) { vpr_printf(TIO_MESSAGE_ERROR,"(FILE:%s,LINE[%d])Intend to include a non-existed SPICE netlist %s!", __FILE__, __LINE__, temp_include_file_path); exit(1); } spice_print_one_include_subckt_line(fp, formatted_subckt_dir_path, temp_include_file_path); used = fprint_spice_routing_testbench_call_one_sb_tb(fp, *(arch.spice), grid_x, grid_y, LL_rr_node_indices); /* Generate SPICE routing testbench generic stimuli*/ fprintf_spice_routing_testbench_generic_stimuli(fp, num_clocks); /* SPICE ends*/ fprintf(fp, ".end\n"); /* Close the file*/ fclose(fp); /* Push the testbench to the linked list */ tb_head = add_one_spice_tb_info_to_llist(tb_head, sb_testbench_file_path, max_sim_num_clock_cycles); used = 1; return used; } /* Top function: Generate testbenches for all Connection Boxes */ void spice_print_cb_testbench(char* formatted_spice_dir, char* circuit_name, char* include_dir_path, char* subckt_dir_path, t_ivec*** LL_rr_node_indices, int num_clocks, t_arch arch, boolean leakage_only) { char* cb_testbench_name = NULL; int ix, iy; int cnt = 0; int used = 0; /* X-channel Connection Blocks */ vpr_printf(TIO_MESSAGE_INFO,"Generating X-channel Connection Block testbench...\n"); for (iy = 0; iy < (ny+1); iy++) { for (ix = 1; ix < (nx+1); ix++) { /* Bypass non-exist CBs */ if ((FALSE == is_cb_exist(CHANX, ix, iy)) ||(0 == count_cb_info_num_ipin_rr_nodes(cbx_info[ix][iy]))) { continue; } cb_testbench_name = (char*)my_malloc(sizeof(char)*( strlen(circuit_name) + 4 + strlen(my_itoa(ix)) + 2 + strlen(my_itoa(iy)) + 1 + strlen(spice_cb_testbench_postfix) + 1 )); sprintf(cb_testbench_name, "%s_cbx%d_%d%s", circuit_name, ix, iy, spice_cb_testbench_postfix); used = fprint_spice_one_cb_testbench(formatted_spice_dir, circuit_name, cb_testbench_name, include_dir_path, subckt_dir_path, LL_rr_node_indices, num_clocks, arch, ix, iy, CHANX, leakage_only); if (1 == used) { cnt += used; } /* free */ my_free(cb_testbench_name); } } /* Y-channel Connection Blocks */ vpr_printf(TIO_MESSAGE_INFO,"Generating Y-channel Connection Block testbench...\n"); for (ix = 0; ix < (nx+1); ix++) { for (iy = 1; iy < (ny+1); iy++) { /* Bypass non-exist CBs */ if ((FALSE == is_cb_exist(CHANY, ix, iy)) ||(0 == count_cb_info_num_ipin_rr_nodes(cby_info[ix][iy]))) { continue; } cb_testbench_name = (char*)my_malloc(sizeof(char)*( strlen(circuit_name) + 4 + strlen(my_itoa(ix)) + 2 + strlen(my_itoa(iy)) + 1 + strlen(spice_cb_testbench_postfix) + 1 )); sprintf(cb_testbench_name, "%s_cby%d_%d%s", circuit_name, ix, iy, spice_cb_testbench_postfix); used = fprint_spice_one_cb_testbench(formatted_spice_dir, circuit_name, cb_testbench_name, include_dir_path, subckt_dir_path, LL_rr_node_indices, num_clocks, arch, ix, iy, CHANY, leakage_only); if (1 == used) { 
cnt += used; } /* free */ my_free(cb_testbench_name); } } /* Update the global counter */ num_used_cb_tb = cnt; vpr_printf(TIO_MESSAGE_INFO,"No. of generated Connection Block testbench = %d\n", num_used_cb_tb); return; } /* Top function: Generate testbenches for all Switch Blocks */ void spice_print_sb_testbench(char* formatted_spice_dir, char* circuit_name, char* include_dir_path, char* subckt_dir_path, t_ivec*** LL_rr_node_indices, int num_clocks, t_arch arch, boolean leakage_only) { char* sb_testbench_name = NULL; int ix, iy; int cnt = 0; int used = 0; vpr_printf(TIO_MESSAGE_INFO,"Generating Switch Block testbench...\n"); for (ix = 0; ix < (nx+1); ix++) { for (iy = 0; iy < (ny+1); iy++) { sb_testbench_name = (char*)my_malloc(sizeof(char)*( strlen(circuit_name) + 4 + strlen(my_itoa(ix)) + 2 + strlen(my_itoa(iy)) + 1 + strlen(spice_sb_testbench_postfix) + 1 )); sprintf(sb_testbench_name, "%s_sb%d_%d%s", circuit_name, ix, iy, spice_sb_testbench_postfix); used = fprint_spice_one_sb_testbench(formatted_spice_dir, circuit_name, sb_testbench_name, include_dir_path, subckt_dir_path, LL_rr_node_indices, num_clocks, arch, ix, iy, leakage_only); if (1 == used) { cnt += used; } /* free */ my_free(sb_testbench_name); } } /* Update the global counter */ num_used_sb_tb = cnt; vpr_printf(TIO_MESSAGE_INFO,"No. of generated Switch Block testbench = %d\n", num_used_sb_tb); return; }
<filename>src/types.ts import { Index } from 'parsimmon'; export type Location = { start: Index, end: Index } function defaultLocation(): Location { return { start: { column: 1, offset: 0, line: 1 }, end: { column: 1, offset: 0, line: 1 }, }; } type WithKind<T, Kind extends string> = T & { kind: Kind } export const DICE_MAX = 'DICE MAX'; export const DICE_MIN = 'DICE_MIN'; export type ModifierOperator = '=' | '<' | '>'; export type BinaryOperator = '+' | '-' | '*' | '/' | '**' | '%' interface DiceAttrs extends DiceModifiers { diceSides: number[] | Expression; noDice: number | Expression; } export interface DiceModifiers { success?: { op: ModifierOperator, number: number }; failure?: { op: ModifierOperator, number: number }; exploding?: { op: ModifierOperator, number: number | typeof DICE_MAX }; compounding?: { op: ModifierOperator, number: number | typeof DICE_MAX }; penetrating?: { op: ModifierOperator, number: number | typeof DICE_MAX }; keep?: { number: number, direction: 'h' | 'l' }; drop?: { number: number, direction: 'h' | 'l' }; rerollOnce?: { op: ModifierOperator, number: number | typeof DICE_MIN }; reroll?: ({ op: ModifierOperator, number: number | typeof DICE_MIN })[]; sort?: ({ direction: 'a' | 'd' }); } export type EDice = WithKind<DiceAttrs, 'dice'> & { loc: Location } export function dice(dice: DiceAttrs & { loc?: Location }): EDice { return { kind: 'dice', ...dice, loc: dice.loc || defaultLocation(), }; } interface NumberAttrs { value: number; } export type ENumber = WithKind<NumberAttrs, 'number'> & { loc: Location } export function number(num: NumberAttrs & { loc?: Location } | number): ENumber { if (typeof num === 'number') { return number({ value: num }); } return { kind: 'number', ...num, loc: num.loc || defaultLocation(), }; } interface BinExpressionAttrs { op: BinaryOperator, lhs: Expression, rhs: Expression, } export type BinExpression = WithKind<BinExpressionAttrs, 'binExpression'> & { loc: Location } export function binExpression(expr: BinExpressionAttrs & { loc?: Location }): BinExpression { return { kind: 'binExpression', ...expr, loc: expr.loc || defaultLocation(), }; } interface FuncExpressionAttrs { func: string, arg: Expression, } export type FuncExpression = WithKind<FuncExpressionAttrs, 'funcExpression'> & { loc: Location } export function funcExpression(expr: FuncExpressionAttrs & { loc?: Location }): FuncExpression { return { kind: 'funcExpression', ...expr, loc: expr.loc || defaultLocation(), }; } export interface DiceGroupModifiers { success?: { op: ModifierOperator, number: number }; failure?: { op: ModifierOperator, number: number }; keep?: { number: number, direction: 'h' | 'l' }; drop?: { number: number, direction: 'h' | 'l' }; } interface DiceGroupAttrs extends DiceGroupModifiers { elements: Expression[]; } export type DiceGroup = WithKind<DiceGroupAttrs, 'diceGroup'> & { loc: Location }; export function diceGroup(expr: DiceGroupAttrs & { loc?: Location }): DiceGroup { return { kind: 'diceGroup', ...expr, loc: expr.loc || defaultLocation(), }; } export type Expression = | EDice | ENumber | BinExpression | DiceGroup | FuncExpression
// A Target allows custom selection of colours in a Palette's generation
class Target {
private:
    void setDefaultWeights();
    void setTargetDefaultValues(std::vector<float> &);

protected:
    bool isExclusive_;
    std::vector<float> lightnessTargets;
    std::vector<float> saturationTargets;
    std::vector<float> weights;

    void setDefaultLightLightnessValues();
    void setDefaultNormalLightnessValues();
    void setDefaultDarkLightnessValues();
    void setDefaultVibrantSaturationValues();
    void setDefaultMutedSaturationValues();

public:
    Target();
    Target(const Target &);
    Target(const Target *);

    float getMinimumSaturation() const;
    float getTargetSaturation() const;
    float getMaximumSaturation() const;
    float getSaturationWeight() const;
    float getMinimumLightness() const;
    float getTargetLightness() const;
    float getMaximumLightness() const;
    float getLightnessWeight() const;
    float getPopulationWeight() const;
    bool isExclusive() const;
    void normalizeWeights();

    // Fluent builder for configuring a Target
    class Builder {
    private:
        Target * target;

    public:
        Builder();
        Builder(Target &);
        Builder & setMinimumSaturation(float);
        Builder & setTargetSaturation(float);
        Builder & setMaximumSaturation(float);
        Builder & setSaturationWeight(float);
        Builder & setMinimumLightness(float);
        Builder & setTargetLightness(float);
        Builder & setMaximumLightness(float);
        Builder & setLightnessWeight(float);
        Builder & setPopulationWeight(float);
        Builder & setExclusive(bool);
        Target build();
        ~Builder();
    };

    bool operator==(const Target) const;
};
/**
 * Initializes all the REST services for this agent (worker/coordinator).
 * <p>
 * If no port can be determined to listen on, an exception will be thrown
 * </p>
 *
 * @param ms   This is the scheduler that is in charge of managing microservice REST calls
 * @param prop This is a properties object with all runtime properties of the agent
 *             (worker/coord)
 */
public void init(CthulhuScheduler ms, Properties prop) {
  this.gson = new Gson();
  this.ms = ms;
  LOGGER.info("Creating REST paths");
  String sf = prop.getProperty("staticfiles");
  workspace = prop.getProperty("workspace");
  int port = Integer.parseInt(prop.getProperty("port"));
  setupRestCalls(sf, port);
  LOGGER.info("Ready!");
}
n = int(input())
a = list(map(int, input().split()))

# Count subarrays in which the sum equals the XOR.
# (s + a[j]) == (s ^ a[j]) holds exactly when a[j] shares no set bit with the
# running sum s, so the valid window is monotone and a two-pointer scan works.
j, s, tot = 0, 0, 0
for i in range(n):
    while j < n:
        if (s + a[j]) == (s ^ a[j]):
            s += a[j]
            j += 1
        else:
            break
    tot += (j - i)   # subarrays starting at i that stay inside the window
    s -= a[i]
print(tot)
#include <bits/stdc++.h>
#define all(x) (x).begin(),(x).end()
#define pb push_back
#define fi first
#define se second
typedef unsigned long long ul;
typedef long long ll;
using namespace std;

const int maxN = 100005;

int n, a[maxN], b[maxN], poz[maxN], x;
ll swaps = 0;

// Merge step: whenever an element from the right half is placed before the
// remaining elements of the left half, it accounts for (mid - i + 1)
// inversions, i.e. that many adjacent swaps.
void interclasare(int a[], int left, int mid, int right){
    int i = left;
    int j = mid + 1;
    int index = left;
    while (i <= mid && j <= right){
        if (a[i] < a[j]){
            poz[index] = a[i];
            index++;
            i++;
        }
        else{
            poz[index] = a[j];
            index++;
            j++;
            swaps += (ll) (mid - i + 1);
        }
    }
    while(i <= mid){
        poz[index] = a[i];
        index++;
        i++;
    }
    while(j <= right){
        poz[index] = a[j];
        index++;
        j++;
    }
    for (int i = left; i <= right; i++)
        a[i] = poz[i];
}

void mergesort(int a[], int left, int right){
    if (left < right){
        int mid = left + (right - left) / 2;
        mergesort(a, left, mid);
        mergesort(a, mid + 1, right);
        interclasare(a, left, mid, right);
    }
}

int main(){
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    ifstream cin("permutariab.in");
    ofstream cout("permutariab.out");
    cin >> n;
    for (int i = 1; i <= n; i++){
        cin >> a[i];
        poz[a[i]] = i;
    }
    // Relabel the target permutation by each value's position in a; the
    // answer is then the number of inversions of b.
    for (int i = 1; i <= n; i++){
        cin >> x;
        b[i] = poz[x];
    }
    mergesort(b, 1, n);
    cout << swaps;
    return 0;
}
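The reduction used by the C++ solution above is easy to state in a few lines: relabel the target permutation by each value's position in the first permutation, and the minimum number of adjacent swaps equals the inversion count of the relabelled sequence. A minimal Python sketch of the same idea follows; it uses an O(n^2) inversion count for clarity, whereas the C++ code counts inversions in O(n log n) with merge sort.

# Sketch only: same reduction as the C++ code above, with a quadratic
# inversion count instead of merge sort.
def min_adjacent_swaps(a, target):
    pos = {value: idx for idx, value in enumerate(a)}   # value -> position in a
    b = [pos[value] for value in target]                # relabelled target
    n = len(b)
    return sum(1 for i in range(n) for j in range(i + 1, n) if b[i] > b[j])

assert min_adjacent_swaps([2, 1], [1, 2]) == 1
assert min_adjacent_swaps([1, 2, 3], [3, 1, 2]) == 2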
def process_accum(self, trail, target=None):
    # Accumulate check-in counts into a (number of POIs) x (veclen) matrix:
    # one row per POI in the namespace, one column per time slot.
    if target is not None:
        vec = target
    else:
        vec = NP.zeros((len(self.namespace), self.veclen), dtype=NP.float32)
    for c in trail:
        poi_id = self.namespace.index(c['poi'])
        tickslot = self.get_timeslot(c['tick'])
        vec[poi_id, tickslot] += 1
    return vec
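To make the accumulation concrete, here is a standalone sketch of the same loop outside the class. The namespace, the number of time slots, and the hour-based slotting are illustrative assumptions, not values taken from the surrounding code.

import numpy as np

# Standalone sketch of the accumulation above; namespace, veclen and the
# modulo slotting are made up for illustration.
namespace = ['cafe', 'park', 'museum']
veclen = 24  # e.g. one slot per hour of the day

def get_timeslot(tick):
    return tick % veclen

trail = [{'poi': 'cafe', 'tick': 9}, {'poi': 'park', 'tick': 14}, {'poi': 'cafe', 'tick': 33}]

vec = np.zeros((len(namespace), veclen), dtype=np.float32)
for c in trail:
    vec[namespace.index(c['poi']), get_timeslot(c['tick'])] += 1

print(vec.shape)   # (3, 24)
print(vec[0, 9])   # 2.0 -- 'cafe' seen at slot 9 twice (ticks 9 and 33)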
// // Copyright (c) SAS Institute Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package xmldsig import ( "crypto" "crypto/ecdsa" "crypto/hmac" "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/xml" "errors" "fmt" "math/big" "strings" "github.com/sassoftware/relic/v7/lib/x509tools" "github.com/sassoftware/relic/v7/signers/sigerrors" "github.com/beevik/etree" ) type Signature struct { PublicKey crypto.PublicKey Certificates []*x509.Certificate Hash crypto.Hash EncryptedDigest []byte Reference *etree.Element } func (s Signature) Leaf() *x509.Certificate { for _, cert := range s.Certificates { if x509tools.SameKey(cert.PublicKey, s.PublicKey) { return cert } } return nil } // Extract and verify an enveloped signature at the given root func Verify(root *etree.Element, sigpath string, extraCerts []*x509.Certificate) (*Signature, error) { root = root.Copy() sigs := root.FindElements(sigpath) if len(sigs) == 0 { return nil, sigerrors.NotSignedError{Type: "xmldsig"} } else if len(sigs) > 1 { return nil, errors.New("xmldsig: multiple signatures found") } sigEl := sigs[0] // parse signature tree sigbytes, err := SerializeCanonical(sigEl) if err != nil { return nil, fmt.Errorf("xmldsig: %w", err) } var sig signature if err := xml.Unmarshal(sigbytes, &sig); err != nil { return nil, fmt.Errorf("xmldsig: %w", err) } // parse algorithms if sig.CanonicalizationMethod.Algorithm != AlgXMLExcC14n && sig.CanonicalizationMethod.Algorithm != AlgXMLExcC14nRec { return nil, errors.New("xmldsig: unsupported canonicalization method") } hash, pubtype, err := parseAlgs(sig.Reference.DigestMethod.Algorithm, sig.SignatureMethod.Algorithm) if err != nil { return nil, err } // parse public key var pubkey crypto.PublicKey if sig.KeyValue != nil { pubkey, err = parseKey(sig.KeyValue, pubtype) if err != nil { return nil, err } } // parse x509 certs certs := make([]*x509.Certificate, len(extraCerts)) copy(certs, extraCerts) for _, b64 := range sig.X509Certificates { der, err := base64.StdEncoding.DecodeString(b64) if err != nil { return nil, fmt.Errorf("xmldsig: invalid X509 certificate") } cert, err := x509.ParseCertificate(der) if err != nil { return nil, fmt.Errorf("xmldsig: invalid X509 certificate: %w", err) } certs = append(certs, cert) } // check signature signedinfo := sigEl.SelectElement("SignedInfo") if signedinfo == nil { return nil, errors.New("xmldsig: invalid signature") } siCalc, err := hashCanon(signedinfo, hash) if err != nil { return nil, err } sigv, err := base64.StdEncoding.DecodeString(sig.SignatureValue) if err != nil { return nil, errors.New("xmldsig: invalid signature") } if pubtype == "ecdsa" { // reformat with ASN.1 structure sig, err := x509tools.UnpackEcdsaSignature(sigv) if err != nil { return nil, err } sigv = sig.Marshal() } if pubkey == nil { // if no KeyValue is present then use the X509 certificate if len(certs) == 0 { return nil, errors.New("xmldsig: missing public key") } // no guarantee is made about the order in which certs appear, so try all of them for _, cert := range certs 
{ err = x509tools.Verify(cert.PublicKey, hash, siCalc, sigv) if err == nil { pubkey = cert.PublicKey break } } } else { err = x509tools.Verify(pubkey, hash, siCalc, sigv) } if err != nil { return nil, fmt.Errorf("xmldsig: %w", err) } // check reference digest var reference *etree.Element if sig.Reference.URI == "" { // enveloped signature if len(sig.Reference.Transforms) != 2 || sig.Reference.Transforms[0].Algorithm != AlgDsigEnvelopedSignature || (sig.Reference.Transforms[1].Algorithm != AlgXMLExcC14n && sig.Reference.Transforms[1].Algorithm != AlgXMLExcC14nRec) { return nil, errors.New("xmldsig: unsupported reference transform") } sigEl.Parent().RemoveChild(sigEl) reference = root } else { // enveloping signature if len(sig.Reference.Transforms) != 1 || (sig.Reference.Transforms[0].Algorithm != AlgXMLExcC14n && sig.Reference.Transforms[0].Algorithm != AlgXMLExcC14nRec) { return nil, errors.New("xmldsig: unsupported reference transform") } if sig.Reference.URI[0] != '#' { return nil, errors.New("xmldsig: unsupported reference URI") } reference = root.FindElement(fmt.Sprintf("[@Id='%s']", sig.Reference.URI[1:])) } if reference == nil { return nil, errors.New("xmldsig: unable to locate reference") } refCalc, err := hashCanon(reference, hash) if err != nil { return nil, err } refGiven, err := base64.StdEncoding.DecodeString(sig.Reference.DigestValue) if len(refGiven) != len(refCalc) || err != nil { return nil, errors.New("xmldsig: invalid signature") } if !hmac.Equal(refGiven, refCalc) { return nil, fmt.Errorf("xmldsig: digest mismatch: calculated %x, found %x", refCalc, refGiven) } return &Signature{ PublicKey: pubkey, Certificates: certs, Hash: hash, EncryptedDigest: sigv, Reference: reference, }, nil } func HashAlgorithm(hashAlg string) (string, crypto.Hash) { for _, prefix := range nsPrefixes { if strings.HasPrefix(hashAlg, prefix) { hashAlg = hashAlg[len(prefix):] break } } for hash, name := range hashNames { if hashAlg == name { return hashAlg, hash } } return hashAlg, 0 } func parseAlgs(hashAlg, sigAlg string) (crypto.Hash, string, error) { hashAlg, hash := HashAlgorithm(hashAlg) if !hash.Available() { return 0, "", errors.New("xmldsig: unsupported digest algorithm") } for _, prefix := range nsPrefixes { if strings.HasPrefix(sigAlg, prefix) { sigAlg = sigAlg[len(prefix):] break } } if !strings.HasSuffix(sigAlg, "-"+hashAlg) { return 0, "", errors.New("xmldsig: unsupported signature algorithm") } sigAlg = sigAlg[:len(sigAlg)-len(hashAlg)-1] if sigAlg != "rsa" && sigAlg != "ecdsa" { return 0, "", errors.New("xmldsig: unsupported signature algorithm") } return hash, sigAlg, nil } func parseKey(kv *keyValue, pubtype string) (crypto.PublicKey, error) { switch pubtype { case "rsa": nbytes, err := base64.StdEncoding.DecodeString(kv.Modulus) if len(nbytes) == 0 || err != nil { return nil, errors.New("xmldsig: invalid public key") } n := new(big.Int).SetBytes(nbytes) ebytes, err := base64.StdEncoding.DecodeString(kv.Exponent) if len(ebytes) == 0 || err != nil { return nil, errors.New("xmldsig: invalid public key") } ebig := new(big.Int).SetBytes(ebytes) if ebig.BitLen() > 30 { return nil, errors.New("xmldsig: invalid public key") } e := int(ebig.Int64()) return &rsa.PublicKey{N: n, E: e}, nil case "ecdsa": if !strings.HasPrefix(kv.NamedCurve.URN, "urn:oid:") { return nil, errors.New("xmldsig: unsupported ECDSA curve") } curve, err := x509tools.CurveByOidString(kv.NamedCurve.URN[8:]) if err != nil { return nil, fmt.Errorf("xmldsig: %w", err) } x, ok := new(big.Int).SetString(kv.X.Value, 10) if 
!ok { return nil, errors.New("xmldsig: invalid public key") } y, ok := new(big.Int).SetString(kv.Y.Value, 10) if !ok { return nil, errors.New("xmldsig: invalid public key") } if !curve.Curve.IsOnCurve(x, y) { return nil, errors.New("xmldsig: invalid public key") } return &ecdsa.PublicKey{Curve: curve.Curve, X: x, Y: y}, nil default: return nil, errors.New("xmldsig: unsupported signature algorithm") } }
/*
 * Client call: send packet to be transmitted into the switch
 */
uint32
pli_send_packet(int devNo, uint32 src_port, uint32 count, int len, unsigned char *buf)
{
    verinet_t *v = &verinet[devNo];
    rpc_cmd_t command;

    make_rpc(&command, RPC_SEND_PACKET, RPC_OK, 3, src_port, count, len);

    if (write_command(v->sockfd, &command) != RPC_OK) {
        cli_out("Error: pli_send_packet cmd failed\n");
        return -1;
    }

    if (writen(v->sockfd, buf, len) != len) {
        cli_out("Error: pli_send_packet data failed\n");
        return -1;
    }

    return 0;
}
Aggregation rules for cost-benefit analysis: a health economics perspective. Few willingness-to-pay (WTP) studies in the health sector have used their results within a cost-benefit analysis (CBA), an essential step to informing resource allocation decisions. This paper provides an overview of aggregation methods, reviews current evidence of practice in the health sector, and presents estimates of the total economic value of a women's group programme to improve mother and newborn health using different aggregation rules. A contingent valuation survey was conducted with 93 women's group members, 70 female non-members and 33 husbands. Aggregation was conducted with and without the values of non-users, and with different units of aggregation. The unadjusted mean, median and a weighted mean transfer were used to aggregate values. Equity weights were introduced to adjust WTP for income. Total WTP more than doubled when the values of husbands were added to that of women, and increased over 10-fold when the values of women who were not members of the group were added. The inclusion of non-use values, and the unit of aggregation, had the greatest effect on results. Researchers must reach agreement on the most acceptable method of aggregating WTP values to promote the use of WTP in resource allocation decisions in the health sector.
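The aggregation choices discussed above can be made concrete with a small numerical sketch. The Python snippet below shows scaling a sample's mean or median WTP to a population, the effect of including non-users and husbands, and a simple income-based equity weight; every figure in it is invented for illustration and is not drawn from the study.

import numpy as np

# All WTP values, group sizes and incomes below are hypothetical.
members_wtp     = np.array([12.0, 8.0, 20.0, 5.0])   # women's group members
non_members_wtp = np.array([2.0, 0.0, 4.0])          # female non-members (non-use values)
husbands_wtp    = np.array([6.0, 10.0])

def aggregate(samples, population, rule="mean"):
    """Scale a sample's central WTP value up to its population."""
    central = np.median(samples) if rule == "median" else samples.mean()
    return central * population

# Users only vs. users plus non-users, mean-based aggregation
users_only = aggregate(members_wtp, population=1000)
with_non_users = (users_only
                  + aggregate(non_members_wtp, population=5000)
                  + aggregate(husbands_wtp, population=1000))

# Equity weighting: rescale each response by (mean income / own income)
incomes = np.array([300.0, 150.0, 600.0, 200.0])
equity_weighted_mean = np.mean(members_wtp * (incomes.mean() / incomes))

print(users_only, with_non_users, equity_weighted_mean)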
# Copyright 2019 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Tests for the various Tensorflow-specific symbolic options of the frontend/backend. """ # pylint: disable=expression-not-assigned,too-many-public-methods,pointless-statement import pytest # this test file is only supported by the TF backend pytestmark = pytest.mark.backends("tf") import numpy as np from scipy.special import factorial try: import tensorflow as tf except (ImportError, ModuleNotFoundError): pytestmark = pytest.mark.skip("TensorFlow not installed") else: if tf.__version__[:3] != "1.3": pytestmark = pytest.mark.skip("Test only runs with TensorFlow 1.3") from strawberryfields.ops import Dgate, MeasureX ALPHA = 0.5 evalf = {'eval': False} def coherent_state(alpha, cutoff): """Returns the Fock representation of the coherent state |alpha> up to dimension given by cutoff""" n = np.arange(cutoff) return np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(factorial(n)) def _vac_ket(cutoff): """Returns the ket of the vacuum state up to dimension given by cutoff""" vac = np.zeros(cutoff) vac[0] = 1 return vac def _vac_dm(cutoff): """Returns the density matrix of the vacuum state up to dimension given by cutoff""" vac = _vac_ket(cutoff) return np.outer(vac, np.conj(vac)) class TestOneModeSymbolic: """Tests for symbolic workflows on one mode.""" ######################################### # tests basic eval behaviour of eng.run and states class. 
def test_eng_run_eval_false_returns_tensor(self, setup_eng): """Tests whether the eval=False option to the `eng.run` command successfully returns an unevaluated Tensor.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state state_data = state.data assert isinstance(state_data, tf.Tensor) def test_eng_run_eval_false_measurements_are_tensors(self, setup_eng): """Tests whether the eval=False option to the `eng.run` command successfully returns a unevaluated Tensors for measurment results.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q MeasureX | q eng.run(prog, eval=False) val = q[0].val assert isinstance(val, tf.Tensor) def test_eng_run_with_session_and_feed_dict(self, setup_eng, batch_size, cutoff, tol): """Tests whether passing a tf Session and feed_dict through `eng.run` leads to proper numerical simulation.""" a = tf.Variable(0.5) sess = tf.Session() sess.run(tf.global_variables_initializer()) tf_params = {'session': sess, 'feed_dict': {a: 0.0}} eng, prog = setup_eng(1) with prog.context as q: Dgate(a) | q state = eng.run(prog, state_options=tf_params).state if state.is_pure: k = state.ket() if batch_size is not None: dm = np.einsum("bi,bj->bij", k, np.conj(k)) else: dm = np.outer(k, np.conj(k)) else: dm = state.dm() vac_dm = _vac_dm(cutoff) assert np.allclose(dm, vac_dm, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of ket method def test_eng_run_eval_false_state_ket(self, setup_eng, pure): """Tests whether the ket of the returned state is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" if not pure: pytest.skip("Tested only for pure states") eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state ket = state.ket() assert isinstance(ket, tf.Tensor) def test_eval_false_state_ket(self, setup_eng, pure): """Tests whether the ket of the returned state is an unevaluated Tensor object when eval=False is passed to the ket method of a state.""" if not pure: pytest.skip("Tested only for pure states") eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state ket = state.ket(eval=False) assert isinstance(ket, tf.Tensor) def test_eval_true_state_ket(self, setup_eng, pure, cutoff, tol): """Tests whether the ket of the returned state is equal to the correct value when eval=True is passed to the ket method of a state.""" if not pure: pytest.skip("Tested only for pure states") eng, prog = setup_eng(1) state = eng.run(prog).state ket = state.ket(eval=True) vac = _vac_ket(cutoff) assert np.allclose(ket, vac, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of dm method def test_eng_run_eval_false_state_dm(self, pure, setup_eng): """Tests whether the density matrix of the returned state is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" if not pure: pytest.skip("Tested only for pure states") eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state dm = state.dm() assert isinstance(dm, tf.Tensor) def test_eval_false_state_dm(self, pure, setup_eng): """Tests whether the density matrix of the returned state is an unevaluated Tensor object when eval=False is passed to the ket method of a state.""" if not pure: pytest.skip("Tested only for pure states") eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state dm = 
state.dm(eval=False) assert isinstance(dm, tf.Tensor) def test_eval_true_state_dm(self, setup_eng, pure, cutoff, tol): """Tests whether the density matrix of the returned state is equal to the correct value when eval=True is passed to the ket method of a state.""" if not pure: pytest.skip("Tested only for pure states") eng, prog = setup_eng(1) state = eng.run(prog).state dm = state.dm(eval=True) vac_dm = _vac_dm(cutoff) assert np.allclose(dm, vac_dm, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of trace method def test_eng_run_eval_false_state_trace(self, setup_eng): """Tests whether the trace of the returned state is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state tr = state.trace() assert isinstance(tr, tf.Tensor) def test_eval_false_state_trace(self, setup_eng): """Tests whether the trace of the returned state is an unevaluated Tensor object when eval=False is passed to the trace method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state tr = state.trace(eval=False) assert isinstance(tr, tf.Tensor) def test_eval_true_state_trace(self, setup_eng, tol): """Tests whether the trace of the returned state is equal to the correct value when eval=True is passed to the trace method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state tr = state.trace(eval=True) assert np.allclose(tr, 1, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of reduced_dm method def test_eng_run_eval_false_state_reduced_dm(self, setup_eng): """Tests whether the reduced_density matrix of the returned state is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state rho = state.reduced_dm([0]) assert isinstance(rho, tf.Tensor) def test_eval_false_state_reduced_dm(self, setup_eng): """Tests whether the reduced density matrix of the returned state is an unevaluated Tensor object when eval=False is passed to the reduced_dm method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state rho = state.reduced_dm([0], eval=False) assert isinstance(rho, tf.Tensor) def test_eval_true_state_reduced_dm(self, setup_eng, cutoff, tol): """Tests whether the reduced density matrix of the returned state is equal to the correct value when eval=True is passed to the reduced_dm method of a state.""" eng, prog = setup_eng(1) state = eng.run(prog).state rho = state.reduced_dm([0], eval=True) vac_dm = _vac_dm(cutoff) assert np.allclose(rho, vac_dm, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of fidelity_vacuum method def test_eng_run_eval_false_state_fidelity_vacuum(self, setup_eng): """Tests whether the fidelity_vacuum method of the state returns an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state fidel_vac = state.fidelity_vacuum() assert isinstance(fidel_vac, tf.Tensor) def test_eval_false_state_fidelity_vacuum(self, setup_eng): """Tests whether the vacuum fidelity of the returned state is an unevaluated Tensor object when eval=False is passed to the fidelity_vacuum 
method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state fidel_vac = state.fidelity_vacuum(eval=False) assert isinstance(fidel_vac, tf.Tensor) def test_eval_true_state_fidelity_vacuum(self, setup_eng, tol): """Tests whether the vacuum fidelity of the returned state is equal to the correct value when eval=True is passed to the fidelity_vacuum method of a state.""" eng, prog = setup_eng(1) state = eng.run(prog).state fidel_vac = state.fidelity_vacuum(eval=True) assert np.allclose(fidel_vac, 1.0, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of is_vacuum method def test_eng_run_eval_false_state_is_vacuum(self, setup_eng): """Tests whether the is_vacuum method of the state returns an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog, state_options=evalf).state is_vac = state.is_vacuum() assert isinstance(is_vac, tf.Tensor) def test_eval_false_state_is_vacuum(self, setup_eng): """Tests whether the is_vacuum method of the state returns an unevaluated Tensor object when eval=False is passed to the is_vacuum method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(0.5) | q state = eng.run(prog).state is_vac = state.is_vacuum(eval=False) assert isinstance(is_vac, tf.Tensor) def test_eval_true_state_is_vacuum(self, setup_eng): """Tests whether the is_vacuum method of the state returns the correct value when eval=True is passed to the is_vacuum method of a state.""" eng, prog = setup_eng(1) state = eng.run(prog).state is_vac = state.is_vacuum(eval=True) assert np.all(is_vac) ######################################### # tests of eval behaviour of fidelity_coherent method def test_eng_run_eval_false_state_fidelity_coherent(self, setup_eng): """Tests whether the fidelity of the state with respect to coherent states is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog, state_options=evalf).state fidel_coh = state.fidelity_coherent([ALPHA]) assert isinstance(fidel_coh, tf.Tensor) def test_eval_false_state_fidelity_coherent(self, setup_eng): """Tests whether the fidelity of the state with respect to coherent states is an unevaluated Tensor object when eval=False is passed to the fidelity_coherent method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state fidel_coh = state.fidelity_coherent([ALPHA], eval=False) assert isinstance(fidel_coh, tf.Tensor) def test_eval_true_state_fidelity_coherent(self, setup_eng, tol): """Tests whether the fidelity of the state with respect to coherent states returns the correct value when eval=True is passed to the fidelity_coherent method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state fidel_coh = state.fidelity_coherent([ALPHA], eval=True) assert np.allclose(fidel_coh, 1, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of fidelity method def test_eng_run_eval_false_state_fidelity(self, setup_eng, cutoff): """Tests whether the fidelity of the state with respect to a local state is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog, state_options=evalf).state fidel = 
state.fidelity(coherent_state(ALPHA, cutoff), 0) assert isinstance(fidel, tf.Tensor) def test_eval_false_state_fidelity(self, setup_eng, cutoff): """Tests whether the fidelity of the state with respect to a local state is an unevaluated Tensor object when eval=False is passed to the fidelity method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state fidel = state.fidelity(coherent_state(ALPHA, cutoff), 0, eval=False) assert isinstance(fidel, tf.Tensor) def test_eval_true_state_fidelity(self, setup_eng, cutoff, tol): """Tests whether the fidelity of the state with respect to a local state returns the correct value when eval=True is passed to the fidelity method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state fidel_coh = state.fidelity(coherent_state(ALPHA, cutoff), 0, eval=True) assert np.allclose(fidel_coh, 1, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of quad_expectation method def test_eng_run_eval_false_state_quad_expectation(self, setup_eng): """Tests whether the local quadrature expectation of the state is unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog, state_options=evalf).state e, v = state.quad_expectation(0, 0) assert isinstance(e, tf.Tensor) assert isinstance(v, tf.Tensor) def test_eval_false_state_quad_expectation(self, setup_eng): """Tests whether the local quadrature expectation value of the state is an unevaluated Tensor object when eval=False is passed to the quad_expectation method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state e, v = state.quad_expectation(0, 0, eval=False) assert isinstance(e, tf.Tensor) assert isinstance(v, tf.Tensor) def test_eval_true_state_quad_expectation(self, setup_eng, tol, hbar): """Tests whether the local quadrature expectation value of the state returns the correct value when eval=True is passed to the quad_expectation method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state e, v = state.quad_expectation(0, 0, eval=True) true_exp = np.sqrt(hbar / 2.0) * (ALPHA + np.conj(ALPHA)) true_var = hbar / 2.0 assert np.allclose(e, true_exp, atol=tol, rtol=0.0) assert np.allclose(v, true_var, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of mean_photon method def test_eng_run_eval_false_state_mean_photon(self, setup_eng): """Tests whether the local mean photon number of the state is unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog, state_options=evalf).state nbar, var = state.mean_photon(0) assert isinstance(nbar, tf.Tensor) assert isinstance(var, tf.Tensor) def test_eval_false_state_mean_photon(self, setup_eng): """Tests whether the local mean photon number of the state is an unevaluated Tensor object when eval=False is passed to the mean_photon_number method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state nbar, var = state.mean_photon(0, eval=False) assert isinstance(nbar, tf.Tensor) assert isinstance(var, tf.Tensor) def test_eval_true_state_mean_photon(self, setup_eng, tol): """Tests whether the local mean photon number of the state returns the correct value 
when eval=True is passed to the mean_photon method of a state.""" eng, prog = setup_eng(1) with prog.context as q: Dgate(ALPHA) | q state = eng.run(prog).state nbar, var = state.mean_photon(0, eval=True) ref_nbar = np.abs(ALPHA) ** 2 ref_var = np.abs(ALPHA) ** 2 assert np.allclose(nbar, ref_nbar, atol=tol, rtol=0.0) assert np.allclose(var, ref_var, atol=tol, rtol=0.0) class TestTwoModeSymbolic: """Tests for symbolic workflows on two modes.""" ######################################### # tests of eval behaviour of all_fock_probs method def test_eng_run_eval_false_state_all_fock_probs(self, setup_eng): """Tests whether the Fock-basis probabilities of the state are an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(2) with prog.context as q: Dgate(ALPHA) | q[0] Dgate(-ALPHA) | q[1] state = eng.run(prog, state_options=evalf).state probs = state.all_fock_probs() assert isinstance(probs, tf.Tensor) def test_eval_false_state_all_fock_probs(self, setup_eng): """Tests whether the Fock-basis probabilities of the state are an unevaluated Tensor object when eval=False is passed to the all_fock_probs method of a state.""" eng, prog = setup_eng(2) with prog.context as q: Dgate(ALPHA) | q[0] Dgate(-ALPHA) | q[1] state = eng.run(prog).state probs = state.all_fock_probs(eval=False) assert isinstance(probs, tf.Tensor) def test_eval_true_state_all_fock_probs(self, setup_eng, cutoff, batch_size, tol): """Tests whether the Fock-basis probabilities of the state return the correct value when eval=True is passed to the all_fock_probs method of a state.""" eng, prog = setup_eng(2) with prog.context as q: Dgate(ALPHA) | q[0] Dgate(-ALPHA) | q[1] state = eng.run(prog).state probs = state.all_fock_probs(eval=True).flatten() ref_probs = np.abs(np.outer(coherent_state(ALPHA, cutoff),\ coherent_state(-ALPHA, cutoff))).flatten() ** 2 if batch_size is not None: ref_probs = np.tile(ref_probs, batch_size) assert np.allclose(probs, ref_probs, atol=tol, rtol=0.0) ######################################### # tests of eval behaviour of fock_prob method def test_eng_run_eval_false_state_fock_prob(self, setup_eng, cutoff): """Tests whether the probability of a Fock measurement outcome on the state is an unevaluated Tensor object when eval=False is passed to `eng.run`.""" eng, prog = setup_eng(2) with prog.context as q: Dgate(ALPHA) | q[0] Dgate(-ALPHA) | q[1] state = eng.run(prog, state_options=evalf).state prob = state.fock_prob([cutoff // 2, cutoff // 2]) assert isinstance(prob, tf.Tensor) def test_eval_false_state_fock_prob(self, setup_eng, cutoff): """Tests whether the probability of a Fock measurement outcome on the state is an unevaluated Tensor object when eval=False is passed to the fock_prob method of a state.""" eng, prog = setup_eng(2) with prog.context as q: Dgate(ALPHA) | q[0] Dgate(-ALPHA) | q[1] state = eng.run(prog).state prob = state.fock_prob([cutoff // 2, cutoff // 2], eval=False) assert isinstance(prob, tf.Tensor) def test_eval_true_state_fock_prob(self, setup_eng, cutoff, tol): """Tests whether the probability of a Fock measurement outcome on the state returns the correct value when eval=True is passed to the fock_prob method of a state.""" n1 = cutoff // 2 n2 = cutoff // 3 eng, prog = setup_eng(2) with prog.context as q: Dgate(ALPHA) | q[0] Dgate(-ALPHA) | q[1] state = eng.run(prog).state prob = state.fock_prob([n1, n2], eval=True) ref_prob = np.abs( np.outer(coherent_state(ALPHA, cutoff), coherent_state(-ALPHA, cutoff)) ** 2 )[n1, n2] assert np.allclose(prob, 
ref_prob, atol=tol, rtol=0.0)
Starbucks sticks to its guns

Greg Dement, of Kent, shows his opposition to gun-control advocates by wearing a Colt 1911 during a press event at Victor Steinbrueck Park organized by Washington CeaseFire, the Brady Campaign to Prevent Gun Violence, and Washington State Million Mom March. Organizers of the event hope to pressure Starbucks into adopting a no-guns policy in all its stores. Photo: Joshua Trujillo/seattlepi.com

Starbucks is asking nicely: Could we please, please stop talking about guns?

The Seattle-based coffee giant was thrust into the middle of a gun-control debate it never wanted to have when it refused a request from gun-control groups to ban guns in its stores this winter. The company gave short, curt statements as the issue gained traction over the past several weeks.

On Wednesday, as gun-control advocates prepared a Seattle news conference in view of the original Starbucks store, the company spoke loud and clear: We've made our decision. Can we please move on?

"Advocacy groups from both sides of this issue have chosen to use Starbucks as a way to draw attention to their positions," Starbucks said in the lengthy statement it released Wednesday morning. "As the public debate continues, we are asking all interested parties to refrain from putting Starbucks or our partners [employees] into the middle of this divisive issue."

That's a tough request, considering how useful the international coffee brand has become to both sides of the debate -- and how aggressively gun-control groups are going after the company.

"Starbucks put out a statement today saying they don't want to be in the middle of this fight. Well, they are. They are the middle," Brian Malte, director of federal and state mobilization at the Brady Campaign to Prevent Gun Violence, a national gun-control group, told reporters at Victor Steinbrueck Park at Pike Place Market. "Why are they in the middle? Because they chose -- they chose -- not to bar guns, so far, in their stores."

A gun enthusiast in the crowd yelled in response: "They chose liberty!"

Few even cared about Starbucks' gun policy -- or any other company's, for that matter -- until January, when word spread that gun-toting advocates of open-carry laws were meeting in coffeehouses and restaurants in California's Bay Area.

Seeing an opportunity to further their cause, the Brady Campaign asked two of the businesses -- California Pizza Kitchen and Peet's Coffee and Tea -- to exercise their legal right to ban guns in their stores. When they complied, the group aimed higher, asking the same of the most powerful name in coffee.

Starbucks refused, citing existing safety procedures, but the Brady Campaign persisted. Gun-toting activists, many of them aligned with the organization Open Carry, had also been meeting in Starbucks stores.

"We didn't choose Starbucks," Malte insisted. "Open Carry chose Starbucks."

An online petition the organization circulated this month to demand Starbucks "do the right thing" has collected nearly 30,000 signatures, Malte told the crowd gathered at Wednesday's news conference.

Saying the Brady Campaign is "representing Starbucks customers," he read one customer complaint out loud and said the group planned to deliver the petitions to Starbucks headquarters Wednesday afternoon.

Malte also brought up Starbucks' efforts to maintain ethical buying practices as reason to expect it would ban guns from its stores. As a "socially responsible" company, he said, Starbucks should do the "socially responsible" thing.

Heidi Yewman, president of the Vancouver, Wash., chapter of Million Mom March, followed his nudge with an accusation: "By allowing people to openly carry guns in their stores, [Starbucks is] violating the public's trust, they're violating their customers' trust, they're violating their employees' trust and they're violating the community's trust," she said to loud objections.

"Fear-mongerer!" someone yelled.

After the news conference, gun enthusiast Brick Loomis gathered with other protesting advocates behind the podium. A veteran who spent 30 years training soldiers on weaponry, Loomis showed up with his two dogs -- Ransom and Basil -- and a sign: "Thank you Starbucks! Coffee, God, Guns, All-American values."

Loomis said he'd fight changes to Washington state's open-carry laws, but not a business's right to set its own gun policy.

"[Businesses] shouldn't be coerced, like they're being coerced by the Brady Campaign," Loomis said to nods of agreement. "We're not trying to abridge Starbucks' rights. ... They are."
package com.tersesystems.echopraxia.logstash.jackson; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.ser.std.StdSerializer; import com.tersesystems.echopraxia.api.Field; import com.tersesystems.echopraxia.api.Value; import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; import java.util.List; /** * The ValueSerializer class plugs into the Jackson serializer system to serialize Value into JSON. */ public class ValueSerializer extends StdSerializer<Value> { static final ValueSerializer INSTANCE = new ValueSerializer(); public ValueSerializer() { super(Value.class); } @Override public void serialize(Value value, JsonGenerator gen, SerializerProvider provider) throws IOException { switch (value.type()) { case ARRAY: List<Value<?>> arrayValues = ((Value.ArrayValue) value).raw(); gen.writeStartArray(); for (Value<?> arrayValue : arrayValues) { gen.writeObject(arrayValue); } gen.writeEndArray(); break; case OBJECT: List<Field> objFields = ((Value.ObjectValue) value).raw(); gen.writeStartObject(); for (Field objField : objFields) { gen.writeObject(objField); } gen.writeEndObject(); break; case STRING: gen.writeString(value.raw().toString()); break; case NUMBER: Number n = ((Value.NumberValue) value).raw(); if (n instanceof Byte) { gen.writeNumber(n.byteValue()); } else if (n instanceof Short) { gen.writeNumber(n.shortValue()); } else if (n instanceof Integer) { gen.writeNumber(n.intValue()); } else if (n instanceof Long) { gen.writeNumber(n.longValue()); } else if (n instanceof Double) { gen.writeNumber(n.doubleValue()); } else if (n instanceof BigInteger) { gen.writeNumber((BigInteger) n); } else if (n instanceof BigDecimal) { gen.writeNumber((BigDecimal) n); } break; case BOOLEAN: boolean b = ((Value.BooleanValue) value).raw(); gen.writeBoolean(b); break; case EXCEPTION: final Throwable throwable = (Throwable) value.raw(); gen.writeString(throwable.toString()); break; case NULL: gen.writeNull(); break; } } }
// Please migrate to cf-java-logging-support-servlet. @Deprecated @Provider public class RequestMetricsClientResponseFilter implements ClientResponseFilter { private final ResponseHandler handler; public RequestMetricsClientResponseFilter() { handler = new ResponseHandler(); } @Override public void filter(ClientRequestContext requestContext, ClientResponseContext responseContext) throws IOException { try { handler.handle(new ClientResponseContextAdapter(responseContext), (RequestRecord) requestContext .getProperty(REQ_METRICS_KEY)); } catch (Exception ex) { LoggerFactory.getLogger(RequestMetricsClientResponseFilter.class).error("Can't handle client response", ex); } } }
/** * Function that uses a supplied {@link Transform} to transform a pair into a record. * * Non-serializable fields are lazily created since this is used in a Spark closure. * * @param <OUT> The type of the Output Object * @param <IN_KEY> the type of the input key * @param <IN_VAL> the type of the input value */ public class TransformFromPairFunction<OUT, IN_KEY, IN_VAL> implements FlatMapFunction<Tuple2<IN_KEY, IN_VAL>, OUT> { private final Transform<KeyValue<IN_KEY, IN_VAL>, OUT> transform; private transient DefaultEmitter<OUT> emitter; public TransformFromPairFunction(Transform<KeyValue<IN_KEY, IN_VAL>, OUT> transform) { this.transform = transform; } @Override public Iterator<OUT> call(Tuple2<IN_KEY, IN_VAL> input) throws Exception { if (emitter == null) { emitter = new DefaultEmitter<>(); } transform.transform(new KeyValue<>(input._1(), input._2()), emitter); return emitter.getEntries().iterator(); } }
// services/eventbrite/dbManager.go
package main

import (
	"database/sql"
	"fmt"
	"os"
	"strconv"

	_ "github.com/lib/pq"
)

var db *sql.DB

func dbSetup() error {
	dbhost := os.Getenv("DBHOST")
	dbport, err := strconv.Atoi(os.Getenv("DBPORT"))
	if err != nil {
		return err
	}
	dbuser := os.Getenv("DBUSER")
	// The original password lookup was redacted; reading it from the
	// environment like the other settings is assumed here.
	dbpassword := os.Getenv("DBPASSWORD")
	dbname := os.Getenv("DBNAME")

	sqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		dbhost, dbport, dbuser, dbpassword, dbname)

	db, err = sql.Open("postgres", sqlInfo)
	if err != nil {
		return err
	}
	return nil
}

func dbInsertEventbrite(order_number string) error {
	sqlStmt := `INSERT INTO public.eventbrite (order_number, used) VALUES ($1, false)`
	_, err := db.Exec(sqlStmt, order_number)
	if err != nil {
		return err
	}
	return nil
}
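A minimal usage sketch for the two helpers above, shown as a separate file in the same package. It is not part of the original service: recordExampleOrder is a made-up name, "ORDER-12345" is a placeholder order number, and it assumes the DB* environment variables (including the inferred DBPASSWORD) are exported and the public.eventbrite table exists.

package main

import "log"

// recordExampleOrder is an illustrative caller for dbSetup and
// dbInsertEventbrite above.
func recordExampleOrder() {
	if err := dbSetup(); err != nil {
		log.Fatalf("could not connect to postgres: %v", err)
	}
	defer db.Close()

	// "ORDER-12345" is a placeholder, not a real Eventbrite order number.
	if err := dbInsertEventbrite("ORDER-12345"); err != nil {
		log.Fatalf("could not record order: %v", err)
	}
	log.Println("order recorded")
}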
h,w,k=map(int,input().split()) tbl=[list(input()) for _ in range(h)] ans=[[0]*w for _ in range(h)] firsth=0 for i in range(h): if "#" in tbl[i]: break else: firsth+=1 c=0 for i in range(firsth,h): if "#" in tbl[i]: c+=1 firstw=tbl[i].index("#") for j in range(firstw+1): ans[i][j]=c for j in range(firstw+1,w): if tbl[i][j]=="#": c+=1 ans[i][j]=c else: for j in range(w): ans[i][j]=ans[i-1][j] for i in range(firsth): for j in range(w): ans[i][j]=ans[firsth][j] for i in range(h): print(*ans[i])
#include <stdio.h> #define forn(i, n) for(int i = 0; i < (int)(n); i++) #define si(x) scanf("%d", &x) #define pri(x) printf("%d ", &x) #define deb(x) printf("deb: %d\n", x) #define endl printf("\n") typedef long long i64; void swap(int *xp, int *yp) { int temp = *xp; *xp = *yp; *yp = temp; } void sort(int *mas) { for(int i = 0; i < 4; i++) { for(int j = i + 1; j < 4; j++) { if(mas[i] > mas[j]) { swap(&mas[i], &mas[j]); } } } } int main() { int mas[5], e = 3; for(int i = 0; i < 4; i++) { scanf("%d", &mas[i]); } sort(mas); for(int i = 0; i < 3; i++) { if(mas[i] != mas[i+1]) e--; } printf("%d\n", e); return 0; }
<reponame>LMarsiske/twilio-video-app-react<gh_stars>0 import React, { useRef, useEffect, useState } from 'react'; import { createStyles, makeStyles, Theme } from '@material-ui/core/styles'; import { Button, Tooltip, Menu, MenuItem, IconButton, ListItemIcon, ListItemText, FormControl, FormControlLabel, Checkbox, } from '@material-ui/core'; import { Clear, ExpandLess, ExpandMore } from '@material-ui/icons'; import HoverMenu from 'material-ui-popup-state/HoverMenu'; import { usePopupState, bindHover, bindMenu } from 'material-ui-popup-state/hooks'; import useOverlayContext from '../../hooks/useOverlayContext/useOverlayContext'; export const SCREEN_SHARE_TEXT = 'Share Screen'; export const STOP_SCREEN_SHARE_TEXT = 'Stop Sharing Screen'; export const SHARE_IN_PROGRESS_TEXT = 'Cannot share screen when another user is sharing'; export const SHARE_NOT_SUPPORTED_TEXT = 'Screen sharing is not supported with this browser'; const useStyles = makeStyles((theme: Theme) => createStyles({ button: { '&[disabled]': { color: '#bbb', '& svg *': { fill: '#bbb', }, }, }, }) ); interface CustomHoverMenuProps { disabled?: boolean; tightFit?: boolean; children?: React.ReactNode; } export default function CustomHoverMenu({ disabled = false, tightFit = true, children }: CustomHoverMenuProps) { const classes = useStyles(); const { isResetAllowed, setMarkers } = useOverlayContext(); const isDisabled = false; let tooltipMessage = ''; if (!isResetAllowed) { tooltipMessage = 'The overlay cannot be cleared until it has been saved at least once.'; } const popupState = usePopupState({ variant: 'popover', popupId: 'test' }); return ( <> <IconButton style={{ padding: tightFit ? 0 : '16px' }} {...bindHover(popupState)} disabled={disabled}> <ExpandLess /> </IconButton> <HoverMenu {...bindMenu(popupState)} getContentAnchorEl={null} anchorOrigin={{ vertical: 'bottom', horizontal: 'left' }} transformOrigin={{ vertical: 'top', horizontal: 'left' }} > {children} </HoverMenu> </> ); }
import { Component, OnInit, ViewEncapsulation } from '@angular/core'; import { DRLoadingService } from './dr-loading.service'; @Component({ selector: 'dr-loading', templateUrl: './dr-loading.component.html', styleUrls: ['./dr-loading.component.scss'], providers: [DRLoadingService], encapsulation: ViewEncapsulation.None, }) export class LoadingComponent implements OnInit { // // Properties. public active: boolean = false; public message: string = ''; // // Construction. constructor(protected loadingSrv: DRLoadingService) { } // // Public methods. public ngOnInit(): void { this.loadingSrv.listen().subscribe(status => { this.active = status.active; this.message = status.message; }); } }
/** * Tests a simple split: {A,B} and {C,D} need to merge back into one subgroup. Checks how many MergeViews are installed */ public void testSplitInTheMiddle2() throws Exception { View v1=View.create(a.getAddress(), 10, a.getAddress(), b.getAddress()); View v2=View.create(c.getAddress(), 10, c.getAddress(), d.getAddress()); injectView(v1, a,b); injectView(v2, c,d); enableInfoSender(false, a,b,c,d); Util.waitUntilAllChannelsHaveSameView(10000, 500, a, b); Util.waitUntilAllChannelsHaveSameView(10000, 500, c, d); enableInfoSender(false, a,b,c,d); for(JChannel ch: Arrays.asList(a,b,c,d)) System.out.println(ch.getName() + ": " + ch.getView()); System.out.println("\nEnabling INFO sending in merge protocols to merge subclusters"); enableInfoSender(true, a, b, c, d); Util.waitUntilAllChannelsHaveSameView(30000, 1000, a, b, c, d); System.out.println("\nResulting views:"); for(JChannel ch: Arrays.asList(a,b,c,d)) { GMS gms=ch.getProtocolStack().findProtocol(GMS.class); View mv=gms.view(); System.out.println(mv); } for(JChannel ch: Arrays.asList(a,b,c,d)) { GMS gms=ch.getProtocolStack().findProtocol(GMS.class); View mv=gms.view(); assert mv instanceof MergeView; assert mv.size() == 4; assert ((MergeView)mv).getSubgroups().size() == 2; } for(JChannel ch: Arrays.asList(a,b,c,d)) { View view=ch.getView(); assert view.size() == 4 : "view should have 4 members: " + view; } }
def flatten(self, geometry): try: for geo in geometry: if geo is not None: self.flatten(geometry=geo) except TypeError: self.flat_geometry.append(geometry) return self.flat_geometry
// constant/authorization.go
package constant

// For account user roles we use one bit per role, so shifting the bit gives a
// different role, for example:
//   0000 0001  superadmin
//   0000 0010  admin
//   0000 0100  normal user
// To check whether an account is authorized we can then use bitwise OR against
// an "allowed roles" mask. For example, with the mask 0000 0011 (only admin or
// above is authorized):
//   0000 0100 | 0000 0011 => 0000 0111, not equal to 0000 0011, so rejected
//   0000 0010 | 0000 0011 => 0000 0011, equal to 0000 0011, so accepted

func GetAdminAccount() uint {
	return 1
}

func GetStandardAccount() uint {
	return 1 << 1
}

func GetAdminRole() uint {
	return GetAdminAccount()
}

func GetStandardRole() uint {
	// The standard-role mask must contain both bits so that either a standard
	// or an admin account passes the OR check described above; combining the
	// bits with & would yield 0 and reject everyone.
	return GetStandardAccount() | GetAdminAccount()
}
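The check described in that comment is easiest to see as a small stand-alone program. This is an illustrative sketch, not part of the constant package: isAuthorized is a made-up helper name, and the two constants simply mirror GetAdminAccount and GetStandardAccount above.

package main

import "fmt"

const (
	adminAccount    uint = 1      // 0000 0001, mirrors GetAdminAccount above
	standardAccount uint = 1 << 1 // 0000 0010, mirrors GetStandardAccount above
)

// isAuthorized ORs the account's role bit into the allowed-role mask; if the
// mask comes back unchanged, the bit was already part of the allowed set.
func isAuthorized(account, allowedMask uint) bool {
	return account|allowedMask == allowedMask
}

func main() {
	adminOnly := adminAccount                         // 0000 0001
	adminOrStandard := adminAccount | standardAccount // 0000 0011

	fmt.Println(isAuthorized(adminAccount, adminOnly))          // true
	fmt.Println(isAuthorized(standardAccount, adminOnly))       // false
	fmt.Println(isAuthorized(standardAccount, adminOrStandard)) // true
}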
package com.rest.api.utils;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.LineNumberReader;

import org.springframework.core.io.ClassPathResource;

public class Utils {

    public static final ClassPathResource csvFileResource() throws FileNotFoundException {
        ClassPathResource fileResource = new ClassPathResource("riddles.csv");
        return fileResource;
    }

    /**
     * Get the total number of riddles in the CSV file. Taken from:
     * https://stackoverflow.com/a/5342096/11701504.
     *
     * @return The total count of riddles in the CSV.
     * @throws IOException
     */
    public static final int totalRiddlesCount() throws IOException {
        int result = 0;
        InputStream is = csvFileResource().getInputStream();
        try (InputStreamReader input = new InputStreamReader(is);
                LineNumberReader count = new LineNumberReader(input);) {
            while (count.skip(Long.MAX_VALUE) > 0) {
            }
            result = count.getLineNumber() + 1;
        }
        is.close();
        return result;
    }
}
<reponame>pkosiec/capact package main import ( "context" "fmt" grakn "capact.io/capact/poc/graph-db/grakn/go-grakn/gograkn/session" "capact.io/capact/poc/graph-db/grakn/graphql" ) type MyResolver struct { } func NewRootResolver() *MyResolver { return &MyResolver{} } func (r *MyResolver) Query() graphql.QueryResolver { return r } func (r *MyResolver) InterfaceGroups(ctx context.Context, filter *graphql.InterfaceGroupFilter) ([]*graphql.InterfaceGroup, error) { preloads := GetPreloads(ctx) q := toQuery(preloads) res, err := query(q) if err != nil { return []*graphql.InterfaceGroup{}, err } fmt.Printf("%+v", res) return []*graphql.InterfaceGroup{toInterfaceGroup(res, preloads)}, nil } func toQuery(fields []string) string { query := "match $ifaceGroup isa interfaceGroup" relations := "" mapped := map[string]bool{} for _, k := range fields { mapped[k] = true } if _, ok := mapped["interfaces"]; ok { query += ";$iface isa interface" relations += ";$gr1 (groups: $ifaceGroup, grouped: $iface) isa grouping" if _, ok := mapped["interfaces.name"]; ok { query += ", has name $ifaceName" } // this would go to a new funciton which would get implementation query if _, ok := mapped["interfaces.revisions.implementations"]; ok { query += ";$impl isa implementation" relations += ";$impl-iface (defines: $iface, implements: $impl) isa implementator" } if _, ok := mapped["interfaces.revisions.implementations.name"]; ok { query += ", has name $implName" } } query += relations + ";get;" fmt.Printf("\n%s\n\n", query) return query } ///XXX ignores relations func toInterfaceGroup(concepts []map[string]*grakn.Concept, fields []string) *graphql.InterfaceGroup { ig := &graphql.InterfaceGroup{} mapped := map[string]bool{} for _, k := range fields { mapped[k] = true } if _, ok := mapped["interfaces"]; ok { interfaces := []*graphql.Interface{} for _, m := range concepts { if v, ok := m["ifaceName"]; ok { iface := &graphql.Interface{} iface.Name = v.ValueRes.Value.GetString_() interfaces = append(interfaces, iface) } } ig.Interfaces = append(ig.Interfaces, interfaces...) } return ig }
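The string building in toQuery is easiest to follow with a concrete preload list. The test below is an illustrative sketch (it does not exist in the repository) that could sit in a _test.go file next to the code above; it checks the Graql produced when a GraphQL query selects only interface names.

package main

import "testing"

// TestToQueryInterfaceNames exercises toQuery above with the preloads that a
// query selecting only interface names would produce.
func TestToQueryInterfaceNames(t *testing.T) {
	got := toQuery([]string{"interfaces", "interfaces.name"})
	want := "match $ifaceGroup isa interfaceGroup" +
		";$iface isa interface, has name $ifaceName" +
		";$gr1 (groups: $ifaceGroup, grouped: $iface) isa grouping" +
		";get;"
	if got != want {
		t.Errorf("toQuery() = %q, want %q", got, want)
	}
}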
MasterCard has announced a “P2P” debit card-based remittance system which it says customers will find “more appealing” than the Bitcoin protocol. MasterCard Send, currently in a pre-signup phase in the US only, is designed to allow transactions between “banked and unbanked consumers,” international and domestic payments at a near-instant speed rather than the several days required with the traditional bank clearing system. Speaking to PYMNTS, Barb King, a group head in the MasterCard Payment Systems Integrity Group, called the new tool “a breakthrough platform in the industry.” King continued: “[It works off] the model that gives the consumer the ability to get…funds in a way that they’re already very comfortable with.” The response was in reference to a question regarding MasterCard Send’s advantages over Bitcoin, which King identified as being off-putting to consumers due to the apparent need to share personal details with third parties. “[C]onsumers are much more comfortable giving their personal details to their financial institution than they are to many other types of entities,” she added. While concrete details of the scheme have yet to be published, preliminary literature is notable in its arguably misleading use of certain terms used to qualify its “breakthrough” status. MasterCard’s description of Send being a P2P service is debatable, transactions needing to be submitted from a US debit card and processed, as opposed to ‘true’ P2P architecture found in digital currency transactions. Further, the disadvantages associated with both centralized and bank-based payment systems are still present. Notably, chargebacks could still occur several months after the approximately minute-long transaction occurred. The MasterCard Send scheme is thus seemingly more of a competitor for PayPal and Venmo than for Bitcoin or Ripple. Indeed, as purely a method of speeding up bank-based transactions to banked recipients, the announcement contains little new technology. Third party add-ons for the banking sector have existed for several years to serve this purpose, such as the UK’s Faster Payments scheme which began offering near-instant transfers in 2008. Further complications arise from the information presented by MasterCard itself. Details of the exact remittance process for unbanked recipients have not been made clear, and while some sources speculate that transactions could be facilitated via partnerships with fiat money transfer operators such as Western Union, the fees involved have so far not been mentioned. Regarding security, MasterCard makes passing reference to what it calls “cross-border blocks.” At the present time, however, no further information has been made available regarding how and under which circumstances such measures would be implemented. King nonetheless remains buoyant about the scheme’s future. “We think [personal payments] is a growing space in the industry, [be it] person-to-person payments, business to consumer, [or] government to consumer,” she said. “When you look at some of the industry statistics that exist, in terms of the opportunity to displace cash and checks, we’re talking trillions of dollars.”
/** * A completion provider for Perl. It provides: * * <ul> * <li>Auto-completion for standard Perl 5.10 functions (read from an * XML file).</li> * <li>Crude auto-completion for variables. Only variables in scope at the * current caret position are suggested, but there may still be issues * with variable types, etc.</li> * </ul> * * To toggle whether parameter assistance wraps your parameters in parens, * use the {@link #setUseParensWithFunctions(boolean)} method. * * @author Robert Futrell * @version 1.0 */ public class PerlCompletionProvider extends CCompletionProvider { private boolean useParensWithFunctions; /** * {@inheritDoc} */ @Override protected void addShorthandCompletions(DefaultCompletionProvider codeCP) { // Add nothing for now. } /** * Creates an "AST" for Perl code, representing code blocks and variables * inside of those blocks. * * @param textArea The text area. * @return A "code block" representing the entire Perl source file. */ private CodeBlock createAst(RSyntaxTextArea textArea) { CodeBlock ast = new CodeBlock(0); TokenScanner scanner = new TokenScanner(textArea); parseCodeBlock(scanner, ast); return ast; } /** * {@inheritDoc} */ @Override protected CompletionProvider createCodeCompletionProvider() { DefaultCompletionProvider cp = new PerlCodeCompletionProvider(this); loadCodeCompletionsFromXml(cp); addShorthandCompletions(cp); return cp; } /** * {@inheritDoc} */ @Override protected CompletionProvider createStringCompletionProvider() { DefaultCompletionProvider cp = new DefaultCompletionProvider(); return cp; } /** * {@inheritDoc} */ @Override protected List<Completion> getCompletionsImpl(JTextComponent comp) { List<Completion> completions = super.getCompletionsImpl(comp); SortedSet<Completion> varCompletions = getVariableCompletions(comp); if (varCompletions!=null) { completions.addAll(varCompletions); Collections.sort(completions); } return completions; } /** * Overridden to return the null char (meaning "no end character") if the * user doesn't want to use parens around their functions. * * @return The end character for parameters list, or the null char if * none. * @see #getUseParensWithFunctions() */ @Override public char getParameterListEnd() { return getUseParensWithFunctions() ? ')' : 0; } /** * Overridden to return the null char (meaning "no start character") if the * user doesn't want to use parens around their functions. * * @return The start character for parameters list, or the null char if * none. * @see #getUseParensWithFunctions() */ @Override public char getParameterListStart() { return getUseParensWithFunctions() ? '(' : ' '; } /** * Returns whether the user wants to use parens around parameters to * functions. * * @return Whether to use parens around parameters to functions. * @see #setUseParensWithFunctions(boolean) */ public boolean getUseParensWithFunctions() { return useParensWithFunctions; } /** * Does a crude search for variables up to the caret position. This * method does not care whether the variables are in scope at the caret * position. * * @param comp The text area. * @return The completions for variables, or <code>null</code> if there * were none. */ private SortedSet<Completion> getVariableCompletions(JTextComponent comp) { RSyntaxTextArea textArea = (RSyntaxTextArea)comp; int dot = textArea.getCaretPosition(); SortedSet<Completion> varCompletions = new TreeSet<>(comparator); CompletionProvider p = getDefaultCompletionProvider(); String text = p.getAlreadyEnteredText(comp); char firstChar = text.length()==0 ? 
0 : text.charAt(0); if (firstChar!='$' && firstChar!='@' && firstChar!='%') { System.out.println("DEBUG: No use matching variables, exiting"); return null; } // Go through all code blocks in scope and look for variables // declared before the caret. CodeBlock block = createAst(textArea); recursivelyAddLocalVars(varCompletions, block, dot, firstChar); // Get only those that match what's typed if (varCompletions.size()>0) { Completion from = new BasicCompletion(p, text); Completion to = new BasicCompletion(p, text + '{'); varCompletions = varCompletions.subSet(from, to); } return varCompletions; } private CaseInsensitiveComparator comparator = new CaseInsensitiveComparator(); /** * A comparator that compares the input text of two {@link Completion}s * lexicographically, ignoring case. */ private static class CaseInsensitiveComparator implements Comparator<Completion>, Serializable { @Override public int compare(Completion c1, Completion c2) { String s1 = c1.getInputText(); String s2 = c2.getInputText(); return String.CASE_INSENSITIVE_ORDER.compare(s1, s2); } } /** * {@inheritDoc} */ @Override protected String getXmlResource() { return "data/perl5.xml"; } /** * Recursively adds code blocks, remembering variables in them. * * @param scanner * @param block */ private void parseCodeBlock(TokenScanner scanner, CodeBlock block) { Token t = scanner.next(); while (t != null) { if (t.isRightCurly()) { block.setEndOffset(t.getOffset()); return; } else if (t.isLeftCurly()) { CodeBlock child = block.addChildCodeBlock(t.getOffset()); parseCodeBlock(scanner, child); } else if (t.getType()==Token.VARIABLE) { VariableDeclaration varDec = new VariableDeclaration( t.getLexeme(), t.getOffset()); block.addVariable(varDec); } t = scanner.next(); } } /** * Recursively adds any local variables defined before the given caret * offset, and in the given code block (and any nested children the caret * is in). * * @param completions The list to add to. * @param block The code block to search through. * @param dot The caret position. */ private void recursivelyAddLocalVars(SortedSet<Completion> completions, CodeBlock block, int dot, int firstChar) { if (!block.contains(dot)) { return; } // Add local variables declared in this code block for (int i = 0; i < block.getVariableDeclarationCount(); i++) { VariableDeclaration dec = block.getVariableDeclaration(i); int decOffs = dec.getOffset(); if (decOffs < dot) { String name = dec.getName(); char ch = name.charAt(0); if (firstChar<=ch) { // '$' comes before '@'/'%' in ascii if (firstChar<ch) { // Use first char they entered name = firstChar + name.substring(1); } BasicCompletion c = new BasicCompletion(this, name); completions.add(c); } } else { // A variable declared past the caret -> nothing more to add break; } } // Add any local variables declared in a child code block for (int i = 0; i < block.getChildCodeBlockCount(); i++) { CodeBlock child = block.getChildCodeBlock(i); if (child.contains(dot)) { recursivelyAddLocalVars(completions, child, dot, firstChar); return; // No other child blocks can contain the dot } } } /** * Sets whether the user wants to use parens around parameters to * functions. * * @param use Whether to use parens around parameters to functions. * @see #getUseParensWithFunctions() */ public void setUseParensWithFunctions(boolean use) { useParensWithFunctions = use; } }
// RetrieveFiles retrieves list of files from remote directory to the local directory. // The implementation can be changed if the use-case arises. As of now, we're doing a best effort // to collect every log possible. If a retrieval of file fails, we would proceed with retrieval // of other log files. func (w *windowsVM) RetrieveFiles(remoteDir, localDir string) error { if w.sshClient == nil { return fmt.Errorf("RetrieveFile cannot be called without a ssh client") } err := os.MkdirAll(localDir, os.ModePerm) if err != nil { log.Printf("could not create %s: %s", localDir, err) } sftp, err := sftp.NewClient(w.sshClient) if err != nil { return fmt.Errorf("sftp initialization failed: %v", err) } defer sftp.Close() remoteFiles, err := sftp.ReadDir(remoteDir) if err != nil { return fmt.Errorf("error opening remote file: %v", err) } for _, remoteFile := range remoteFiles { if remoteFile.IsDir() { continue } fileName := remoteFile.Name() dstFile, err := os.Create(filepath.Join(localDir, fileName)) if err != nil { log.Printf("error creating file locally: %v", err) continue } srcFile, err := sftp.Open(remoteDir + "\\" + fileName) if err != nil { log.Printf("error while opening remote directory on the Windows VM: %v", err) continue } _, err = io.Copy(dstFile, srcFile) if err != nil { log.Printf("error retrieving file %v from Windows VM: %v", fileName, err) continue } if err = dstFile.Sync(); err != nil { log.Printf("error flusing memory: %v", err) continue } if err := srcFile.Close(); err != nil { log.Printf("error closing file on the remote host %s", fileName) continue } if err := dstFile.Close(); err != nil { log.Printf("error closing file %s locally", fileName) continue } } return nil }
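A short usage sketch for RetrieveFiles. The helper name and both paths are illustrative, not values used elsewhere; the only real assumptions are that the code sits in the same package (which already imports os and path/filepath) and that the VM's SSH client has been initialized, as the method itself requires.

// collectNodeLogs is an illustrative caller for RetrieveFiles above: it copies
// a remote Windows log directory into a local temp directory, relying on the
// method's best-effort behaviour for individual files that fail.
func collectNodeLogs(vm *windowsVM) error {
	localDir := filepath.Join(os.TempDir(), "windows-vm-logs")
	// RetrieveFiles expects a Windows-style remote path.
	return vm.RetrieveFiles("C:\\var\\log", localDir)
}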
def _dirty(self): return reduce( operator.or_, [ o.dirty() for b,e,o in self._slices ], 0 )
for _ in range(int(input())):
    s = input()
    dem1 = s.count('R')
    dem2 = s.count('S')
    dem3 = s.count('P')
    m = max(dem1, dem2, dem3)
    if dem1 == m:
        ans = 'P'
    elif dem2 >= dem3 and dem2 >= dem1:
        ans = 'R'
    else:
        ans = 'S'
    print(ans * len(s))
import React from 'react' import { render } from '@testing-library/react' import { CoreEmbed } from '.' describe('embed', () => { it('should render with the youtube provider name slug', () => { const { getByTestId } = render( <CoreEmbed attributes={{ url: 'https://www.youtube.com/embed/UGFCbmvk0vo', providerNameSlug: 'youtube', align: '' }} __typename="CoreEmbedBlock" /> ) expect(getByTestId('youtube')).toBeInTheDocument() }) it('should render with the vimeo provider name slug', () => { const { getByTestId } = render( <CoreEmbed attributes={{ url: 'https://player.vimeo.com/video/10679287', providerNameSlug: 'vimeo', align: '' }} __typename="CoreEmbedBlock" /> ) expect(getByTestId('vimeo')).toBeInTheDocument() }) it('should render with the iframe provider name slug', () => { const { getByTestId } = render( <CoreEmbed attributes={{ url: 'https://www.youtube.com/embed/UGFCbmvk0vo', providerNameSlug: 'embed-handler', align: 'wide' }} __typename="CoreEmbedBlock" /> ) expect(getByTestId('embed-handler')).toBeInTheDocument() }) it('should render with the default provider name slug', () => { const { getByText } = render( <CoreEmbed attributes={{ url: 'https://player.vimeo.com/video/10679287', providerNameSlug: 'vim', align: '' }} __typename="CoreEmbedBlock" /> ) expect(getByText('vim')).toBeInTheDocument() }) it('should render null', () => { const { container } = render( <CoreEmbed __typename="CoreEmbedBlock" attributes={{}} /> ) expect(container).toBeEmptyDOMElement() }) })
/** * Base class for testing the Backend Auth filter. Makes a simple request * with no query parameters in the request URL. */ class BackendAuthFilterTest : public ::testing::Test { protected: void SetUp() override { mock_filter_config_parser_ = std::make_shared<NiceMock<MockFilterConfigParser>>(); mock_filter_config_ = std::make_shared<NiceMock<MockFilterConfig>>(); EXPECT_CALL(*mock_filter_config_, stats).WillRepeatedly(ReturnRef(stats_)); EXPECT_CALL(*mock_filter_config_, cfg_parser) .WillRepeatedly(ReturnRef(*mock_filter_config_parser_)); mock_route_ = std::make_shared<NiceMock<Envoy::Router::MockRoute>>(); filter_ = std::make_unique<Filter>(mock_filter_config_); filter_->setDecoderFilterCallbacks(mock_decoder_callbacks_); } void setPerRouteJwtAudience(const std::string& jwt_audience) { ::espv2::api::envoy::v9::http::backend_auth::PerRouteFilterConfig per_route_cfg; per_route_cfg.set_jwt_audience(jwt_audience); auto per_route = std::make_shared<PerRouteFilterConfig>(per_route_cfg); EXPECT_CALL(mock_decoder_callbacks_, route()) .WillRepeatedly(Return(mock_route_)); EXPECT_CALL(mock_route_->route_entry_, perFilterConfig(kFilterName)) .WillRepeatedly( Invoke([per_route](const std::string&) -> const Envoy::Router::RouteSpecificFilterConfig* { return per_route.get(); })); } testing::NiceMock<Envoy::Stats::MockIsolatedStatsStore> scope_; FilterStats stats_{ALL_BACKEND_AUTH_FILTER_STATS( POOL_COUNTER_PREFIX(scope_, "backend_auth."))}; std::shared_ptr<MockFilterConfigParser> mock_filter_config_parser_; std::shared_ptr<MockFilterConfig> mock_filter_config_; testing::NiceMock<Envoy::Http::MockStreamDecoderFilterCallbacks> mock_decoder_callbacks_; std::shared_ptr<NiceMock<Envoy::Router::MockRoute>> mock_route_; std::unique_ptr<Filter> filter_; }
/* Copyright [2019] - [2021], PERSISTENCE TECHNOLOGIES PTE. LTD. and the persistenceBridge contributors SPDX-License-Identifier: Apache-2.0 */ package utils import ( "github.com/Shopify/sarama" ) // NewProducer is a producer to send messages to kafka func NewProducer(kafkaPorts []string, config *sarama.Config) sarama.SyncProducer { producer, err := sarama.NewSyncProducer(kafkaPorts, config) if err != nil { panic(err) } return producer } // ProducerDeliverMessage : delivers messages to kafka func ProducerDeliverMessage(msgBytes []byte, topic string, producer sarama.SyncProducer) error { sendMsg := sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(msgBytes), } _, _, err := producer.SendMessage(&sendMsg) if err != nil { return err } return nil }
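An illustrative caller showing how the two helpers above fit together; it is not part of the original package. The broker address, topic name, and payload are placeholders, and the snippet assumes it lives in the same package so the existing sarama import applies.

// sendExampleMessage builds a synchronous producer and pushes one JSON payload
// onto a topic, then closes the producer.
func sendExampleMessage() error {
	config := sarama.NewConfig()
	// A SyncProducer requires Return.Successes, otherwise sarama rejects the config.
	config.Producer.Return.Successes = true

	producer := NewProducer([]string{"localhost:9092"}, config)
	defer producer.Close()

	return ProducerDeliverMessage([]byte(`{"height": 42}`), "example-topic", producer)
}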
def isSelfSigned(self): if self.issuer_hash == self.subject_hash: return self.isIssuerCert(self) return False
/** * Broadcast receiver which gets POI directives. */ public class LocalSearchTemplateRuntimeReceiver extends BroadcastReceiver { private static final String TAG = LocalSearchTemplateRuntimeReceiver.class.getSimpleName(); @Inject LocalSearchDirectiveHandler mLocalSearchDirectiveHandler; @Override public void onReceive(Context context, Intent intent) { Log.i(TAG, this + " | onReceive: intent: " + intent); if (mLocalSearchDirectiveHandler == null) { Log.i(TAG, this + " | first onReceive so doing injection"); DaggerNavigationComponent.builder() .androidModule(new AndroidModule(context)) .build() .injectPOIReceiver(this); } if (intent != null && intent.getExtras() != null) { AACSMessageBuilder.parseEmbeddedIntent(intent).ifPresent(message -> { if (message.action.equals(Action.TemplateRuntime.RENDER_TEMPLATE)) { TemplateRuntimeMessages.getTemplateType(message.payload).ifPresent(type -> { if (type.equals(TemplateRuntimeConstants.TEMPLATE_TYPE_LOCAL_SEARCH_LIST)) { mLocalSearchDirectiveHandler.renderLocalSearchListTemplate(message); } else if (type.equals(TemplateRuntimeConstants.TEMPLATE_TYPE_LOCAL_SEARCH_DETAIL)) { mLocalSearchDirectiveHandler.renderLocalSearchDetailTemplate(message); } }); } else if (message.action.equals(Action.TemplateRuntime.CLEAR_TEMPLATE)) { mLocalSearchDirectiveHandler.clearTemplate(); } }); } } }
/**
 * Builds the HTTP response header for a file that was opened successfully.
 * @param inFile the open file, used to compute the Content-Length
 * @param ext the file extension, used to determine the Content-Type
 * @return the header as a string
 */
std::string printGoodHeader(std::ifstream& inFile, std::string& ext)
{
    std::string line = "HTTP/1.1 200 OK\r\n";
    line += "Server: SimpleServer\r\n";
    line += "Content-Length: ";
    line += getFileSize(inFile);
    line += "\r\n";
    line += "Connection: Close\r\n";
    line += "Content-Type: ";
    line += detType(ext);
    line += "\r\n\r\n";
    return line;
}
// The Java class gets mapped onto the C++ class and behaves as if it is a Java class.
public class main {
    static {
        try {
            System.loadLibrary("gdcm");
        } catch (UnsatisfiedLinkError e) {
            System.err.println("Native code library failed to load. See the chapter on Dynamic Linking Problems in the SWIG Java documentation for help.\n" + e);
            System.exit(1);
        }
    }

    public static void main(String argv[]) {
        // ----- Object creation -----
        System.out.println("Creating some objects:");

        Reader r = new Reader();
        UIDGenerator uid = new UIDGenerator();
        String s = uid.Generate();
        System.out.println(s);

        System.out.println("Goodbye");
    }
}
<filename>contrib/libs/pire/pire/any.h /* * any.h -- a wrapper capable of holding a value of arbitrary type. * * Copyright (c) 2007-2010, <NAME> <<EMAIL>>, * <NAME> <<EMAIL>> * * This file is part of Pire, the Perl Incompatible * Regular Expressions library. * * Pire is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Pire is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser Public License for more details. * You should have received a copy of the GNU Lesser Public License * along with Pire. If not, see <http://www.gnu.org/licenses>. */ #ifndef PIRE_ANY_H #define PIRE_ANY_H #include <typeinfo> #include <contrib/libs/pire/pire/stub/stl.h> namespace Pire { class Any { public: Any() : h(0) { } Any(const Any& any) : h(0) { if (any.h) h = any.h->Duplicate(); } Any& operator= (Any any) { any.Swap(*this); return *this; } ~Any() { delete h; } template <class T> Any(const T& t) : h(new Holder<T>(t)) { } bool Empty() const { return !h; } template <class T> bool IsA() const { return h && h->IsA(typeid(T)); } template <class T> T& As() { if (h && IsA<T>()) return *reinterpret_cast<T*>(h->Ptr()); else throw Pire::Error("type mismatch"); } template <class T> const T& As() const { if (h && IsA<T>()) return *reinterpret_cast<const T*>(h->Ptr()); else throw Pire::Error("type mismatch"); } void Swap(Any& a) throw () { DoSwap(h, a.h); } private: struct AbstractHolder { virtual ~AbstractHolder() { } virtual AbstractHolder* Duplicate() const = 0; virtual bool IsA(const std::type_info& id) const = 0; virtual void* Ptr() = 0; virtual const void* Ptr() const = 0; }; template <class T> struct Holder: public AbstractHolder { Holder(T t) : d(t) { } AbstractHolder* Duplicate() const { return new Holder<T>(d); } bool IsA(const std::type_info& id) const { return id == typeid(T); } void* Ptr() { return &d; } const void* Ptr() const { return &d; } private: T d; }; AbstractHolder* h; }; } namespace std { inline void swap(Pire::Any& a, Pire::Any& b) { a.Swap(b); } } #endif
def list_processes(self, visibility=None, page=None, limit=None, sort=None, total=False, ): search_filters = {} if visibility is None: visibility = VISIBILITY_VALUES if isinstance(visibility, str): visibility = [visibility] for v in visibility: if v not in VISIBILITY_VALUES: raise ValueError("Invalid visibility value '{0!s}' is not one of {1!s}" .format(v, list(VISIBILITY_VALUES))) search_filters["visibility"] = {"$in": list(visibility)} if sort == SORT_CREATED: sort = "_id" if sort in [SORT_ID, SORT_PROCESS]: sort = SORT_ID_LONG sort_allowed = list(PROCESS_SORT_VALUES) + ["_id"] sort_method = {"$sort": self._apply_sort_method(sort, SORT_ID_LONG, sort_allowed)} search_pipeline = [{"$match": search_filters}, sort_method] paging_pipeline = [] if page is not None and limit is not None: paging_pipeline = self._apply_paging_pipeline(page, limit) if total: pipeline = self._apply_total_result(search_pipeline, paging_pipeline) else: pipeline = search_pipeline + paging_pipeline LOGGER.debug("Process listing pipeline:\n%s", repr_json(pipeline, indent=2)) found = list(self.collection.aggregate(pipeline, collation=Collation(locale="en"))) if total: items = [Process(item) for item in found[0]["items"]] total = found[0]["total"] return items, total return [Process(item) for item in found]
from tests import _run from tests import * from unittest import TestCase import porerefiner.jobs.submitters as subs from hypothesis import given class TestSubmitters(TestCase): @given(job_rec = Model.Duties(), job_code = jobs(), run = Model.Runs()) @with_database def test_submit(self, job_rec, job_code, run): run.save() job_rec.run = run job_rec.save() _run(job_code.submitter._submit(job_rec))
<reponame>Francis777/agents # coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tf_agents.environments.specs.array_spec.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import from tf_agents.specs import array_spec from tf_agents.specs import tensor_spec TYPE_PARAMETERS = ( ("np.int8", np.int8), ("np.int16", np.int16), ("np.int32", np.int32), ("np.int64", np.int64), ("np.float16", np.float16), ("np.float32", np.float32), ("np.float64", np.float64), ("python float", float), ("python int", int), ) def example_basic_spec(): return array_spec.ArraySpec((1,), np.int64) def example_nested_spec(dtype): """Return an example nested array spec.""" return { "array_spec_1": array_spec.ArraySpec((2, 3), dtype), "bounded_spec_1": array_spec.BoundedArraySpec((2, 3), dtype, -10, 10), "dict_spec": { "array_spec_2": array_spec.ArraySpec((2, 3), dtype), "bounded_spec_2": array_spec.BoundedArraySpec((2, 3), dtype, -10, 10) }, "tuple_spec": ( array_spec.ArraySpec((2, 3), dtype), array_spec.BoundedArraySpec((2, 3), dtype, -10, 10), ), "list_spec": [ array_spec.ArraySpec((2, 3), dtype), (array_spec.ArraySpec((2, 3), dtype), array_spec.BoundedArraySpec((2, 3), dtype, -10, 10)), ], } # These parameters will be used with every test* method in this class. 
@parameterized.named_parameters(*TYPE_PARAMETERS) class ArraySpecNestSampleTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): self.rng = np.random.RandomState() return super(ArraySpecNestSampleTest, self).setUp() def testArraySpecSample(self, dtype): spec = array_spec.ArraySpec((2, 3), dtype) sample = array_spec.sample_spec_nest(spec, self.rng) bounded = array_spec.BoundedArraySpec.from_spec(spec) self.assertTrue(np.all(sample >= bounded.minimum)) self.assertTrue(np.all(sample <= bounded.maximum)) def testArraySpecSampleWithName(self, dtype): spec = array_spec.ArraySpec((2, 3), dtype, name="test_spec") sample = array_spec.sample_spec_nest(spec, self.rng) bounded = array_spec.BoundedArraySpec.from_spec(spec) self.assertTrue(np.all(sample >= bounded.minimum)) self.assertTrue(np.all(sample <= bounded.maximum)) self.assertEqual("test_spec", bounded.name) def testBoundedArraySpecSample(self, dtype): spec = array_spec.BoundedArraySpec((2, 3), dtype, -10, 10) sample = array_spec.sample_spec_nest(spec, self.rng) self.assertTrue(np.all(sample >= -10)) self.assertTrue(np.all(sample <= 10)) def testBoundedArraySpecSampleMultipleBounds(self, dtype): spec = array_spec.BoundedArraySpec((2,), dtype, [-10, 1], [10, 3]) sample = array_spec.sample_spec_nest(spec, self.rng) self.assertGreaterEqual(sample[0], -10) self.assertLessEqual(sample[0], 10) self.assertGreaterEqual(sample[1], 1) self.assertLessEqual(sample[1], 3) def testBoundedArraySpecNoBounds(self, dtype): spec = array_spec.ArraySpec((2, 3), dtype) bounded_spec = array_spec.BoundedArraySpec.from_spec(spec) sample = array_spec.sample_spec_nest(bounded_spec, self.rng) tf_dtype = tf.as_dtype(spec.dtype) self.assertTrue(np.all(sample >= tf_dtype.min)) self.assertTrue(np.all(sample <= tf_dtype.max)) def testSampleTensorBoundedSpecFromArraySpecNoBounds(self, dtype): if dtype in [int, float]: return tf_dtype = tf.as_dtype(dtype) # Skip unsupported random_ops dtypes. # TODO(b/68706911): Add tf.float16 once bug is fixed. 
if tf_dtype not in (tf.bfloat16, tf.float32, tf.float64, tf.int32, tf.int64): return spec = array_spec.ArraySpec((2, 3), dtype) bounded_spec = array_spec.BoundedArraySpec.from_spec(spec) t_spec = tensor_spec.BoundedTensorSpec.from_spec(bounded_spec) sample = tensor_spec.sample_spec_nest(t_spec) bounded = tensor_spec.BoundedTensorSpec.from_spec(t_spec) sample_ = self.evaluate(sample) self.assertTrue( np.all(sample_ >= bounded.minimum), (sample_.min(), sample_.max())) self.assertTrue( np.all(sample_ <= bounded.maximum), (sample_.min(), sample_.max())) def testNestSample(self, dtype): spec = example_nested_spec(dtype) sample = array_spec.sample_spec_nest(spec, self.rng) bounded = array_spec.BoundedArraySpec.from_spec(spec["array_spec_1"]) self.assertTrue(np.all(sample["array_spec_1"] >= bounded.minimum)) self.assertTrue(np.all(sample["array_spec_1"] <= bounded.maximum)) self.assertTrue(np.all(sample["bounded_spec_1"] >= -10)) self.assertTrue(np.all(sample["bounded_spec_1"] <= 10)) self.assertIn("array_spec_2", sample["dict_spec"]) self.assertIn("bounded_spec_2", sample["dict_spec"]) self.assertIn("tuple_spec", sample) self.assertIn("list_spec", sample) self.assertTrue(np.all(sample["list_spec"][1][1] >= -10)) self.assertTrue(np.all(sample["list_spec"][1][1] <= 10)) def testNestSampleOuterDims(self, dtype): spec = example_nested_spec(dtype) outer_dims = [2, 3] sample = array_spec.sample_spec_nest( spec, self.rng, outer_dims=outer_dims) bounded = array_spec.BoundedArraySpec.from_spec(spec["array_spec_1"]) self.assertTrue(np.all(sample["array_spec_1"] >= bounded.minimum)) self.assertTrue(np.all(sample["array_spec_1"] <= bounded.maximum)) self.assertTrue(np.all(sample["bounded_spec_1"] >= -10)) self.assertTrue(np.all(sample["bounded_spec_1"] <= 10)) self.assertIn("array_spec_2", sample["dict_spec"]) self.assertIn("bounded_spec_2", sample["dict_spec"]) self.assertIn("tuple_spec", sample) self.assertIn("list_spec", sample) self.assertTrue(np.all(sample["list_spec"][1][1] >= -10)) self.assertTrue(np.all(sample["list_spec"][1][1] <= 10)) def _test_batched_shape(sample_, spec_): self.assertSequenceEqual(sample_.shape, outer_dims + list(spec_.shape)) tf.nest.map_structure(_test_batched_shape, sample, spec) class CheckArraysNestTest(parameterized.TestCase): @parameterized.named_parameters(*TYPE_PARAMETERS) def testMatch(self, dtype): spec = example_nested_spec(dtype) sample = array_spec.sample_spec_nest(spec, np.random.RandomState()) self.assertTrue(array_spec.check_arrays_nest(sample, spec)) @parameterized.named_parameters( ("different keys", {"foo": np.array([1])}, {"bar": example_basic_spec()}), ("different types 1", {"foo": np.array([1])}, [example_basic_spec()]), ("different types 2", [np.array([1])], {"foo": example_basic_spec()}), ("different lengths", [np.array([1])], [example_basic_spec(), example_basic_spec()]), ("array mismatch 1", {"foo": np.array([1, 2])}, {"foo": example_basic_spec()}), ("array mismatch 2", [np.array([1, 2])], [example_basic_spec()]), ("not an array", "a string", example_basic_spec()), ("not a spec", np.array([1]), "a string"), ) def testNoMatch(self, arrays, spec): self.assertFalse(array_spec.check_arrays_nest(arrays, spec)) class ArraySpecTest(parameterized.TestCase): def testShapeTypeError(self): with self.assertRaises(TypeError): array_spec.ArraySpec(32, np.int32) def testDtypeTypeError(self): with self.assertRaises(TypeError): array_spec.ArraySpec((1, 2, 3), "32") def testStringDtype(self): array_spec.ArraySpec((1, 2, 3), "int32") def testNumpyDtype(self): 
array_spec.ArraySpec((1, 2, 3), np.int32) def testDtype(self): spec = array_spec.ArraySpec((1, 2, 3), np.int32) self.assertEqual(np.int32, spec.dtype) def testShape(self): spec = array_spec.ArraySpec([1, 2, 3], np.int32) self.assertEqual((1, 2, 3), spec.shape) def testEqual(self): spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32) spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32) self.assertEqual(spec_1, spec_2) def testNotEqualDifferentShape(self): spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32) spec_2 = array_spec.ArraySpec((1, 3, 3), np.int32) self.assertNotEqual(spec_1, spec_2) def testNotEqualDifferentDtype(self): spec_1 = array_spec.ArraySpec((1, 2, 3), np.int64) spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32) self.assertNotEqual(spec_1, spec_2) def testNotEqualOtherClass(self): spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32) spec_2 = None self.assertNotEqual(spec_1, spec_2) self.assertNotEqual(spec_2, spec_1) spec_2 = () self.assertNotEqual(spec_1, spec_2) self.assertNotEqual(spec_2, spec_1) def testFromArray(self): spec = array_spec.ArraySpec.from_array(np.array([1, 2]), "test") self.assertEqual(spec.shape, (2,)) self.assertEqual(spec.dtype, np.int64) self.assertEqual(spec.name, "test") def testFromArrayWithScalar(self): spec = array_spec.ArraySpec.from_array(5, "test") self.assertEqual(spec.shape, tuple()) self.assertEqual(spec.dtype, np.int64) self.assertEqual(spec.name, "test") def testFromArrayWithNonNumeric(self): self.assertRaises(ValueError, array_spec.ArraySpec.from_array, "a string") @parameterized.named_parameters(*TYPE_PARAMETERS) def testCheckArrayMatch(self, dtype): spec = array_spec.ArraySpec((2,), dtype) self.assertTrue(spec.check_array(np.array([1, 2], dtype))) def testCheckArrayMatchWithScalar(self): spec = array_spec.ArraySpec(tuple(), np.double) self.assertTrue(spec.check_array(5.0)) @parameterized.named_parameters( ("wrong shape", np.array([1])), ("wrong dtype", np.array([1, 2], dtype=np.double)), ("not an array", "a string")) def testCheckArrayNoMatch(self, array): spec = array_spec.ArraySpec((2,), np.int64) self.assertFalse(spec.check_array(array)) @parameterized.named_parameters(*TYPE_PARAMETERS) def testReplaceDtype(self, dtype): spec = array_spec.ArraySpec(tuple(), np.double).replace(dtype=dtype) self.assertEqual(spec.dtype, dtype) def testReplace(self): spec = array_spec.ArraySpec(tuple(), np.double) new_spec = spec.replace(shape=(2,)) self.assertEqual(new_spec.shape, (2,)) new_spec = new_spec.replace(dtype=np.int8) self.assertEqual(new_spec.dtype, np.int8) new_spec = new_spec.replace(name="name") self.assertEqual(new_spec.name, "name") exp_spec = array_spec.ArraySpec((2,), np.int8, name="name") self.assertEqual(exp_spec, new_spec) class BoundedArraySpecTest(parameterized.TestCase): def testInvalidMinimum(self): with self.assertRaisesRegexp(ValueError, "not compatible"): array_spec.BoundedArraySpec((3, 5), np.uint8, (0, 0, 0), (1, 1)) def testInvalidMaximum(self): with self.assertRaisesRegexp(ValueError, "not compatible"): array_spec.BoundedArraySpec((3, 5), np.uint8, 0, (1, 1, 1)) def testMinLargerThanMax(self): with self.assertRaisesRegexp(ValueError, "min has values greater than max"): array_spec.BoundedArraySpec((3,), np.uint8, (1, 2, 3), (3, 2, 1)) def testHandleInfLimits(self): spec = array_spec.BoundedArraySpec( (1, 2, 3), np.float32, (-np.inf, 5, -np.inf), (np.inf, 5, np.inf), ) self.assertNotIn(np.inf, spec.minimum) self.assertNotIn(-np.inf, spec.minimum) self.assertNotIn(np.inf, spec.maximum) self.assertNotIn(-np.inf, spec.maximum) 
self.assertEqual(5, spec.minimum[1]) self.assertEqual(5, spec.maximum[1]) def testMinMaxAttributes(self): spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5)) self.assertEqual(type(spec.minimum), np.ndarray) self.assertEqual(type(spec.maximum), np.ndarray) def testNotWriteable(self): spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5)) with self.assertRaisesRegexp(ValueError, "read-only"): spec.minimum[0] = -1 with self.assertRaisesRegexp(ValueError, "read-only"): spec.maximum[0] = 100 def testEqualBroadcastingBounds(self): spec_1 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=0.0, maximum=1.0) spec_2 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0]) self.assertEqual(spec_1, spec_2) def testNotEqualDifferentMinimum(self): spec_1 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=[0.0, -1.6], maximum=[1.0, 1.0]) spec_2 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0]) self.assertNotEqual(spec_1, spec_2) def testReuseSpec(self): spec_1 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=0.0, maximum=1.0) spec_2 = array_spec.BoundedArraySpec(spec_1.shape, spec_1.dtype, spec_1.minimum, spec_1.maximum) self.assertEqual(spec_1, spec_2) def testNotEqualOtherClass(self): spec_1 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0]) spec_2 = array_spec.ArraySpec((1, 2), np.int32) self.assertNotEqual(spec_1, spec_2) self.assertNotEqual(spec_2, spec_1) spec_2 = None self.assertNotEqual(spec_1, spec_2) self.assertNotEqual(spec_2, spec_1) spec_2 = () self.assertNotEqual(spec_1, spec_2) self.assertNotEqual(spec_2, spec_1) def testNotEqualDifferentMaximum(self): spec_1 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=0.0, maximum=2.0) spec_2 = array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0]) self.assertNotEqual(spec_1, spec_2) def testRepr(self): as_string = repr( array_spec.BoundedArraySpec( (1, 2), np.int32, minimum=73.0, maximum=101.0)) self.assertIn("101", as_string) self.assertIn("73", as_string) def testFromArraySpec(self): spec = array_spec.ArraySpec((2, 3), np.int32) bounded_spec = array_spec.BoundedArraySpec.from_spec(spec) self.assertEqual(np.int32, bounded_spec.dtype) i64_info = np.iinfo(np.int32) self.assertEqual(i64_info.min, bounded_spec.minimum) self.assertEqual(i64_info.max, bounded_spec.maximum) def testFromBoundedArraySpec(self): bounded_spec = array_spec.BoundedArraySpec( (2, 3), np.int32, minimum=5, maximum=15, name="test_spec") new_spec = array_spec.BoundedArraySpec.from_spec(bounded_spec) self.assertEqual(bounded_spec.minimum, new_spec.minimum) self.assertEqual(bounded_spec.maximum, new_spec.maximum) self.assertEqual(bounded_spec.dtype, new_spec.dtype) self.assertEqual(bounded_spec.shape, new_spec.shape) self.assertEqual(bounded_spec.name, new_spec.name) def testFromArraySpecRename(self): bounded_spec = array_spec.BoundedArraySpec( (2, 3), np.int32, minimum=5, maximum=15, name="test_spec") new_spec = array_spec.BoundedArraySpec.from_spec( bounded_spec, name="rename") self.assertEqual(bounded_spec.minimum, new_spec.minimum) self.assertEqual(bounded_spec.maximum, new_spec.maximum) self.assertEqual(bounded_spec.dtype, new_spec.dtype) self.assertEqual(bounded_spec.shape, new_spec.shape) self.assertEqual("rename", new_spec.name) @parameterized.named_parameters(*TYPE_PARAMETERS) def testCheckArrayMatch(self, dtype): spec = array_spec.BoundedArraySpec((2,), dtype, minimum=5, 
maximum=15) self.assertTrue(spec.check_array(np.array([6, 7], dtype))) # Bounds should be inclusive. self.assertTrue(spec.check_array(np.array([5, 15], dtype))) @parameterized.named_parameters( ("wrong shape", np.array([1])), ("wrong dtype", np.array([1, 2], dtype=np.double)), ("not an array", "a string"), ("out of bounds 1", np.array([1, 10])), ("out of bounds 2", np.array([5, 20]))) def testCheckArrayNoMatch(self, array): spec = array_spec.BoundedArraySpec((2,), np.int64, minimum=5, maximum=15) self.assertFalse(spec.check_array(array)) # Tests that random sample of a complete uint8 range contains all values. def testSampleUint8(self): self.skipTest("TODO(oars): Fix this test.") rng = np.random.RandomState() spec = array_spec.BoundedArraySpec( (100, 10, 10), np.uint8, minimum=0, maximum=255) sample = array_spec.sample_bounded_spec(spec, rng) self.assertTupleEqual((100, 10, 10), sample.shape) hist, _ = np.histogram(sample, bins=256, range=(0, 255)) self.assertTrue(np.all(hist > 0)) # Tests that random sample of a complete int8 range contains all values. The # caveat is that difference of max - min is not int8. # TODO(oars): Fix these tests: perhaps by chance not every bin is filled? # Need a lot more samples (e.g. shape (100, 100, 100) to ensure they are? def testSampleInt8(self): self.skipTest("TODO(oars): Fix this test.") rng = np.random.RandomState() spec = array_spec.BoundedArraySpec( (100, 10, 10), np.int8, minimum=-128, maximum=127) sample = array_spec.sample_bounded_spec(spec, rng) self.assertTupleEqual((100, 10, 10), sample.shape) hist, _ = np.histogram(sample, bins=256, range=(-128, 127)) self.assertTrue(np.all(hist > 0)) # Tests that random sample from uint64 does have all values requested. def testSampleUint64SmallRange(self): self.skipTest("TODO(oars): Fix this test.") rng = np.random.RandomState() spec = array_spec.BoundedArraySpec( (100, 10, 10), np.uint64, minimum=0, maximum=100) sample = array_spec.sample_bounded_spec(spec, rng) self.assertTupleEqual((100, 10, 10), sample.shape) hist, _ = np.histogram(sample, bins=100, range=(0, 100)) self.assertTrue(np.all(hist > 0)) # Tests that random sample from full int64 works well. The caveat is that the # full range min-max cannot be represented as an int64. def testSampleInt64FullRange(self): rng = np.random.RandomState() spec = array_spec.BoundedArraySpec( (100, 10, 10), np.int64, minimum=np.iinfo(np.int64).min, maximum=np.iinfo(np.int64).max) sample = array_spec.sample_bounded_spec(spec, rng) self.assertTupleEqual((100, 10, 10), sample.shape) hist, _ = np.histogram(sample, bins=100, range=(np.iinfo(np.int64).min / 2, np.iinfo(np.int64).max / 2)) self.assertTrue(np.all(hist > 0)) # Tests that random sample from full float64 does have no infs. 
def testSampleFloat64FullRange(self): rng = np.random.RandomState() spec = array_spec.BoundedArraySpec( (100, 10, 10), np.float64, minimum=0, maximum=100) sample = array_spec.sample_bounded_spec(spec, rng) self.assertTupleEqual((100, 10, 10), sample.shape) self.assertFalse(np.any(np.isinf(sample))) hist, _ = np.histogram(sample, bins=100, range=(0, 100)) self.assertTrue(np.all(hist > 0)) def testReplace(self): spec = array_spec.BoundedArraySpec(tuple(), np.int8, minimum=0, maximum=1) new_spec = spec.replace(shape=(2,)) self.assertEqual(new_spec.shape, (2,)) new_spec = new_spec.replace(dtype=np.int32) self.assertEqual(new_spec.dtype, np.int32) new_spec = new_spec.replace(name="name") self.assertEqual(new_spec.name, "name") new_spec = new_spec.replace(minimum=-1) self.assertEqual(new_spec.minimum, -1) new_spec = new_spec.replace(maximum=0) self.assertEqual(new_spec.maximum, 0) exp_spec = array_spec.BoundedArraySpec((2,), np.int32, minimum=-1, maximum=0, name="name") self.assertEqual(exp_spec, new_spec) @parameterized.named_parameters(*TYPE_PARAMETERS) def testNumValues(self, dtype): spec = array_spec.BoundedArraySpec(tuple(), dtype, minimum=0, maximum=9) num_values = spec.num_values if array_spec.is_discrete(spec): self.assertEqual(10, num_values) else: self.assertEqual(None, num_values) def testNumValuesVector(self): spec = array_spec.BoundedArraySpec((2,), np.int32, [0, 0], [1, 1]) self.assertTrue(np.all([2, 2] == spec.num_values)) spec = spec.replace(minimum=1) self.assertTrue(np.all([1, 1] == spec.num_values)) spec = spec.replace(maximum=2) self.assertTrue(np.all([2, 2] == spec.num_values)) @parameterized.named_parameters(*TYPE_PARAMETERS) class ArraySpecTypeTest(parameterized.TestCase): def testIsDiscrete(self, dtype): spec = array_spec.ArraySpec((2, 3), dtype=dtype) self.assertIs(tensor_spec.is_discrete(spec), issubclass(np.dtype(dtype).type, np.integer)) def testIsContinuous(self, dtype): spec = array_spec.ArraySpec((2, 3), dtype=dtype) self.assertIs(tensor_spec.is_continuous(spec), issubclass(np.dtype(dtype).type, np.floating)) def testExclusive(self, dtype): spec = array_spec.ArraySpec((2, 3), dtype=dtype) self.assertIs( tensor_spec.is_discrete(spec) ^ tensor_spec.is_continuous(spec), True) if __name__ == "__main__": tf.test.main()
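A rough idea of the inclusive bounds check these tests exercise can be sketched with plain NumPy broadcasting. This is only an illustrative stand-in for the behavior under test, not the actual array_spec implementation; the helper name check_bounded is made up here.

import numpy as np

def check_bounded(array, shape, dtype, minimum, maximum):
    """Return True if `array` has the given shape/dtype and lies within the inclusive bounds."""
    if not isinstance(array, np.ndarray):
        return False
    if array.shape != tuple(shape) or array.dtype != np.dtype(dtype):
        return False
    low = np.broadcast_to(minimum, array.shape)   # scalar or per-element bounds
    high = np.broadcast_to(maximum, array.shape)
    return bool(np.all(array >= low) and np.all(array <= high))

print(check_bounded(np.array([5, 15], dtype=np.int64), (2,), np.int64, 5, 15))  # True: bounds are inclusive
print(check_bounded(np.array([5, 20], dtype=np.int64), (2,), np.int64, 5, 15))  # False: 20 is out of bounds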
{-# LANGUAGE Arrows #-} module Game.Server.Components.BasicComponents where import FRP.Yampa as Yampa import FRP.Yampa.Geometry import Safe import IdentityList import Network.Socket import Data.Maybe import Numeric.IEEE import Game.Shared.Types import Game.Shared.Networking import Game.Shared.Arrows import Game.Shared.Object import Game.Shared.Physics import Game.Server.Object import Game.Server.Networking ------------------ -- Basic object -- ------------------ -- |Input structure for a basic game object data BasicObjectInput = BasicObjectInput { boiNetwork :: ServerNetInput -- ^Network input } -- |Output structure for a basic game object data BasicObjectOutput = BasicObjectOutput { booGlobalAnnounceEvent :: Yampa.Event () -- ^Event fired when the object should announce itself to all connected clients ,booSocketAnnounceEvent :: Yampa.Event [Socket] -- ^Event for when the object should announce itself to a client } -- |Base component that all objects should use. Informs the object when it should -- send Create messages to newly connected clients basicObject :: SF BasicObjectInput BasicObjectOutput basicObject = proc input -> do -- Connection events announceEvent <- delayEvent epsilon <<< now () -< () newClientEvent <- clientsConnected -< boiNetwork input -- Return state returnA -< BasicObjectOutput { booGlobalAnnounceEvent = announceEvent, booSocketAnnounceEvent = newClientEvent } ------------------------ -- Basic owned object -- ------------------------ -- |Input structure for the owned basic object data BasicOwnedObjectInput = BasicOwnedObjectInput { booiNetwork :: ServerNetInput -- ^Network input } -- |Output structure for the owned basic object data BasicOwnedObjectOutput = BasicOwnedObjectOutput { boooGlobalAnnounceEvent :: Yampa.Event () -- ^Event fired when the object should announce itself to all connected clients ,boooSocketAnnounceEvent :: Yampa.Event [Socket] -- ^Event for when the object should announce itself to a client ,boooOwnerDisconnected :: Yampa.Event () -- ^Event fired when the object owned has disconnected ,boooPositionChanged :: Yampa.Event Vector2D -- ^Event for when the objects position has changed } -- |Base component for an object which is owned by a connected client. 
Owned objects can be moved by their owner, and are removed when their owner disconnects.
basicOwnedObject :: Socket -- ^Socket of the parent client
                 -> SF BasicOwnedObjectInput BasicOwnedObjectOutput
basicOwnedObject sock = proc input -> do
    -- Connection events
    announceEvent <- delayEvent epsilon <<< now () -< ()
    disconnectedEvent <- clientDisconnected sock -< booiNetwork input
    newClientEvent <- clientsConnected -< booiNetwork input
    -- Position
    moveRequestEvent <- moveRequest sock -< booiNetwork input
    -- Return state
    returnA -< BasicOwnedObjectOutput {
        boooGlobalAnnounceEvent = announceEvent,
        boooSocketAnnounceEvent = newClientEvent,
        boooOwnerDisconnected = disconnectedEvent,
        boooPositionChanged = moveRequestEvent
    }

------------------
-- Object stats --
------------------
-- |Input structure for the stats component
data ObjectStatsInput = ObjectStatsInput {
    osiHealthChanged :: Yampa.Event Int -- ^Event to request a change to the health value
   ,osiMaxHealthChanged :: Yampa.Event Int -- ^Event to request a change to the max health value
   ,osiManaChanged :: Yampa.Event Int -- ^Event to request a change to the mana value
   ,osiMaxManaChanged :: Yampa.Event Int -- ^Event to request a change to the max mana value
   ,osiSpeedChanged :: Yampa.Event Int -- ^Event to request a change to the speed value
   ,osiDefenceChanged :: Yampa.Event Int -- ^Event to request a change to the defence value
   ,osiAttackChanged :: Yampa.Event Int -- ^Event to request a change to the attack value
}

-- |Output structure for the stats component
data ObjectStatsOutput = ObjectStatsOutput {
    osoHealthChanged :: Yampa.Event Int -- ^Event for when the health value has been changed
   ,osoMaxHealthChanged :: Yampa.Event Int -- ^Event for when the max health value has been changed
   ,osoManaChanged :: Yampa.Event Int -- ^Event for when the mana value has been changed
   ,osoMaxManaChanged :: Yampa.Event Int -- ^Event for when the max mana value has been changed
   ,osoSpeedChanged :: Yampa.Event Int -- ^Event for when the speed value has been changed
   ,osoDefenceChanged :: Yampa.Event Int -- ^Event for when the defence value has been changed
   ,osoAttackChanged :: Yampa.Event Int -- ^Event for when the attack value has been changed
   ,osoDeathEvent :: Yampa.Event () -- ^Event fired when the object is killed
   ,osoStats :: Stats -- ^Object stats structure
}

-- |Component that manages the stats for an object
objectStats :: Stats -- ^Initial values for the object stats
            -> SF ObjectStatsInput ObjectStatsOutput
objectStats startSt = proc input -> do
    -- Hold and update stats when necessary
    rec health <- accumBy (+) (stHealth startSt) <<< delayEvent epsilon
                      -< clampEvent (osiHealthChanged input) healthValue maxHealthValue
        maxHealth <- accumBy (+) (stMaxHealth startSt) -< osiMaxHealthChanged input
        mana <- accumBy (+) (stMana startSt) <<< delayEvent epsilon
                    -< clampEvent (osiManaChanged input) manaValue maxManaValue
        -- Max mana must track the max-mana request events, not the max-health ones.
        maxMana <- accumBy (+) (stMaxMana startSt) -< osiMaxManaChanged input
        speed <- accumBy (+) (stSpeed startSt) -< osiSpeedChanged input
        defence <- accumBy (+) (stDefence startSt) -< osiDefenceChanged input
        attack <- accumBy (+) (stAttack startSt) -< osiAttackChanged input
        healthValue <- hold (stHealth startSt) -< health
        maxHealthValue <- hold (stMaxHealth startSt) -< maxHealth
        manaValue <- hold (stMana startSt) -< mana
        maxManaValue <- hold (stMaxMana startSt) -< maxMana
        speedValue <- hold (stSpeed startSt) -< speed
        defenceValue <- hold (stDefence startSt) -< defence
        attackValue <- hold (stAttack startSt) -< attack
    -- Return updated stats
    returnA -< ObjectStatsOutput {
        osoHealthChanged = health,
        osoMaxHealthChanged = maxHealth,
        osoManaChanged = mana,
        osoMaxManaChanged = maxMana,
        osoSpeedChanged = speed,
        osoDefenceChanged = defence,
        osoAttackChanged = attack,
        osoDeathEvent = condEvent (healthValue <= 0),
        osoStats = Stats {
            stHealth = healthValue,
            stMaxHealth = maxHealthValue,
            stMana = manaValue,
            stMaxMana = maxManaValue,
            stSpeed = speedValue,
            stAttack = attackValue,
            stDefence = defenceValue
        }
    }
    where
        -- Clamp a requested change so the running value stays within [0, max].
        -- The negative case must be tested first: any result below zero also
        -- satisfies (newVal < maxVal), so with the original guard order the
        -- lower clamp could never fire.
        clampEvent evt@(Yampa.Event val) currVal maxVal
            | newVal < 0 = Yampa.Event (-currVal)
            | newVal < maxVal = evt
            | (newVal > maxVal) && (currVal /= maxVal) = Yampa.Event (maxVal - currVal)
            | otherwise = noEvent
            where newVal = val + currVal
        clampEvent _ _ _ = noEvent
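For readers less used to arrow notation, the clamping rule above can be restated as a tiny Python sketch. The function below is purely illustrative, mirrors the corrected guard order, and does not correspond to any existing code in the project.

def clamp_delta(delta, current, maximum):
    """Trim a requested stat change so that current + delta stays within [0, maximum]."""
    new_val = current + delta
    if new_val < 0:
        return -current              # clamp down so the stat lands exactly on zero
    if new_val < maximum:
        return delta                 # the change fits; pass it through unchanged
    if new_val > maximum and current != maximum:
        return maximum - current     # clamp up so the stat lands on the maximum
    return 0                         # already at (or exactly hitting) the maximum: suppress the change

print(clamp_delta(-40, 25, 100))  # -25: a 40-point hit only removes the 25 points remaining
print(clamp_delta(30, 90, 100))   # 10: healing past the cap is trimmed so the stat reaches 100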
module Flip where import Board (Board, Piece (Black, White), Square, opponentColor) import Data.List import Data.List.Split -- for determining which pieces to flip in a given board -- could probably start by only taking the last move made into account -- and then checking if that results in vertical, horizontal or diagonal lines -- and then flipping any pieces of the other colour caught between the pieces -- transposing before calling is probably going to help a lot -- for reusing code when checking rows/columns -- check a row for trapped pieces checkRow :: [Square] -> Piece -> [Square] checkRow r p = undefined -- one strategy is to get all white indexes in a row -- and all black indexes and then compare to see if -- either colour has any indexes which are lower and -- higher than any index of the other colour -- should be enough to get the max and min indexes of each colour -- if the maximum of either colour is greater than the minimum of the other, -- and the minimum of that colour is less than the maximum whiteIndexes :: [Square] -> [Int] whiteIndexes = pieceIndexes White 0 blackIndexes :: [Square] -> [Int] blackIndexes = pieceIndexes Black 0 pieceIndexes :: Piece -> Int -> [Square] -> [Int] pieceIndexes _ _ [] = [] pieceIndexes White start (Just White : ps) = start : pieceIndexes White (start + 1) ps pieceIndexes Black start (Just Black : ps) = start : pieceIndexes Black (start + 1) ps pieceIndexes White start (_ : ps) = pieceIndexes White (start + 1) ps pieceIndexes Black start (_ : ps) = pieceIndexes Black (start + 1) ps -- an empty row is filled with Nothing emptyRow :: [Square] -> Bool emptyRow = all (== Nothing) splitOnEmpty :: [Square] -> [[Square]] splitOnEmpty = wordsBy (== Nothing) -- requires that no empty squares are present squareToPiece :: [Square] -> [Piece] squareToPiece [] = [] squareToPiece (Just p : ps) = p : squareToPiece ps squareToPiece (Nothing : ps) = error "Cannot turn an empty Square into a piece" -- assumes no empty spaces in the given row betweenOtherColor :: Int -> [Int] -> Bool betweenOtherColor 0 _ = False betweenOtherColor 7 _ = False betweenOtherColor x ys = any (< x) ys && any (> x) ys flipColour :: [Piece] -> [Bool] -> [Piece] flipColour [] _ = [] flipColour _ [] = [] flipColour (p : ps) (False : bs) = p : flipColour ps bs flipColour (White : ps) (True : bs) = Black : flipColour ps bs flipColour (Black : ps) (True : bs) = White : flipColour ps bs ---------- I don't think any code above this comment is actually used ------------------------------ -- Question is: Do we need to know which colour we are to flip a unit? 
-- it probably helps -- colour should represent the colour of the pieces which are to be flipped -- can use splitWhen (==Nothing) to get sublists of non-empty squares -- produces a list of sublists of non-empty squares nonEmptySquares :: [Square] -> [[Square]] nonEmptySquares = splitWhen (== Nothing) -- this might be oversimplified, but it should be the case that the two -- edge pieces are always of the same colour, the one we should flip to flipSublist :: Piece -> [Square] -> [Square] flipSublist _ [] = [] flipSublist colorToFlip squares = replicate (length squares) (Just (opponentColor colorToFlip)) -- Checks the end of a non-empty sublist and determines if its contents should be flipped or not shouldFlip :: [Square] -> Bool shouldFlip [] = False shouldFlip squares = firstPiece == lastPiece where firstPiece = head squares lastPiece = last squares rejoinSublists :: [[Maybe a]] -> [Maybe a] rejoinSublists [] = [] rejoinSublists [x] = x rejoinSublists (x : xs) = x ++ [Nothing] ++ rejoinSublists xs -- need some way to flatten this, replacing empty lists with Nothings flipUnit :: Piece -> [Square] -> [Square] flipUnit _ [] = [] flipUnit colorToFlip squares = rejoinSublists $ map (\x -> if shouldFlip x then flipSublist colorToFlip x else x) subLists where subLists = nonEmptySquares squares -- TODO: This does not yet take diagonals into consideration! flipBoard :: Piece -> Board -> Board flipBoard _ [] = [] flipBoard colorToFlip rows = map (flipUnit colorToFlip) (transpose flippedRows) where flippedRows = map (flipUnit colorToFlip) rows
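The row-flipping pass described above (split a row on empty squares, then flip any run whose two end pieces match) can be mirrored in a short Python sketch. It follows the module's deliberately simplified rule rather than full Othello capture logic, and the names here are hypothetical; unlike rejoinSublists, it keeps every empty square in place.

def flip_row(row, opponent):
    """Split `row` on empty squares (None) and recolour every run whose two end
    pieces match, turning the whole run into `opponent` (the colour flipped to)."""
    runs, current = [], []
    for square in row:
        if square is None:
            runs.append(current)
            current = []
        else:
            current.append(square)
    runs.append(current)

    result = []
    for i, run in enumerate(runs):
        if run and run[0] == run[-1]:
            result.extend([opponent] * len(run))   # mirrors shouldFlip + flipSublist
        else:
            result.extend(run)
        if i < len(runs) - 1:
            result.append(None)                    # keep the empty squares in place
    return result

# 'B' and 'W' stand in for Black and White; None is an empty square.
print(flip_row(['B', 'W', 'W', 'B', None, 'W'], opponent='B'))
# -> ['B', 'B', 'B', 'B', None, 'B']; note the lone 'W' is also flipped,
# which is exactly the oversimplification the comments above acknowledge.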
i love how lemony this cake is. it's the perfect treat with that afternoon tea or morning coffee. this lemon loaf cake is so easy to make. you can make it again and again and it'll turn out just perfect every time. recipe adapted from sugar loco.

ingredients:
1-1/2 cup flour
1/2 teaspoon baking powder
1/2 teaspoon baking soda
1/2 teaspoon salt
3 eggs
1 cup sugar
2 tablespoons butter, softened
1 teaspoon vanilla extract
1/3 cup fresh lemon juice
1/2 cup oil
zest of one lemon

lemon glaze:
1 cup powdered sugar
1/4 cup lemon juice

directions:
preheat oven to 350°f. prepare 9x5 loaf pan or 6 mini pans. in a large bowl combine flour, baking soda, baking powder and salt. in a medium bowl combine eggs, sugar, butter, vanilla and lemon extracts, and lemon juice with a mixer. scrape bottom to make sure all is combined. pour dry ingredients into wet ingredients and blend until smooth, scraping sides and bottom of bowl. add oil and lemon zest. mix well. pour into prepared loaf pan(s) and bake for 45 mins (9x5) or 25 mins (minis). note: i did the loaf pan and only baked mine for 43 mins and it was fine. but of course, every oven is different. insert toothpick to test. if the toothpick comes out clean, they should be good and taken out of the oven. once removed from the oven, let the cakes cool in the pan for 5 minutes. while cake is cooling in the pan, prepare the glaze. combine both the lemon juice and powdered sugar and whisk to combine. remove the cake from the pan(s) and onto a wire rack. make sure and put a cookie sheet or newspaper under the rack. pour glaze over the cake loaf. let the cake cool completely on rack. once cooled, slice and serve. enjoy!

yes, i know, i've been MIA lately. but just because i haven't been blogging about any baked goodies or delicious eats doesn't mean i haven't been baking or eating good stuff, or designing or even exercising/running. actually, i have been with all of that. in fact, i've been baking a lot. i just haven't been taking any pictures and sharing them on here. that's because i can never find the time to take pictures of all the delicious eats i've been making. about the only time i can take photos of any baked goodies i make is in the morning and mornings around here are quite hectic with two little ones. but yes, at long last, here is a yummy post. and if these photos aren't so great, it's because i took them with my phone this morning. so please, forgive me for the awful backgrounds and anything else that's imperfect about it. ;)

over the weekend my bro-in-law stopped by with a giant bag of freshly picked meyer lemons from his backyard. i kid you not, when i say giant, i mean one of those 13-gallon white garbage bags. it was halfway full at least, with who knows how many lemons. i know i've used about 1/3 of it already and there's still a ton left. so you see, my bro-in-law has this good-size (i don't think it's huge, but it sure produces a ton of lemons) lemon tree out back in his yard. it seriously is one lemon-producing machine because no matter how many branches he has cut off, the tree still finds ways to get more lemons out.

anyway, i decided to make use of the lemons. one of the things i've made with these lemons is this lemon loaf cake. i've made this recipe twice already in the last three days. it's pretty simple, so yes, piece of cake. i love how lemony this cake is. the amount of lemon juice is perfect and the added zest in the cake really makes all that lemon flavor stand out. i love how it uses both butter and oil and not just one or the other.
the cake comes out just right and not dry at all. oh, to make it even more lemony is that added lemon glaze at the end. oh, so good. our house smelled delicious as the cake was baking in the oven. i couldn't wait to get it out and eat it while still hot. but yes, i was patient enough to wait until it cooled down to slice into it. the recipe's up at the top of this post should you want to make it yourself. it's very easy and pretty straightforward. i've baked it twice already with minor adjustments and it hasn't failed me once (or twice). so go ahead and make some. these are delicious as an accompaniment to that afternoon tea or with your morning breakfast (i had it this morning w/ my iced coffee). enjoy and happy baking! :) makes 10-12 servings depending on how thick or thin you slice it. cake is good for 1-2 days covered/wrapped (uncovered for me because i'm too lazy to cover it so we leave it uncovered, plus it gets eaten within a day) at room temperature. it could probably go longer, but it might go dry as the days pass. enjoy! :)
Harold Stallworth admires Shy Glizzy’s beard. Earlier this summer, despite a sweltering 97-degree forecast, hundreds of teenagers convened on the 200th block of 37th Street—just two miles east of the Redskins’ old stomping grounds, RFK Stadium—in honor of Shy Glizzy’s 2nd annual Glizzy Day. The term Glizzy is as malleable as a damp pretzel. It can be a suffix or prefix, firearm or narcotic, phalanx or phallus. But in this particular instance, it was used to describe Shy Glizzy leaning on an SUV, entertaining freestyles from 12-year-olds and striking an occasional pose for the one photographer brave—or savvy—enough to forfeit his Saturday afternoon to a Southeast D.C. housing project. The flyer circulated weeks beforehand promised a block party with food, music, games and surprise celebrity guests, none of which were made readily available for Glizzy Day. But if you buy into the idea that a rapper’s mere presence is inherently philanthropic, Glizzy Gang is due for massive charitable write-offs come tax season. In the context of Shy Glizzy’s music, everyday is Glizzy Day. On Law 2, his third street album in less than 18 months, 37th Street is shouted out upward of a dozen times. Though, the strongest efforts surface when he takes a break from rapping about his old neighborhood like a treasured landmark, and sets out to convey the plight of his neighbors therein. “Some Ones” is the bravest record featured on the album. It’s a shrewd hybrid between “Brenda’s Got a Baby” and “No Hands” that’s too depressing for the strippers employed by Club Stadium, yet too edgy for the D.C. Loves Dilla cult. It’s performances like this—empathetic narratives rich in detail and devoid of finger wagging—that almost justify his self-legislated holiday. Download: ZIP: Shy Glizzy – Laws 2 (Left-Click) We rely on your support to keep POW alive. Please take a second to donate on Patreon!
<filename>petra_viewer/utils/fake_image_item.py # Created by matveyev at 10.11.2021 from PyQt5 import QtCore class FakeImageItem(QtCore.QObject): sigImageChanged = QtCore.pyqtSignal() _levels = None, None # ---------------------------------------------------------------------- def __init__(self, data_pool, image_item=None): super(FakeImageItem, self).__init__() self._current_file = None self._mode = 'lin' self._data_pool = data_pool self._image_item = image_item # ---------------------------------------------------------------------- @property def levels(self): if self._current_file is None: return None, None if self._levels[0] is None: self._levels = self._data_pool.get_levels(self._current_file, self._mode) return self._levels # ---------------------------------------------------------------------- def setAutoLevels(self): self._levels = self._data_pool.get_levels(self._current_file, self._mode) # ---------------------------------------------------------------------- def setMode(self, mode): self._mode = mode self._levels = self._data_pool.get_levels(self._current_file, self._mode) self.sigImageChanged.emit() # ---------------------------------------------------------------------- def setNewFile(self, file): self._current_file = file self._levels = None, None self.sigImageChanged.emit() # ---------------------------------------------------------------------- def setEmptyFile(self): self._current_file = None self._levels = (0, 1) self.sigImageChanged.emit() # ---------------------------------------------------------------------- def setLookupTable(self, lut): if self._image_item is not None: self._image_item.setLookupTable(lut) # ---------------------------------------------------------------------- def setLevels(self, levels): self._levels = levels if self._image_item is not None: self._image_item.setLevels(levels) # ---------------------------------------------------------------------- def getHistogram(self, perChannel=False): if self._current_file is None: return None, None else: return self._data_pool.get_histogram(self._current_file, self._mode) # ---------------------------------------------------------------------- def channels(self): if self._image_item is not None: return self._image_item.channels() else: return 1
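A minimal usage sketch of the FakeImageItem class above, with a stub in place of the real data pool, shows the intended call sequence. The stub, the file name, and the 'log' mode string are assumptions for illustration only, and in a real viewer this would run inside a Qt application.

# Hypothetical stand-in for the real data pool, only to show the call sequence.
class StubDataPool:
    def get_levels(self, current_file, mode):
        return (0.0, 255.0) if mode == 'lin' else (0.0, 2.4)

    def get_histogram(self, current_file, mode):
        return [3, 10, 25, 5], [0, 64, 128, 192, 256]

item = FakeImageItem(StubDataPool())
item.sigImageChanged.connect(lambda: print("levels need refreshing"))

item.setNewFile("scan_0001.nxs")  # resets the cached levels; they are recomputed lazily on access
print(item.levels)                # -> (0.0, 255.0)
item.setMode('log')               # recomputes levels for the new mode and notifies listeners
print(item.levels)                # -> (0.0, 2.4)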
def explode_query(self, query):
    res = []

    def dig(sub, res):
        level = []
        for item in sub:
            if isinstance(item, tuple):
                got = dig(item, res)
                if got and level and isinstance(level[0], Op):
                    level.append(got)
                    res.append(tuple(level))
                    level = []
            else:
                level.append(item)
        return tuple(level)

    level = dig(query, res)
    if not res:
        return ((Op('&'), level),)
    if level:
        assert len(level) == 1, (len(level), level)
        res.append((level[0], ()))
    return tuple(res)
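A quick, hedged usage example: Op is defined elsewhere in the codebase, so a minimal stand-in is used here, and since the method body never touches self, passing None is enough for a standalone check (assuming the stand-in and the function share a module so the Op lookup resolves).

class Op:
    """Minimal stand-in for the real operator class, for illustration only."""
    def __init__(self, symbol):
        self.symbol = symbol
    def __repr__(self):
        return "Op(%r)" % self.symbol

query = (Op('&'), 'alpha', (Op('|'), 'beta', 'gamma'))
print(explode_query(None, query))
# The nested (Op('|'), ...) group is folded into its parent level, and the combined
# level comes back as a single entry of the result tuple.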
The missing component in rail charging modeling - access charges principle selection
The motive of this research is the fact that until now no universal model of access charges (AC) has been defined. In the process of modeling, the AC principle is one of the key elements for defining the AC. In this paper, a model for the AC principle selection based on the analytic network process (ANP) approach is proposed. The developed model presents the objectives of the identified stakeholders through the established criteria from three different perspectives: the government influence, railway market environment, and efficiency of network use. Based on the results of the research, the network structure of the ANP method can successfully resolve the dependence and conflicts among evaluation criteria for the AC principle selection. The proposed ANP-based model can become a tool for evaluation and ranking of the AC principle as shown in the case study of the Serbian Railways. This paper is based on real data.
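As background for readers unfamiliar with ANP, its elementary building block (deriving a priority vector from a pairwise-comparison matrix via the principal eigenvector) can be sketched in a few lines. The matrix below is a toy example and is not taken from the paper's model.

import numpy as np

# Toy pairwise-comparison matrix for three hypothetical criteria on Saaty's 1-9 scale.
comparisons = np.array([
    [1.0,     3.0, 5.0],
    [1 / 3.0, 1.0, 2.0],
    [1 / 5.0, 0.5, 1.0],
])

eigenvalues, eigenvectors = np.linalg.eig(comparisons)
principal = np.real(eigenvectors[:, np.argmax(np.real(eigenvalues))])
priorities = principal / principal.sum()
print(priorities)  # relative weights of the three criteria; vectors like this fill the ANP supermatrix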
import { Injectable } from '@nestjs/common';
import { Repository } from 'typeorm';
import { InjectRepository } from '@nestjs/typeorm';
import { User } from '../users/User.entity';
import { CreateUserDto } from '../users/dto/create-user.dto';

@Injectable()
export class RegisterService {
  constructor(
    @InjectRepository(User)
    private readonly userRepository: Repository<User>,
  ) {}

  // Persists a new user built from the incoming registration payload.
  registerUser(createDto: CreateUserDto): Promise<User> {
    const newUser = this.userRepository.create(createDto);
    return this.userRepository.save(newUser);
  }
}
Are Highly Motivated Learners More Likely to Complete a Computer Programming MOOC?
Computer programming MOOCs attract people who have different motivations. Previous studies have hypothesized that the motivation declared before starting the course can be an important predictor of distinctive dropout rates. The aim of this study was to outline the main motivation clusters of participants in a computer programming MOOC, and to compare how these clusters differed in terms of intention to complete and actual completion rate. The sample consisted of 1,181 respondents to the pre-course questionnaire in the Introduction to Programming MOOC. A validated motivation scale, based on expectancy-value theory and k-means cluster analysis, was used to form the groups. The four identified clusters were named as Opportunity motivated (27.7%), Over-motivated (28.6%), Success motivated (19.6%) and Interest motivated (24.0%). Comparison tests and chi-square test were used to describe the differences among the clusters. There were statistically significant differences among clusters in self-evaluated probability of completion. Also, significant differences emerged among three clusters in terms of percentages of respondents who completed the MOOC. Interestingly, the completion rate was the lowest in the Over-motivated cluster. A statistically significant higher ratio of completers to non-completers was found in the Opportunity motivated, Success motivated, and Interest motivated clusters. Our findings can be useful for MOOC instructors, as a better vision of participants' motivational profiles at the beginning of the MOOC might help to inform the MOOC design to better support different needs, potentially resulting in lower dropout rates.
Introduction The interest in learning programming and computer science has been growing in recent years; nowadays society needs more and more people with programming skills. Massive open online courses (MOOCs) are a possible way to meet this demand and educate a large number of people. Despite their enormous popularity, MOOCs still have an extensive problem with dropout. One variable related to dropout is the motivation to start the course (Reparaz et al., 2020). Motivation to start the MOOC is different from motivation in traditional courses and is more diverse: some participants merely want to use the materials, others learners are highly motivated and follow every aspect of the course, while some simply enjoy interacting with other MOOC learners (Daza et al., 2013). It is important to understand how to make a MOOC work for as many of its diverse participants as possible (Grover et al., 2013). Existing research on MOOCs has focused on classification of MOOC learners based on their behavior during the course (Feklistova et al., 2019;Kahan et al., 2017;Kizilcec et al., 2013;Tseng et al., 2016). However, different motivational goals may predict different behavioral patterns for MOOC learners and dropouts (Kizilcec & Schneider, 2015), and MOOC completers can be characterized according to their motivation to learn (Barak et al., 2016). Lately, it has been shown that the classification of MOOC participants according to their motivations and intentions at the beginning of the MOOC can help to identify in advance the benefits and potential obstacles that one can find throughout a MOOC (Maya-Jariego et al., 2019). Therefore, this research aimed to outline the main motivation clusters of participants in a computer programming MOOC and compare the differences in completion rates between these clusters. Motivation Motivation can be defined as an internal state or condition that activates behavior and gives it direction (Huitt, 2011). Motivation can be affected by several factors, such as self-efficacy and competence beliefs, control beliefs, interest, value beliefs, and achievement goals (Pintrich, 2003). Motivation to learn is one type of motivation that can be conceptualized as the degree to which students invest attention and effort in various learning activities (Brophy, 2013). Motivation plays a key role in learning and academic performance (Green et al., 2012) and has an important effect on student achievement (Orhan Özen, 2017). In general, explanations relating to the sources of motivation can be categorized into two main categories: intrinsic (internal to the person) and extrinsic (outside the person; Deci et al., 2001). Studies on why people choose a teaching career have also added altruistic motivation, namely desire to improve the well-being of others (e.g., Anthony & Ord, 2008;Brookhart & Freeman, 1992;Tomšik, 2016). Several theories of motivation are relevant to the learning domain (Brophy, 2013). Expectancy-value theory explains how motivation influences students' choice of achievement tasks, persistence on those tasks, and performance on them (Wigfield & Eccles, 2000). This theory states that individuals' choice, persistence, and performance can be explained by their beliefs about how well they will do on the activity (i.e., expectancies for success) and the extent to which they value the activity or task (i.e., values; Wigfield & Eccles, 2000). 
Expectancies and values are influenced by ability beliefs, the perceived difficulty of different 43 tasks, and individual goals (Eccles & Wigfield, 2002). Expectancies and values have a direct influence on achievement choices, performance, effort, and persistence (Wigfield & Eccles, 2000). Different components of achievement values have been defined: (a) attainment value or importance of doing well, (b) intrinsic value or enjoyment from doing the task, (c) utility value or usefulness of the task, and (d) perceived cost to finish the task (Wigfield et al., 2020). Motivation in MOOCs Studying motivation in MOOCs, it is important to understand the value and worth of the MOOC for the participant (Macdonald & Ahern, 2015). A wide range of motivations for using MOOCs has been described (Kizilcec & Schneider, 2015;Luik et al., 2019;Milligan & Littlejohn, 2017;Zheng et al., 2015). It has been found that the learners who enroll in MOOCs tend to have different motivations than learners in traditional courses (Watted & Barak, 2018). Learners in MOOCs may not be focused on gaining a certificate of completion (Kizilcec et al., 2013) as course completion and certification is merely one of many potential outcomes of MOOC participation (Zheng et al., 2015). Learners can choose only parts of a MOOC according to their goals and interests (Kizilcec & Schneider, 2015;Wang & Baker, 2015;White et al., 2015). Several studies have attempted to identify and describe the major motives in MOOCs and have proposed different lists. Zheng et al. (2015) named four types of MOOC learner motivation: (a) fulfilling current needs, (b) satisfying curiosity, (c) preparing for the future, and (d) connecting with people. Watted and Barak (2018) grouped participants' motivating factors into three themes-career benefits, personal benefits, and educational benefits-and found that the general interest category, under the personal benefit theme, and professional competence, under career benefits, were the major motivating factors. General interest in the topic, desire for growth and enrichment, and an expectation to have fun and be challenged were the major motivating forces reported by Kizilcec and Schneider (2015). Milligan and Littlejohn (2017) identified nine types of motivation for participating, with four primary motives: (a) the opportunity to learn about the topic, (b) general interest in the topic, (c) the relevance of the course topic to current role challenges, and (d) its relevance to future career intention. White et al. (2015) The diversity of motivations can help predict persistence in a MOOC. Luik et al. (2018) found that the factors (a) interest and expectancies for course, (b) personal suitability of distance learning, (c) usefulness related to certification, and (d) social influence were rated higher by completers than by non-completers. Chaw and Tang (2019) showed that positive motivation (which consists of three elements: believing and having confidence in one's ability to perform well; valuing learning for its usefulness, importance, and relevance; learning to solve problems and develop skills) led to positive engagement, which promotes an increased tendency to complete MOOCs. Evans et al. (2016) claimed that participants who were motivated by their curiosity about online courses or by professional reasons tended to not persist. Wang and Baker (2015) showed that completers were more interested in the course content, whereas non-completers tended to be more interested in MOOCs as a type of learning experience. 
On the other hand, Douglas et al. (2020) argued that the scores of the Expectancy-Value-Cost motivation scale had only limited predictive power on performance, and Breslow et al. (2013) even stated that there were no correlations between motivation for enrollment and success in the course. Classifying MOOC Participants Based on Motivation and MOOC Completion A few studies have tried to classify MOOC participants according to their motivation and to look at specific completion data of the identified groups of participants. The methods used have included quantitative approaches, such as cluster analysis (Maya-Jariego et al., 2019) or data mining with clustering techniques (Gallén & Caro, 2017), as well as qualitative techniques, such as inductive content analysis (Barak et al., 2016). Based on content analysis of e-mail messages and forum posts, Barak et al. (2016) identified five types of MOOC completers according to their learning motivation. Networkers wish to meet people with similar interests. Problem-solvers seek to find a solution for a real problem from their work. Benefactors learn for the benefit of others and want to contribute to society. 'Innovation-seekers desire to stay constantly updated and informed. Complementary-learners are students who take the MOOC to broaden and deepen their curriculum. Gallén and Caro (2017) used clustering techniques in order to better understand the external regulation and motivations of MOOC participants. They identified three motivational profiles they labeled as convinced, cautious, and irrelevant. Convinced participants did not consider the course a waste of time, wanted to do it, and would not prefer doing other things. Also, they did not lose face in front of others, did not consider that others thought badly of them, and did not get in a lot of trouble. Irrelevant participants felt a little guilty for not doing the course, a little ashamed of themselves, and thought that people might think badly about them. Cautious participants did not cluster items with high prevalence and their answers on a seven-point Likert-type scale were mostly of the applies somewhat to me variety. Their highest-rated item was somewhat preferring to do other things. Maya-Jariego et al. (2019) classified participants based on motivation and learning intention. Three profiles of involvement in the course were identified: low interest, self-referential, and high commitment. All three profiles demonstrated significant differences in self-reported learning experiences at the end of the course. It was hypothesized that a significant predictor of differential dropout rates can be the motivation stated at the beginning of the course. Research Aim and Questions Maya-Jariego et al. (2019) used motivation and intent to complete MOOC as a diagnosis of drop-out. This study went further. We aimed to outline the main motivation clusters of participants in a computer programming MOOC, and to compare how these clusters differed in intention to complete and in completion rate. The study was based on the following research questions: 1. What kind of learner subgroups (clusters) can be identified based on motivation? 2. How do these clusters differ in intention to complete and in completion rates? Methodology The Estonian-language computer programming MOOC named Introduction to Programming (in Estonian Programmeerimise alused) lasted for eight weeks, with an expected total workload of 78 hours. The course was designed for learners with little or no programming experience. 
The MOOC gave an overview of some programming techniques and taught the basics of algorithmic thinking. So far, this MOOC has been organized five times since the winter of 2016 and the completion rate has been over 50% (Lepp et al., 2017). Sample and Data Collection The sample consisted of 1,181 respondents (571 male and 610 female) who enrolled in the Estonianlanguage MOOC Introduction to Programming organized by University of Tartu in the fall of 2018. Participants completed a pre-course questionnaire. Answering the questionnaire was voluntary and was not a prerequisite for passing the MOOC. The youngest participant was 10 years of age and the oldest was 70; the mean age of the participants was 32.4 years (SD = 10.82). Most of the participants had bachelor's (28.4%) or master's (29.8%) degrees. Of the participants, 64.2% were working and 25.9% were students. More than half of the participants (53.7%) had not learned programming before and 21.6% had learned it only by themselves. Of those who filled in the pre-course questionnaire, 700 (59.3%) went on to complete the MOOC. As in several previous studies (e.g., Kizilcec & Schneider, 2015;Zheng et al., 2015) this research used a quantitative approach to study motivation. Motivation was measured with a Factors Influencing Enrolment in MOOC (FIEM) scale based on expectancy-value theory (Eccles & Wigfield, 2002). According to the results from confirmatory factor analysis, the scale consisted of 28 items, divided by 7 factors describing internal, external, and altruistic motivation; suitability of online learning; and social influence . This scale was chosen because it was based on motivational theory and covered different areas of motivation. A description of the scale with sample items is presented in Table 1. The prefacing statement to all motivational items was "What did motivate you to enroll in the MOOC?", and all motivational items were presented on a 7-point Likert scale ranging from 1 (totally disagree) to 7 (totally agree). 47 The questionnaire ended with questions about background data, and self-evaluation of the probability of completing the MOOC in percentage terms in order to measure the intention to complete. Data about actual completion or non-completion of the MOOC was taken from the Moodle learning platform. Data Analysis SPSS version 25.0 was used for statistical data analysis. First, composite scores of individual items from the FIEM scale were calculated based on seven factors . The second step focused on building motivation clusters from composite score results of the FIEM scale. This was done by conducting a k-mean cluster analysis with the Euclidean distance and with a maximum of 10 iterations. Cluster analysis was used to group respondents based on motivational factors. As k-means cluster analysis is conducted with different numbers of clusters to find a solution that is the most meaningful in the research context (Jain, 2010), we tested the cluster models with three, four, and five clusters. In the last phase, general linear models (multivariate and repeated measures) were used to describe differences between clusters. ANOVA (comparing intention to complete, which was measured in percentages) and chi-square test (comparing proportion of completers and non-completers) were used to answer the second research question. Results Based on the k-means cluster analysis, the respondents were divided into four groups according to their motivation at time of enrolling in the programming MOOC. 
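The clustering step just described can be illustrated with a few lines of scikit-learn. This is not the authors' code; the data below is synthetic, generated only to show the shape of the computation (1,181 respondents by 7 composite factor scores, k = 4, Euclidean distance, at most 10 iterations).

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
scores = rng.uniform(1, 7, size=(1181, 7))   # synthetic composite scores on the 7-point scale

kmeans = KMeans(n_clusters=4, max_iter=10, n_init=10, random_state=0).fit(scores)
labels = kmeans.labels_             # cluster membership for each respondent
profiles = kmeans.cluster_centers_  # mean factor profile of each cluster, one row per cluster
print(np.bincount(labels))          # cluster sizes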
The most meaningful results were obtained with four identified clusters, which showed the differences among the respondents (Table 2). 49 The second cluster (28.6% of respondents) was identified as Over-motivated. They rated almost all motivational factors higher than did members of other clusters (p < .05). Only in the case of factors Suitability to family and work and Personal suitability of distance learning was there no statistically significant difference with cluster 1. All the factor means were over 5. The smallest cluster (19.6% of respondents) was cluster 3. The highest-rated factors in this cluster included Interest in and expectations of the course, Personal suitability of distance learning (difference from other factors in both cases p < .05), Importance and perceived ability, and Social influence. The latter two were not significantly different from each other (p > .05), but different from other factors (all p < .05). Because items such as I get more knowledge from this course, I need that knowledge in real life, I know that I do well in this topic, and members of my family think that I would be successful at this course belonged to these factors, we named this cluster Success motivated. Compared with clusters 1 and 2, the ratings in this cluster were all statistically lower (all p < .05). The fourth and last cluster included 24.0% of the participants. Compared with clusters 1 and 2, the respondents in this cluster gave lower ratings to motivational factors. The Social influence factor was the lowest rated among all clusters and was also the lowest rated in this cluster compared with other factors (all p < .05). The factors Importance and perceived ability and Usefulness related to certification were also rated lower than in other clusters (all p < .05). Learners in this cluster could be described as interested in the topic, as well as being people for whom this type of learning was suitable and significant others did not play a role in their enrollment in the MOOC. We identified this cluster as Interest motivated. Comparing the clusters in terms of self-evaluated probability of completing the MOOC in percentages (Table 3), it was found with ANOVA that there was a statistically significant difference in evaluations between the clusters (F = 15.228, p < .001). Bonferroni Post Hoc tests indicated that Interest motivated learners evaluated the probability of completing the MOOC lower than did learners from the Opportunity motivated and Over-motivated clusters (in both cases p < .001). Over-motivated learners also evaluated the probability of completing the MOOC higher than did the Success motivated group (p < .01). There were no other statistically significant differences between the clusters (all p > .05). Significant differences emerged when we compared the percentages of respondents in three clusters according to completion rate (Table 4). The percentage of completers was significantly higher than the percentage of non-completers in the Opportunity motivated, Success motivated, and Interest motivated clusters. There was no significant difference in the Over-motivated cluster. The results also indicated that the percentage of learners in the Opportunity motivated and Interest motivated clusters who completed the MOOC was statistically higher than the respective proportion of completers in the Over-motivated cluster (accordingly, chi-square 6.829, p = .001 and chi-square 5.454, p = .020). 
Table 4. Comparison (table not reproduced)

The proportion of completers in the Opportunity motivated cluster was also significantly higher than in the Success motivated cluster (chi-square 4.959, p = .026). There was no statistically significant difference in completion rates between the Opportunity motivated and Interest motivated (chi-square .941, p = .332), Success motivated and Interest motivated (chi-square 1.607, p = .205), and Over-motivated and Success motivated (chi-square .801, p = .371) clusters.

Discussion
The aim of this study was to outline the main motivation clusters of participants in a computer programming MOOC, and to compare how these clusters differ in completion rate. The FIEM scale based on expectancy-value theory (Eccles & Wigfield, 2002) was used to form clusters of MOOC learners according to their motivation to enroll in the MOOC. As an answer to the first research question ("What kind of learner sub-groups (clusters) can be identified based on motivation?"), we found four clusters of learners. Previous studies have identified three (Gallén & Caro, 2017; Maya-Jariego et al., 2019) or five (Barak et al., 2016) clusters, but they used different motivational scales (Gallén & Caro, 2017; Maya-Jariego et al., 2019) or were based only on e-mails and forum posts as data sources (Barak et al., 2016). It was interesting that the highest proportion of learners belonged to the cluster named Over-motivated, as they rated almost all motivational factors higher than did members of the other clusters. This finding aligned with the result of Maya-Jariego et al. (2019), who found that more than half of MOOC participants were highly motivated. Opportunity motivated learners take advantage of e-learning courses, including MOOCs. They like distance learning and, due to family and work commitments, this kind of learning can be for them the only option for studying interesting topics. In terms of size, this cluster was just a little smaller than the Over-motivated cluster. The opportunity to learn about the topic was also mentioned as one of the four primary motives in a previous study (Milligan & Littlejohn, 2017) and suitability of this kind of learning was among the three highest rated motivational factors in Luik et al. (2019). Almost a quarter of learners belonged to the Interest motivated cluster: learners interested in the topic who find distance learning suitable for them and decided by themselves to enroll in the MOOC. It was interesting that only a quarter of the participants belonged to this cluster because interest has been reported as one of the highest motivational factors (Kizilcec & Schneider, 2015; Luik et al., 2019; Milligan & Littlejohn, 2017; White et al., 2015). These learners were not influenced by others; a similar cluster was found in a previous study (Maya-Jariego et al., 2019), where the self-referential cluster was also described as people who felt less pressured by the opinion of family and friends. The smallest cluster, Success motivated, could be described as people oriented towards getting more benefit from the course. They and their significant others believe that they can be successful studying this topic. Barak et al. (2016) identified a similar cluster, named benefactors. Like our Success motivated cluster, they wanted to learn for their own benefit, but according to the previous study (Barak et al., 2016) they also wanted to contribute to society, which was not a trait identified in our study.
A previous study (Chaw & Tang, 2019) also observed that confidence in one's ability to perform well and valuing learning for its usefulness leads to increased tendency to complete MOOCs. Second, we examined differences among the clusters according to the intention to complete the MOOC and according to completion rates. Interest motivated respondents evaluated their probability of completing the MOOC lower than did the Opportunity motivated and Over-motivated respondents. This could be related to the tendency of some learners to choose only parts of MOOC according to their interests (Kizilcec & Schneider, 2015;Wang & Baker, 2015;White et al., 2015). Therefore, it is possible that they simply intend to satisfy their curiosity and, as a result, they know that they are not very likely to complete the MOOC. Maya-Jariego et al. (2019) argued that the intention to initiate and complete a MOOC was highest in a highly motivated cluster, which was supported by our study. Over-motivated learners evaluated their probability of completing the MOOC higher than did Interest motivated and Success motivated respondents. Unlike the findings of Breslow et al. (2013), our results indicated that motivation for enrollment is related to completion of the course. The comparison of completion rates indicated that the proportion of completers was higher than the proportion of non-completers in all clusters, except for the Over-motivated. The result that almost two-thirds of learners from the Opportunity motivated cluster completed the course indicated that this type of motivation seems to be the most beneficial for completing a MOOC. Learners who are 53 comfortable with distance learning might be better equipped to cope with the difficulties of a MOOC, to keep motivated and, therefore, complete the MOOC (Luik et al., 2018). More than 60% of the participants in the Interest motivated cluster completed the MOOC. Also, the results indicated that the intention to complete is not related to actual completion. While Interest motivated learners evaluated their probability of completing the MOOC almost the lowest, the completion rate in this cluster was higher than among Over-motivated learners and not significantly different from the other clusters. The results of previous studies about the relationships between interest and completion have been inconsistent. For example, Luik et al. (2018) found that completers rated interest-related motivational items higher than did non-completers, but the findings of Evans et al. (2016) indicated that MOOC learners motivated by curiosity tended to drop out. Interest in the course content leads to completion, but interest in MOOCs as a special type of learning did not (Wang & Baker, 2015). Consequently, it seems that the interaction between interest and completion needs further study. An interesting finding was that learners in the Over-motivated cluster, having reported the highest motivation, dropped out more than did Opportunity motivated and Interest motivated participants. Maya-Jariego et al. (2019) claimed, based on intention to complete, that motivation expressed before the course was a relevant predictor of varying dropout rates and learners with higher motivation at the beginning of the MOOC were more likely to complete. However, our results did not support this statement. Also, it seems that orientation to success is not particularly beneficial for completing a MOOC. 
Conclusion and Implications Our findings can be useful for MOOC instructors, as a better vision of participants' motivational profiles at the beginning of the MOOC might help to design better supports for different learners and result in lower dropout rates. Learners who are highly motivated at the beginning of a MOOC might get less attention because it is assumed that they do not need so much outside support. However, our results indicated that learners with high motivation in all areas tended to drop out. The reason might be that, compared with others, they had a less clear idea about why they needed this MOOC. The other group of learners needing more attention includes those who believe that they can be successful in the subject area of the MOOC, partially because they are convinced by their family members and friends. However, merely believing in success does not help one complete a MOOC and such learners may be more prone to being distracted when difficulties arise. The study has some limitations that must be taken into account. The number of participants was low compared with the number of participants commonly found in MOOCs, but the participation rate was still considerable. Also, this MOOC was about computer programming and with a higher completion rate than the average; the results may be not generalizable to other MOOCs. Furthermore, as is usual in studies on motivation, we used a self-reported scale. In future studies, it would be interesting to extend our work and explore larger samples and MOOCs with different topics. Additionally, considering the globality of MOOCs, it would be relevant to analyze the 54 motivation to learn and completion rates among culturally diverse learners. The relationships between motivation and completion need more attention in future investigations, because it is still not clear how interest influences completion. Also, the actual reasons for highly motivated participants dropping out are still vague and could, therefore, prove to be a very beneficial avenue of research.
/**
 * Wrapper that will let users know to use the Elytron Tool scripts instead
 * of using the wildfly-elytron-tool JAR directly.
 *
 * @author <a href="mailto:[email protected]">Farah Juma</a>
 */
public class ElytronToolScriptWrapper {

    private static final String ELYTRON_TOOL_SCRIPT = "elytron-tool.[sh|bat|ps1]";

    public static void main(String[] args) {
        try {
            File jarPath = new File(ElytronToolScriptWrapper.class.getProtectionDomain().getCodeSource().getLocation().toURI().getPath());
            File parentDir = jarPath.getParentFile();
            if (parentDir != null) {
                String parentDirPath = parentDir.getAbsolutePath();
                StringBuilder elytronToolScriptCommand = new StringBuilder(parentDirPath + File.separator + ELYTRON_TOOL_SCRIPT + " ");
                Arrays.stream(args).forEach(arg -> elytronToolScriptCommand.append(arg + " "));
                System.out.println(ElytronToolWrapperMessages.ROOT_LOGGER.redirectToScript(elytronToolScriptCommand.toString()));
                return;
            }
        } catch (URISyntaxException e) {
            // ignored, simple message will be displayed instead
        }
        System.out.println(ElytronToolWrapperMessages.ROOT_LOGGER.redirectToScriptSimple());
    }
}
/** * Non-metric Space Library * * Main developers: Bilegsaikhan Naidan, Leonid Boytsov, Yury Malkov, Ben Frederickson, David Novak * * For the complete list of contributors and further details see: * https://github.com/nmslib/nmslib * * Copyright (c) 2013-2018 * * This code is released under the * Apache License Version 2.0 http://www.apache.org/licenses/. * */ #if defined(WITH_EXTRAS) #include <string.h> #include "space.h" #include "bunit.h" #include "testdataset.h" #include "space_sqfd.h" namespace similarity { Object* CreateSqfdObject( const std::vector<std::vector<float>>& cluster, const std::vector<float>& weight) { CHECK(cluster.size() > 0); CHECK(weight.size() == cluster.size()); std::vector<char> buf( 2*sizeof(int) + (cluster[0].size() + 1) * cluster.size() * sizeof(float)); int* h = reinterpret_cast<int*>(&buf[0]); h[0] = cluster.size(); h[1] = cluster[0].size(); float* obj = reinterpret_cast<float*>(&buf[2*sizeof(int)]); int pos = 0; for (size_t i = 0; i < cluster.size(); ++i) { for (size_t j = 0; j < cluster[i].size(); ++j) { obj[pos++] = cluster[i][j]; } obj[pos++] = weight[i]; } return new Object(-1, -1, buf.size(), &buf[0]); } TEST(Sqfd_From_Article) { std::vector<std::vector<float>> cq = {{3,3}, {8,7}}; std::vector<float> wq = {0.5, 0.5}; Object* q = CreateSqfdObject(cq, wq); std::vector<std::vector<float>> co = {{4,7}, {9,5}, {8,1}}; std::vector<float> wo = {0.5, 0.25, 0.25}; Object* o = CreateSqfdObject(co, wo); SqfdFunction<float>* f = new SqfdHeuristicFunction<float>(1.0); Space<float>* space = new SpaceSqfd<float>(f); /* >>> import numpy as np >>> import math >>> w = np.array([0.5,0.5,-0.5,-0.25,-0.25]) >>> a = np.array([[1.0, 0.135, 0.195, 0.137, 0.157], [0.135, 1.0, 0.2, 0.309, 0.143], [0.195, 0.2, 1.0, 0.157, 0.122], [0.137, 0.309, 0.157, 1.0, 0.195], [0.157, 0.143, 0.122, 0.195, 1.0]]) >>> math.sqrt(w.dot(a).dot(w.transpose())) 0.807 */ float d = space->IndexTimeDistance(q, o); EXPECT_EQ_EPS(d, 0.808f, 0.01f); delete space; delete q; delete o; } TEST(Sqfd) { std::vector<std::vector<float>> cq = { {0.382806,0.397073,0.661498,0.683582,0.203314,0.0871583,1}, {0.482246,0.368699,0.701657,0.731006,0.175442,0.132232,0.20056}, {0.740454,0.434634,0.661071,0.850084,0.681469,0.0610024,0.218037}, {0.178604,0.416208,0.62079,0.437091,0.757451,0.0982573,0.256335}, {0.518211,0.432369,0.639439,0.23629,0.690716,0.191468,0.193767}, {0.250961,0.416317,0.621276,0.344846,0.763613,0.0738424,1}, {0.609122,0.331734,0.760648,0.710042,0.769226,0.0996582,1}, {0.744822,0.425876,0.551634,0.223641,0.23818,0.0885243,1}, {0.843675,0.543647,0.541379,0.798141,0.496724,0.0357157,1}, {0.612551,0.408074,0.600394,0.266899,0.234377,0.143155,0.252654}}; std::vector<float> wq = { 0.0822,0.1005,0.1314,0.0878,0.1087,0.1413,0.0397,0.0886,0.0832,0.1366}; Object* q = CreateSqfdObject(cq, wq); std::vector<std::vector<float>> co = { {0.720299,0.460648,0.609983,0.733792,0.279245,0.0940223,0.940909}, {0.732504,0.470709,0.584041,0.849335,0.51135,0.105338,0.100655}, {0.790358,0.446342,0.585623,0.633481,0.834874,0.0901916,0.855607}, {0.265014,0.441256,0.551832,0.435405,0.199537,0.111734,0.240841}, {0.714692,0.469428,0.333677,0.1261,0.297041,0.0146298,0.987917}, {0.194637,0.449039,0.533339,0.482084,0.214012,0.0459264,1}, {0.288555,0.430071,0.558277,0.17054,0.765986,0.0694933,1}, {0.268943,0.460447,0.544101,0.583028,0.829013,0.0607609,1}, {0.23752,0.443694,0.554333,0.345023,0.773767,0.089284,0.310363}, {0.55076,0.411417,0.602403,0.311277,0.628119,0.171292,0.163618}}; std::vector<float> wo = { 
0.066,0.2385,0.0651,0.1085,0.12,0.0968,0.0684,0.0541,0.0965,0.0861}; Object* o = CreateSqfdObject(co, wo); SqfdFunction<float>* f = new SqfdHeuristicFunction<float>(1.0); Space<float>* space = new SpaceSqfd<float>(f); float d = space->IndexTimeDistance(q, o); EXPECT_EQ_EPS(d, 0.214f, 0.01f); delete space; delete q; delete o; } } // namespace similarity #endif
/**
 * The implementation tries to remove the activation and doesn't check
 * the effective and expiration dates. This is just in case the engine
 * is running near midnight: if the activation was added right before
 * midnight, we still need to remove it to be safe.
 * @param inx the index used to look up the activation in the terminal memory
 * @param engine the rule engine whose agenda holds the activation
 * @param mem the working memory containing the terminal memory for this node
 */
public void retractFacts(Index inx, Rete engine, WorkingMemory mem) {
    Map<?, ?> tmem = (Map<?, ?>) mem.getTerminalMemory(this);
    LinkedActivation act = (LinkedActivation) tmem.remove(inx);
    if (act != null) {
        engine.getAgenda().removeActivation(act);
    }
}
// components/ExpensePageContent.tsx
import React from 'react'
import Head from 'next/head'
import Footer from './Footer'
import ExpenseData from './ExpenseData'
import { useTranslation } from '../lib/i18n'
import { ExpenseProps } from './types/common'

interface ExpensePageContentProps {
  expenseData: ExpenseProps
}

const ExpensePageContent: React.FC<ExpensePageContentProps> = ({ expenseData }) => {
  const { t } = useTranslation()
  const {
    user: { first, last },
    merchant
  } = expenseData

  return (
    <>
      <Head>
        <title>
          {t('common:expense')} — {first} {last} &lt;{merchant}&gt;
        </title>
      </Head>
      <ExpenseData {...expenseData} />
      <Footer />
    </>
  )
}

export default ExpensePageContent
/** * Increments the shared distributed counter named {@code counterName} by one. */ public void incrementCounter(String counterName) throws Exception { ArgumentChecker.notNull(counterName); SharedCount count = sharedCounters.get(counterName); Preconditions.checkArgument(count != null, "Invalid counter name: " + counterName + ". Shared counter may be down."); VersionedValue<Integer> currentCount = count.getVersionedValue(); int newCount = currentCount.getValue() + 1; int tries = 0; while (!count.trySetCount(currentCount, newCount)) { currentCount = count.getVersionedValue(); newCount = currentCount.getValue() + 1; if (++tries >= maxRetries) { sharedCounters.remove(counterName); count.removeListener(sharedCountListeners.get(counterName)); count.close(); reregisterCounter(counterName, sharedCountListeners.get(counterName), newCount); throw new IllegalStateException("Unable to increment shared counter " + counterName + " after " + maxRetries + " attempts. Zookeeper connection may be down."); } } localCounters.put(counterName, newCount); }
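The increment above relies on an optimistic compare-and-set loop against a shared ZooKeeper counter. Below is a minimal, self-contained sketch of that same retry pattern written directly against Apache Curator's SharedCount recipe; the connection string, ZNode path and retry limit are illustrative assumptions, not values from the original code.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.shared.SharedCount;
import org.apache.curator.framework.recipes.shared.VersionedValue;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class SharedCountIncrementSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        try (SharedCount counter = new SharedCount(client, "/counters/example", 0)) {
            counter.start();
            int maxRetries = 5;
            for (int attempt = 0; attempt < maxRetries; attempt++) {
                VersionedValue<Integer> current = counter.getVersionedValue();
                // trySetCount only succeeds if nobody changed the value in between.
                if (counter.trySetCount(current, current.getValue() + 1)) {
                    System.out.println("New count: " + counter.getCount());
                    return;
                }
            }
            throw new IllegalStateException("Could not increment after " + maxRetries + " attempts");
        } finally {
            client.close();
        }
    }
}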
import cheerio from 'cheerio'; import { shallow } from 'enzyme'; import React from 'react'; import { IOrganization, IOrganizationQuota, IV3OrganizationQuota } from '../../lib/cf/types'; import { OrganizationsPage, EditOrganizationQuota } from './views'; import { spacesMissingAroundInlineElements } from '../../layouts/react-spacing.test'; function linker(_route: string, params: any): string { return params?.organizationGUID ? `/org/${params.organizationGUID}` : '/test'; } describe(OrganizationsPage, () => { const orgA = ({ metadata: { guid: 'a' }, entity: { name: 'A', quota_definition_guid: 'trial' }, } as unknown) as IOrganization; const orgB = ({ metadata: { guid: 'b' }, entity: { name: 'B', quota_definition_guid: 'billable' }, } as unknown) as IOrganization; const suspendedOrg = ({ metadata: { guid: 'c' }, entity: { name: 'Suspended', quota_definition_guid: 'billable', status: 'suspended' }, } as unknown) as IOrganization; const quotaBillable = ({ entity: { name: 'not-default' }, } as unknown) as IOrganizationQuota; const quotaFree = ({ entity: { name: 'default' }, } as unknown) as IOrganizationQuota; const quotas = { billable: quotaBillable, trial: quotaFree }; it('should display list of organizations', () => { const markup = shallow( <OrganizationsPage organizations={[orgA, orgB]} linkTo={linker} quotas={quotas} />, ); const $ = cheerio.load(markup.html()); expect($('p.govuk-body:first-of-type').text()).toContain( 'There are 2 organisations which you can access.', ); expect($('table tbody tr')).toHaveLength(2); expect($('table tbody tr:first-of-type th a.govuk-link').text()).toBe('Organisation name: A'); expect($('table tbody tr:first-of-type th a.govuk-link').prop('href')).toBe( '/org/a', ); expect($('table tbody tr:first-of-type td:last-of-type').text()).toBe( 'Trial', ); expect($('table tr:last-of-type th a.govuk-link').text()).toBe('Organisation name: B'); expect($('table tr:last-of-type th a.govuk-link').prop('href')).toBe('/org/b'); expect($('table tr:last-of-type td:last-of-type').text()).toBe('Billable'); }); it('should highlight suspended organizations', () => { const markup = shallow( <OrganizationsPage organizations={[orgA, suspendedOrg]} linkTo={linker} quotas={quotas} />, ); const $ = cheerio.load(markup.html()); expect($('table tbody tr')).toHaveLength(2); expect($('table tbody tr:first-of-type th a.govuk-link').text()).toBe('Organisation name: A'); expect($('table tbody tr:first-of-type th span.govuk-tag--grey').length).toEqual(0); expect($('table tbody tr:nth-of-type(2) th a.govuk-link').text()).toBe('Organisation name: Suspended'); expect($('table tbody tr:nth-of-type(2) th span.govuk-tag--grey').text()).toBe( 'Suspended', ); }) it('should display list of organizations with single item', () => { const markup = shallow( <OrganizationsPage organizations={[orgA]} linkTo={linker} quotas={quotas} />, ); const $ = cheerio.load(markup.html()); expect($('p.govuk-body:first-of-type').text()).toContain( 'There is 1 organisation which you can access.', ); expect($('table tbody tr')).toHaveLength(1); }); }); describe(EditOrganizationQuota, () => { it('should parse the page correctly', () => { const quota = { guid: '__QUOTA_1_GUID__', name: 'quota-1', apps: { total_memory_in_mb: 2 }, routes: { total_routes: 2 }, services: { total_service_instances: 2 }, }; const markup = shallow(<EditOrganizationQuota csrf="__CSRF_TOKEN__" organization={{ entity: { name: 'org-name', quota_definition_guid: '__QUOTA_2_GUID__' }, metadata: { guid: '__ORG_GUID__' }, } as IOrganization} quotas={[ quota 
as IV3OrganizationQuota, { ...quota, guid: '__QUOTA_2_GUID__', name: 'quota-2' } as IV3OrganizationQuota, ]} />); expect(markup.render().find('table').text()).toContain('quota-1'); expect(markup.render().find('table').text()).toContain('quota-2'); expect(markup.render().find('select option').text()).toContain('quota-1'); expect(markup.render().find('select option').text()).toContain('quota-2'); expect(markup.render().find('select option[selected]').text()).toEqual('quota-2'); expect(spacesMissingAroundInlineElements(markup.render().html()!)).toHaveLength(0); }); });
// Render does the pbr pass func (tmp *GenPass) Render() { P := mgl32.Perspective(mgl32.DegToRad(90.0), 1.0, 0.1, 10.0) Vs := make([]mgl32.Mat4, 6) Vs[0] = mgl32.LookAt(0, 0, 0, 1, 0, 0, 0, -1, 0) Vs[1] = mgl32.LookAt(0, 0, 0, -1, 0, 0, 0, -1, 0) Vs[2] = mgl32.LookAt(0, 0, 0, 0, 1, 0, 0, 0, 1) Vs[3] = mgl32.LookAt(0, 0, 0, 0, -1, 0, 0, 0, -1) Vs[4] = mgl32.LookAt(0, 0, 0, 0, 0, 1, 0, -1, 0) Vs[5] = mgl32.LookAt(0, 0, 0, 0, 0, -1, 0, -1, 0) tmp.shader.Use() tmp.equirecttex.Bind(0) gl.Viewport(0, 0, int32(tmp.resolution), int32(tmp.resolution)) tmp.framebuffer.Bind() for i := 0; i < 6; i++ { tmp.shader.UpdateMat4("V", Vs[i]) tmp.shader.UpdateMat4("P", P) gl.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_CUBE_MAP_POSITIVE_X+uint32(i), tmp.emptyCubemap.GetHandle(), 0) tmp.framebuffer.Clear() tmp.shader.Render() } tmp.framebuffer.Unbind() tmp.equirecttex.Unbind() tmp.shader.Release() gl.Viewport(0, 0, 800, 600) }
/**
 * @description: calculate the cosine of the angle between the two edges
 *               (Left -> Middle) and (Middle -> Right)
 * @param POINT Left, POINT Middle, POINT Right
 * @return double
 */
double cos_between_edges(POINT Left, POINT Middle, POINT Right)
{
    VECTOR One = (VECTOR)malloc(sizeof(struct Vector));
    VECTOR Two = (VECTOR)malloc(sizeof(struct Vector));
    /* edge vectors between the three points */
    One->x_dis = Middle->x_pos - Left->x_pos;
    One->y_dis = Middle->y_pos - Left->y_pos;
    Two->x_dis = Right->x_pos - Middle->x_pos;
    Two->y_dis = Right->y_pos - Middle->y_pos;
    /* cos(theta) = (a . b) / (|a| * |b|); free the temporaries before returning */
    double result = inner_product(One, Two) / (length_of_vector(One) * length_of_vector(Two));
    free(One);
    free(Two);
    return result;
}
package select_pod

import (
	"fmt"

	"github.com/manifoldco/promptui"
)

var podTemplate = &promptui.SelectTemplates{
	Label:    "{{ . }}",
	Active:   fmt.Sprintf("%s {{ .Name | cyan }}", promptui.IconSelect),
	Inactive: "{{ .Name | magenta }}",
	Selected: fmt.Sprintf("%s {{ .Name | cyan }}", promptui.IconGood),
	Details: `
--------- Info ----------
{{ "Namespace:" | faint }} {{ .Namespace | yellow }}
{{ "Node:" | faint }} {{ .Spec.NodeName | yellow }}
{{if ne .Status.Phase "Running"}}{{ "Status:" | faint }} {{ .Status.Phase | red }}{{else}}{{ "Status:" | faint }} {{ .Status.Phase | green }}{{end}}
{{ "Pod IP:" | faint }} {{ .Status.PodIP | yellow }}`,
}
undefined = undefined list :: a -> [a] list x = [x] map f [] = [] map f (x:xs) = (f x):(map f xs) succ :: Int -> Int succ x = undefined ones :: [] Int ones = 1:ones nats = 1:map succ nats
import java.net.*;
import java.io.*;
import java.lang.Thread;
import java.util.ArrayList;
import java.util.Scanner;
import ch.makery.address.model.*;

public class Main {
    public static void main(String[] args) {
        System.out.println("Enter a port for the server!");
        Scanner odczyt = new Scanner(System.in);
        int port = odczyt.nextInt();
        try {
            System.out.println("Creating server on port: " + port);
            Server server = new Server(port);
            while (true) Thread.sleep(1000000000);
        } catch (Exception e) {
            System.out.println(e);
        }
    }
}
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.junit.Test;

import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;

/**
 * Chapter 2. OOP.
 * Lesson 4. Polymorphism.
 * <p>
 * Test class.
 * Class contains solution of task 785.
 *
 * @author Pavel Zubaha (mailto:[email protected])
 * @version 1
 * @since 30.05.2017
 */
public class PaintTest {
    /**
     * Test draw triangle.
     */
    @Test
    public void whenDrawTriangleThenOutTriangleAsString() {
        // assign output stream.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        System.setOut(new PrintStream(out));
        // define expected triangle as the String.
        StringBuilder expectedBuilder = new StringBuilder();
        String separator = System.getProperty("line.separator");
        expectedBuilder.append(separator);
        for (int index = 0; index < 10; index++) {
            for (int i = 0; i <= index; i++) {
                expectedBuilder.append("U");
            }
            expectedBuilder.append(separator);
        }
        expectedBuilder.append(separator);
        String expectedString = expectedBuilder.toString();
        Paint paint = new Paint();
        paint.draw(new Triangle());
        // checking.
        assertThat(out.toString(), is(expectedString));
    }

    /**
     * Test draw square.
     */
    @Test
    public void whenDrawSquareThenOutSquareAsString() {
        // assign output stream.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        System.setOut(new PrintStream(out));
        // define expected square as the String.
        StringBuilder expectedBuilder = new StringBuilder();
        String separator = System.getProperty("line.separator");
        expectedBuilder.append(separator);
        StringBuilder oneStringOfSquare = new StringBuilder();
        for (int i = 0; i < 12; i++) {
            oneStringOfSquare.append("U");
        }
        oneStringOfSquare.append(separator);
        for (int index = 0; index < 8; index++) {
            expectedBuilder.append(oneStringOfSquare);
        }
        expectedBuilder.append(separator);
        String expectedString = expectedBuilder.toString();
        // act drawing.
        Paint paint = new Paint();
        paint.draw(new Square());
        // checking.
        assertThat(out.toString(), is(expectedString));
    }
}
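The production classes exercised by these tests (Paint, Triangle, Square) are not shown here. The sketch below is one hypothetical shape hierarchy that would satisfy the expected output: a Shape abstraction whose pic() method returns the figure as a string, and a Paint class that depends only on that abstraction. The names and method signatures are guesses, not the original implementation.

interface Shape {
    String pic();
}

class Triangle implements Shape {
    @Override
    public String pic() {
        String sep = System.getProperty("line.separator");
        StringBuilder sb = new StringBuilder(sep);
        // rows of 1..10 "U" characters, as expected by the test above
        for (int row = 1; row <= 10; row++) {
            for (int i = 0; i < row; i++) {
                sb.append("U");
            }
            sb.append(sep);
        }
        return sb.toString();
    }
}

class Square implements Shape {
    @Override
    public String pic() {
        String sep = System.getProperty("line.separator");
        StringBuilder sb = new StringBuilder(sep);
        // 8 rows of 12 "U" characters, as expected by the test above
        for (int row = 0; row < 8; row++) {
            for (int i = 0; i < 12; i++) {
                sb.append("U");
            }
            sb.append(sep);
        }
        return sb.toString();
    }
}

class Paint {
    // Polymorphism: draw() only depends on the Shape interface, not on the concrete figure.
    public void draw(Shape shape) {
        System.out.println(shape.pic());
    }
}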
How long, the American farmer-poet Wendell Berry asked, does it take to make the woods? As long, he answered himself, as it takes to make the world. But Berry warned that woodlands can be unmade overnight, and that it then takes generations to remake them. The Government should remember this, before it proceeds to sell off the harvesting rights in our national forests, currently vested in Coillte. There are many reasons – economic, cultural and environmental – for valuing our woodlands. Until recently, however, we have not been very good at exploiting them intelligently and enjoying them fully. We often fool ourselves about this. The familiar nationalist narrative, telling us that our cherished virgin forests were ravaged by the foreign foe, is more than a little exaggerated. The author of the great lament Cill Chais identified the “end of the woods”, deireadh na gcoillte, with the end of the Gaelic world. He was partly right – but he ignored the inconvenient truth that we natives had also been busy clearing forests for millennia. We didn’t exactly rush to restore them after independence, either. And the “social forestry” finally rolled out in the 1950s was a last-ditch effort to stem rural emigration, not a comprehensive policy for the sector. From the late 1980s, however, we have seen a steadily more progressive – and profitable – engagement with forestry. This has happened through the high certification and environmental standards espoused (despite early lapses) by Coillte, a State-owned company with a commercial mandate; through successful tree-planting incentives for private landowners; and through NGO activism. Forestry and the wood-processing industries directly support thousands of jobs, and many more indirectly. Native tree species are being re-established through impressive restoration programmes undertaken by the National Parks and Wildlife Service, by Coillte, and by NGOs such as Woodlands of Ireland. The recreational value of public forests is confirmed by 18 million annual visits nationwide. Multiple additional benefits from woodlands, from carbon storage to flood control to enhanced human health, are being rediscovered or recognised for the first time. A forthcoming Woodlands of Ireland report will argue that our national accounts critically undervalue the natural capital and ecosystems services flowing from environmentally responsible forestry. Yet just at this hopeful moment, the Government is proposing to sell Coillte’s harvesting rights, for almost a century to come, to the highest bidder. This would almost certainly be a foreign company, with no stake in local communities dependent on forestry jobs, or in the long-term health of our landscapes. The reason? Selling public assets – to pay off privately generated State debt – ticks another box on the EU-ECB-IMF troika’s must-do list. Why lament for our forests, you may well ask, when this Government repeatedly reneges on commitments to the most disadvantaged in our midst? But if this proposed sell-off will cost the State dear, in financial and many other senses, then the disadvantaged will be even bigger losers if it goes ahead. Critics of the proposal include unlikely bedfellows: sawmill owners on the Irish Timber Council, Coillte workers in Impact, conservation NGOs, recreational groups such as Mountaineering Ireland, and a mixed bag of TDs. Impact and the timber council have made their cases well, in cogent position papers.
Happily, neither group argues only from its sectoral self-interest, but takes on board broader environmental and cultural benefits accruing from a well-managed national forest. The British government has recently abandoned a similar sell-off plan, due to similar counter-arguments – but also and especially due to the one thing the Irish Government has yet to encounter on the issue – a massive public outcry. Economist Peter Bacon, in a report for Impact, calculates that the State could make a substantial loss of €1.3 billion on the sell-off over time, unless the price of timber almost doubles. He estimates the short-term gain at a maximum of €774 million – others say as little as €400 million – to pay off debt. There are some signs that the Government is rowing back. In particular, Minister for Agriculture Simon Coveney has attempted to address concerns that sawmills would close, with big job losses, due to higher prices imposed by a buyer. He insists that a condition of sale would oblige the buyer to supply timber locally at affordable rates. But such a condition would depress the sale price, thus further undermining the case for a sell-off. The conservation and recreation lobbies’ critique of the proposed sale takes us down a similar road. At present, Coillte is involved in numerous top-drawer conservation schemes, restoring native woodland and bogs on its properties. Likewise, the company maintains 23,000km of public access roads, an “open gate” policy benefitting communities all over the country. Unlike the parks and wildlife service, Coillte carries out these public-interest functions without any subsidy. If a private buyer were obliged to carry them out, this condition would again severely depress the sale price. And if a remnant of Coillte were somehow to remain in charge of these responsibilities, it would then require State funding to do so, as it would enjoy no future profits from timber. Once again, the State, and the public, lose out. The current strategic plan for the sector dates from 1996. It has recently been reviewed, in extensive consultation with stakeholders, by the Forest Service. Inexplicably, this review has not been published. The Government could restore some faith in democratic, strategic planning by shelving the sell-off plan, at least until this review is subject to extensive public discussion. Our forestry model is far from perfect, but Coillte has been a key player in developing commercial yet environmentally and socially responsible State forestry, alongside a growing private sector. Does anyone really believe we are going to fix this country by breaking one of the few things that is still working? Paddy Woodworth’s book, Our Once and Future Planet: Restoring the World in the Climate Change Century, will be published by University of Chicago Press in October.
// createSessionForeign is used to create a new session in a foreign datacenter.
// This is more complex since the local agent cannot be used to create
// a session, and we must associate with a node in the remote datacenter.
func (c *ExecCommand) createSessionForeign() (string, error) {
	health := c.client.Health()
	services, _, err := health.Service("consul", "", true, nil)
	if err != nil {
		return "", fmt.Errorf("Failed to find Consul server in remote datacenter: %v", err)
	}
	if len(services) == 0 {
		return "", fmt.Errorf("Failed to find Consul server in remote datacenter")
	}
	node := services[0].Node.Node
	if c.conf.verbose {
		c.Ui.Info(fmt.Sprintf("Binding session to remote node %s@%s", node, c.conf.datacenter))
	}
	session := c.client.Session()
	se := consulapi.SessionEntry{
		Name:     fmt.Sprintf("Remote Exec via %s@%s", c.conf.localNode, c.conf.localDC),
		Node:     node,
		Checks:   []string{},
		Behavior: consulapi.SessionBehaviorDelete,
		TTL:      rExecTTL,
	}
	id, _, err := session.CreateNoChecks(&se, nil)
	return id, err
}
/**
 * Load the settings asynchronously.
 * @param callback called once the settings have been fetched (or the fetch has failed)
 * @return always null; the refreshed settings are delivered via the callback
 */
public String load(final SettingsCallback callback) {
    Logger.d("Requesting Segment.io settings ...");
    layer.fetch(new SettingsCallback() {
        @Override
        public void onSettingsLoaded(boolean success, EasyJSONObject settings) {
            if (settings == null) {
                Logger.w("Failed to fetch new Segment.io settings.");
            } else {
                EasyJSONObject container = new EasyJSONObject();
                Calendar rightNow = Calendar.getInstance();
                container.put(LAST_UPDATED_KEY, rightNow);
                container.put(SETTINGS_KEY, settings);
                reloads += 1;
                Logger.d("Successfully fetched new Segment.io settings.");
                set(container.toString());
            }
            if (callback != null) callback.onSettingsLoaded(success, settings);
        }
    });
    return null;
}
/*- * #%L * This file is part of "Apromore Community". * * Copyright (C) 2017 Alireza Ostovar. * %% * Copyright (C) 2018 - 2020 The University of Melbourne. * %% * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Lesser Public License for more details. * * You should have received a copy of the GNU General Lesser Public * License along with this program. If not, see * <http://www.gnu.org/licenses/lgpl-3.0.html>. * #L% */ package org.apromore.prodrift.graph.util; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import org.jbpt.graph.Edge; import org.jbpt.graph.Graph; import org.jbpt.hypergraph.abs.Vertex; /** * Computes all maximal c-cliques of an Undirected Graph. Used in conjunction * with the Modular Product of graphs, to solve the Maximum Commom Edge Subgraph * problem. * The implementation is an extension to well-known Bron-Kerbosch's Algorithm 457 * as described in: * * F. Cazals and C. Karande, An Algorithm for reporting maximal c-cliques. * Technical Computer Science 349 pp 484-490, 2005 * * @author Luciano Garcia-Banuelos * */ public class CCliqueFinder { Set<Set<Vertex>> cliques = null; SortedMap<Integer, Set<Set<Vertex>>> sortedCliques; private Graph graph; Map<Vertex, Set<Vertex>> d, c, n; public CCliqueFinder(Graph graph, Set<Edge> dedges, Set<Edge> cedges) { this.graph = graph; this.cliques = null; d = new HashMap<Vertex, Set<Vertex>>(); c = new HashMap<Vertex, Set<Vertex>>(); n = new HashMap<Vertex, Set<Vertex>>(); for (Vertex v: graph.getVertices()) { c.put(v, new HashSet<Vertex>()); d.put(v, new HashSet<Vertex>()); n.put(v, new HashSet<Vertex>()); } for (Edge edge: graph.getEdges()) { Vertex u = edge.getV1(); // graph.getEdgeSource(edge); Vertex v = edge.getV2(); //graph.getEdgeTarget(edge); if (cedges.contains(edge)) { c.get(u).add(v); c.get(v).add(u); } else { d.get(u).add(v); d.get(v).add(u); } n.get(u).add(v); n.get(v).add(u); } } public Set<Set<Vertex>> getAllMaximalCliques() { if (cliques == null) { cliques = new HashSet<Set<Vertex>>(); ccliqueInit(); } return cliques; } public Set<Set<Vertex>> getBiggestMaximalCliques() { getSortedCliques(); return sortedCliques.get(sortedCliques.lastKey()); } public SortedMap<Integer, Set<Set<Vertex>>> getSortedCliques() { if (sortedCliques == null) { cliques = new HashSet<Set<Vertex>>(); ccliqueInit(); sortedCliques = new TreeMap<Integer, Set<Set<Vertex>>>(); for (Set<Vertex> clique: cliques) { int size = clique.size(); Set<Set<Vertex>> subset = sortedCliques.get(size); if (subset == null) sortedCliques.put(size, subset = new HashSet<Set<Vertex>>()); subset.add(clique); } } return sortedCliques; } private void ccliqueInit() { Set<Vertex> T = new HashSet<Vertex>(); for (Vertex ui: graph.getVertices()) { Set<Vertex> R = new HashSet<Vertex>(); Set<Vertex> Q = new HashSet<Vertex>(); Set<Vertex> P = new HashSet<Vertex>(); Set<Vertex> Y = new HashSet<Vertex>(); Set<Vertex> X = new HashSet<Vertex>(); R.add(ui); Q.addAll(graph.getVertices()); Q.removeAll(T); Q.retainAll(d.get(ui)); P.addAll(graph.getVertices()); P.removeAll(T); 
P.retainAll(c.get(ui)); Y.addAll(d.get(ui)); Y.retainAll(T); X.addAll(c.get(ui)); X.retainAll(T); cclique(R,P,Q,X,Y); T.add(ui); } } private void cclique(Set<Vertex> r, Set<Vertex> p, Set<Vertex> q, Set<Vertex> x, Set<Vertex> y) { if (p.isEmpty() && x.isEmpty()) cliques.add(new HashSet<Vertex>(r)); else { while (!p.isEmpty()) { Vertex ui = p.iterator().next(); p.remove(ui); Set<Vertex> Rnew = new HashSet<Vertex>(); Set<Vertex> Qnew = new HashSet<Vertex>(); Set<Vertex> Pnew = new HashSet<Vertex>(); Set<Vertex> Ynew = new HashSet<Vertex>(); Set<Vertex> Xnew = new HashSet<Vertex>(); Rnew.addAll(r); Rnew.add(ui); Qnew.addAll(q); Qnew.retainAll(d.get(ui)); Set<Vertex> PClone = new HashSet<Vertex>(p); PClone.retainAll(n.get(ui)); Set<Vertex> QClone = new HashSet<Vertex>(q); QClone.retainAll(c.get(ui)); Pnew.addAll(PClone); Pnew.addAll(QClone); Ynew.addAll(y); Ynew.retainAll(d.get(ui)); Set<Vertex> XClone = new HashSet<Vertex>(x); XClone.retainAll(n.get(ui)); Set<Vertex> YClone = new HashSet<Vertex>(y); YClone.retainAll(c.get(ui)); Xnew.addAll(XClone); Xnew.addAll(YClone); cclique(Rnew, Pnew, Qnew, Xnew, Ynew); x.add(ui); } } } }
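As a usage illustration of the class above, the hedged sketch below builds a small undirected jbpt Graph, splits its edges into c-edges and d-edges, and prints every maximal c-clique. The Graph and Vertex constructors are assumed from jbpt's public API, and the three-vertex graph is made up for the example.

import java.util.HashSet;
import java.util.Set;

import org.jbpt.graph.Edge;
import org.jbpt.graph.Graph;
import org.jbpt.hypergraph.abs.Vertex;

public class CCliqueFinderExample {
    public static void main(String[] args) {
        Graph g = new Graph();
        Vertex a = new Vertex("a");
        Vertex b = new Vertex("b");
        Vertex c = new Vertex("c");

        // Partition the edges: c-edges are "connected" edges, d-edges are "disconnected" ones.
        Set<Edge> cedges = new HashSet<>();
        Set<Edge> dedges = new HashSet<>();
        cedges.add(g.addEdge(a, b));
        dedges.add(g.addEdge(b, c));
        dedges.add(g.addEdge(a, c));

        // Note the constructor order: d-edges first, then c-edges.
        CCliqueFinder finder = new CCliqueFinder(g, dedges, cedges);
        for (Set<Vertex> clique : finder.getAllMaximalCliques()) {
            System.out.println(clique);
        }
    }
}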
<reponame>stefanprodan/syros<filename>api/docker-routes.go package main import ( "net/http" "github.com/go-chi/chi" "github.com/go-chi/jwtauth" "github.com/go-chi/render" "github.com/stefanprodan/syros/models" ) func (s *HttpServer) dockerRoutes() chi.Router { r := chi.NewRouter() // JWT protected r.Group(func(r chi.Router) { r.Use(jwtauth.Verifier(s.TokenAuth)) r.Use(jwtauth.Authenticator) r.Get("/hosts", func(w http.ResponseWriter, r *http.Request) { hosts, err := s.Repository.AllHosts() if err != nil { render.Status(r, http.StatusInternalServerError) render.PlainText(w, r, err.Error()) return } render.JSON(w, r, hosts) }) r.Get("/hosts/{hostID}", func(w http.ResponseWriter, r *http.Request) { hostID := chi.URLParam(r, "hostID") payload, err := s.Repository.HostContainers(hostID) if err != nil { render.Status(r, http.StatusInternalServerError) render.PlainText(w, r, err.Error()) return } render.JSON(w, r, payload) }) r.Get("/environments/{env}", func(w http.ResponseWriter, r *http.Request) { env := chi.URLParam(r, "env") payload, err := s.Repository.EnvironmentContainers(env) if err != nil { render.Status(r, http.StatusInternalServerError) render.PlainText(w, r, err.Error()) return } deployments := models.ChartDto{ Labels: make([]string, 0), Values: make([]int64, 0), } // aggregate deployments per day based on container created date for _, cont := range payload.Containers { if cont.State != "running" { continue } date := cont.Created.Format("06-01-02") found := -1 for i, s := range deployments.Labels { if s == date { found = i break } } if found > -1 { deployments.Values[found]++ } else { deployments.Labels = append(deployments.Labels, date) deployments.Values = append(deployments.Values, 1) } } result := models.EnvironmentDto{ Host: payload.Host, Containers: payload.Containers, Deployments: deployments, } render.JSON(w, r, result) }) r.Get("/containers", func(w http.ResponseWriter, r *http.Request) { containers, err := s.Repository.AllContainers() if err != nil { render.Status(r, http.StatusInternalServerError) render.PlainText(w, r, err.Error()) return } render.JSON(w, r, containers) }) r.Get("/containers/{containerID}", func(w http.ResponseWriter, r *http.Request) { containerID := chi.URLParam(r, "containerID") payload, err := s.Repository.Container(containerID) if err != nil { render.Status(r, http.StatusInternalServerError) render.PlainText(w, r, err.Error()) return } render.JSON(w, r, payload) }) }) return r }
import { PitchValueScale, STANDARD_PITCH_INDEX_INDICATING_REST } from '@musical-patterns/material' import { as, combinationCount, ContourElement, ContourPiece, deepEqual, forEach, INCREMENT, indexOfFinalElement, keys, NEXT, Ordinal, use, } from '@musical-patterns/utilities' import { computeYerFactorizationByPitchClass, thunkBassContourPieces, thunkFirstHarmonyContourPieces, thunkLeadContourPieces, thunkOrderedPitchClassIndices, thunkSecondHarmonyContourPieces, YerFactor, YerFactorization, } from '../../../src/indexForTest' import { pitchClassIndexFromPitchIndexRespectingRests } from '../../support/helpers' import { PotentialFailure } from '../../support/types' describe('segments', (): void => { const SEGMENT_COUNT: number = 23 const QUARTERS_PER_SEGMENT: number = 4 const QUARTERS_COUNT: number = SEGMENT_COUNT * QUARTERS_PER_SEGMENT const COMBINATIONS_PER_QUARTER: number = combinationCount(QUARTERS_PER_SEGMENT, 2) const COMBINATIONS_COUNT: number = COMBINATIONS_PER_QUARTER * QUARTERS_COUNT const INDEX_OF_PITCH_IN_PITCH_DURATION_SCALE_CONTOUR: Ordinal = as.Ordinal(0) const INDEX_OF_DURATION_IN_PITCH_DURATION_SCALE_CONTOUR: Ordinal = as.Ordinal(1) const computePitchClassIndicesByQuarter: (contourPieces: Array<ContourPiece<PitchValueScale>>) => number[] = (contourPieces: Array<ContourPiece<PitchValueScale>>): number[] => { const pitchIndicesByQuarter: number[] = [] contourPieces.forEach((piece: ContourPiece<PitchValueScale>): void => { piece.forEach((element: ContourElement<PitchValueScale>): void => { for ( let quarter: Ordinal = as.Ordinal(0); as.number(quarter) < (use.Ordinal(element, INDEX_OF_DURATION_IN_PITCH_DURATION_SCALE_CONTOUR)); quarter = use.Cardinal(quarter, NEXT) ) { pitchIndicesByQuarter.push( use.Ordinal(element, INDEX_OF_PITCH_IN_PITCH_DURATION_SCALE_CONTOUR), ) } }) }) return pitchIndicesByQuarter.map(pitchClassIndexFromPitchIndexRespectingRests) } let leadContourPieces: Array<ContourPiece<PitchValueScale>> let bassContourPieces: Array<ContourPiece<PitchValueScale>> let firstHarmonyContourPieces: Array<ContourPiece<PitchValueScale>> let secondHarmonyContourPieces: Array<ContourPiece<PitchValueScale>> beforeEach((): void => { leadContourPieces = thunkLeadContourPieces() bassContourPieces = thunkBassContourPieces() firstHarmonyContourPieces = thunkFirstHarmonyContourPieces() secondHarmonyContourPieces = thunkSecondHarmonyContourPieces() }) it('each array of contour piece has the correct and same amount of segments', (): void => { expect(leadContourPieces.length) .toBe(SEGMENT_COUNT) expect(bassContourPieces.length) .toBe(SEGMENT_COUNT) expect(firstHarmonyContourPieces.length) .toBe(SEGMENT_COUNT) expect(secondHarmonyContourPieces.length) .toBe(SEGMENT_COUNT) }) it('each segment makes simple chords', (): void => { const leadPitchClassIndicesByQuarter: number[] = computePitchClassIndicesByQuarter(leadContourPieces) const bassPitchClassIndicesByQuarter: number[] = computePitchClassIndicesByQuarter(bassContourPieces) const firstHarmonyPitchClassIndicesByQuarter: number[] = computePitchClassIndicesByQuarter(firstHarmonyContourPieces) const secondHarmonyPitchClassIndicesByQuarter: number[] = computePitchClassIndicesByQuarter(secondHarmonyContourPieces) const quarters: number[][] = [] for (let index: Ordinal = as.Ordinal(0); as.number(index) < QUARTERS_COUNT; index = use.Cardinal(index, NEXT)) { quarters.push([ use.Ordinal(leadPitchClassIndicesByQuarter, index), use.Ordinal(bassPitchClassIndicesByQuarter, index), use.Ordinal(firstHarmonyPitchClassIndicesByQuarter, index), 
use.Ordinal(secondHarmonyPitchClassIndicesByQuarter, index), ]) } const combinationsOfPitches: Array<[ number, number ]> = [] quarters.forEach((quarter: number[]): void => { for ( let firstPitchIndex: Ordinal = as.Ordinal(0); firstPitchIndex < indexOfFinalElement(quarter); firstPitchIndex = use.Cardinal(firstPitchIndex, NEXT) ) { for ( let secondPitchIndex: Ordinal = use.Cardinal(firstPitchIndex, INCREMENT); as.number(secondPitchIndex) < quarter.length; secondPitchIndex = use.Cardinal(secondPitchIndex, NEXT) ) { combinationsOfPitches.push([ use.Ordinal(quarter, firstPitchIndex), use.Ordinal(quarter, secondPitchIndex), ]) } } }) const combinationsOfFactorizations: Array<[ YerFactorization, YerFactorization ]> = combinationsOfPitches.map(([ firstPitch, secondPitch ]: [ number, number ]): [ YerFactorization, YerFactorization ] => { if (firstPitch === as.number(STANDARD_PITCH_INDEX_INDICATING_REST) || secondPitch === as.number(STANDARD_PITCH_INDEX_INDICATING_REST)) { return [ {}, {} ] } return [ computeYerFactorizationByPitchClass(thunkOrderedPitchClassIndices()[ firstPitch ]), computeYerFactorizationByPitchClass(thunkOrderedPitchClassIndices()[ secondPitch ]), ] }) expect(combinationsOfFactorizations.length) .toBe(COMBINATIONS_COUNT) const DESIRED_TENSION_AT_END_OF_DESPERATION_1: number = 45 const DESIRED_TENSION_AT_END_OF_DESPERATION_2: number = 46 const DESIRED_TENSION_AT_END_OF_DESPERATION_3: number = 47 const DESIRED_TENSION_AT_END_OF_DESPERATION_4: number = 49 const DESIRED_TENSION_AT_END_OF_DESPERATION_5: number = 50 const DESIRED_TENSION_AT_END_OF_DESPERATION_6: number = 51 const DESIRED_TENSION_AT_END_OF_FIRST_QUIETUDE_SINCE_IT_IS_NOT_READY_TO_LOOP: number = 65 const exceptionalIndices: number[] = [ DESIRED_TENSION_AT_END_OF_DESPERATION_1, DESIRED_TENSION_AT_END_OF_DESPERATION_2, DESIRED_TENSION_AT_END_OF_DESPERATION_3, DESIRED_TENSION_AT_END_OF_DESPERATION_4, DESIRED_TENSION_AT_END_OF_DESPERATION_5, DESIRED_TENSION_AT_END_OF_DESPERATION_6, DESIRED_TENSION_AT_END_OF_FIRST_QUIETUDE_SINCE_IT_IS_NOT_READY_TO_LOOP, ] const potentialFailures: PotentialFailure[] = [] forEach( combinationsOfFactorizations, ([ firstFactorization, secondFactorization ]: [ YerFactorization, YerFactorization ], index: Ordinal<Array<[ YerFactorization, YerFactorization ]>>): void => { let differences: number = 0 keys(YerFactor) .forEach((factor: keyof typeof YerFactor): void => { if (!!firstFactorization[ factor ] !== !!secondFactorization[ factor ]) { differences += 1 } }) if (differences > 2) { const segment: number = Math.floor(as.number(index) / (6 * 4)) const quarter: number = Math.floor((as.number(index) % (6 * 4)) / 6) const patternQuarter: number = (segment * 4) + quarter if (exceptionalIndices.includes(patternQuarter)) { return } potentialFailures.forEach((potentialFailure: PotentialFailure): void => { if ( deepEqual(potentialFailure.firstFactorization, firstFactorization) && deepEqual(potentialFailure.secondFactorization, secondFactorization) && potentialFailure.patternQuarter === patternQuarter - 1 ) { fail(`There was a harmony sustained longer than a single quarter note which was too complex:\ ${JSON.stringify(firstFactorization)} ${JSON.stringify(secondFactorization)},\ at segment ${segment}, quarter ${quarter}.`) } }) potentialFailures.push({ firstFactorization, secondFactorization, patternQuarter }) } }, ) }) })
/** * A <code>StealPool</code> is one of the mechanisms to determine the activities that are to be executed by an {@link Executor}. * Each executor has two steal pools associated with it: the one it belongs to and the one it can steal from. In addition, an * executor can only execute activities whose {@link AbstractContext} matches with the {@link AbstractContext} of this executor. * * A <code>StealPool</code> consists of either a set of other steal pools, or a single string identification. */ public final class StealPool implements Serializable { private static final long serialVersionUID = -5231970051093339530L; private final String tag; private final StealPool[] set; /** * An executor that belongs to the <code>WORLD</code> steal pool generates activities that can in principle be stolen by any * other executor. An executor that can steal from this steal pool can steal from any pool, except <code>NONE</code>. */ public static StealPool WORLD = new StealPool("WORLD"); /** * An executor that belongs to the <code>NONE</code> steal pool generates activities that cannot be stolen. An executor that * can steal from this steal pool can in fact not steal at all. Note that <code>NONE</code> takes preference over * <code>WORLD</code>. */ public static StealPool NONE = new StealPool("NONE"); /** * Constructs a StealPool that in fact is a collection of other steal pools. If an executor can steal from such a steal pool, * it can in fact steal from any of its members. If an executor belongs to such a steal pool, it can be the steal target of * any executor having a member of this steal pool as its steal target. * * This constructor is private, so that the only way it can be used is through the {@link #merge(StealPool...)} method. * * @param set * the list of steal pools comprising the created steal pool. * @throws IllegalArgumentException * thrown when the argument list has null references or less than 2 elements. */ private StealPool(StealPool... set) { // Unreachable // //if (set == null || set.length < 2) { // throw new IllegalArgumentException("StealPool set should have at least 2 elements!"); //} this.set = set.clone(); // Return sorted result. Arrays.sort(this.set, new Comparator<StealPool>() { @Override public int compare(StealPool o1, StealPool o2) { return o1.tag.compareTo(o2.tag); } }); tag = null; } /** * Constructs a StealPool of the specified tag. * * @param tag * the tag * @throws IllegalArgumentException * when a null pointer is provided. */ public StealPool(String tag) { if (tag == null) { throw new IllegalArgumentException("tag of a single-string stealpool cannot be null"); } this.tag = tag; this.set = null; } /** * Returns a StealPool that is the merge result of the steal pools passed as arguments. If the argument list is empty, * {@link #NONE} is returned. If the argument list contains {@link #WORLD}, {@link #WORLD} is returned. * * @param pools * the steal pool arguments * @return the merge result * @throws IllegalArgumentException * when the argument list contains a null pointer. */ public static StealPool merge(StealPool... pools) { if (pools == null || pools.length == 0) { return NONE; } Set<StealPool> tmp = new HashSet<StealPool>(); for (StealPool pool : pools) { if (pool == null) { throw new IllegalArgumentException("StealPool list cannot have null references!"); } } for (StealPool pool : pools) { StealPool s = pool; if (s.set != null) { /* // Mode of this code is dead, since it test for cases that cannot occur due to the way a set is constructed. 
StealPool s2 = merge(s.set); // Now it is flattened. if (s2.equals(WORLD)) { return WORLD; } if (s2.set != null) { for (StealPool s3 : s2.set) { assert (s3.set == null); if (!s3.equals(NONE)) { tmp.add(s3); } } } else if (!s2.equals(NONE)) { tmp.add(s2); } */ for (StealPool s2 : s.set) { tmp.add(s2); } } else { if (s.equals(WORLD)) { return WORLD; } if (!s.equals(NONE)) { tmp.add(pool); } } } return getStealPoolFromSet(tmp); } private static StealPool getStealPoolFromSet(Set<StealPool> tmp) { if (tmp.size() == 0) { // May happen if all StealPools are NONE return NONE; } if (tmp.size() == 1) { return tmp.iterator().next(); } return new StealPool(tmp.toArray(new StealPool[tmp.size()])); } /** * Determines if this steal pool has some member steal pool in common with the specified steal pool. {@link #NONE} never * overlaps, not even with {@link #NONE}. And then {@link #WORLD} overlaps with any steal pool, except {@link #NONE}. * * @param other * the steal pool to determine overlap with. * @return whether there is overlap between the steal pools. */ public boolean overlap(StealPool other) { // None does not overlap with anything, not even with None. if (isNone() || other.isNone()) { return false; } if (other == this) { return true; } // WORLD overlaps with anything. if (isWorld() || other.isWorld()) { return true; } if (set != null) { if (other.set != null) { return setOverlap(set, other.set); } return setContains(set, other.tag); } if (other.set != null) { return setContains(other.set, tag); } return tag.equals(other.tag); } private static boolean setContains(StealPool[] set, String tag) { for (StealPool s : set) { int cmp = tag.compareTo(s.tag); if (cmp == 0) { return true; } if (cmp < 0) { break; } } return false; } private static boolean setOverlap(StealPool[] set1, StealPool[] set2) { int i = 0; int oi = 0; while (i < set1.length && oi < set2.length) { int cmp = set1[i].tag.compareTo(set2[oi].tag); if (cmp < 0) { i++; } else if (cmp > 0) { oi++; } else { return true; } } return false; } /** * Returns the tag of this steal pool. It this steal pool in fact consists of other steal pools, <code>null</code> is * returned. * * @return the tag of this steal pool. */ public String getTag() { return tag; } /** * Determines if this steal pool is equal to the {@link #WORLD} steal pool. * * @return if this steal pool is the {@link #WORLD} steal pool. */ public boolean isWorld() { return this.equals(WORLD); } /** * Determines if this steal pool is equal to the {@link #NONE} steal pool. * * @return if this steal pool is the {@link #NONE} steal pool. */ public boolean isNone() { return this.equals(NONE); } /** * Returns the list of steal pools of which this steal pool consists. If this steal pool has no members, a list with only this * steal pools is returned. * * @return the member steal pools. */ public StealPool[] set() { if (set == null) { return new StealPool[] { this }; } return set.clone(); } /** * Selects a random member steal pool from the steal pool at hand. * * @param random * the random number generator to use * * @return a random member steal pool. 
*/ public StealPool randomlySelectPool(Random random) { StealPool[] tmp = set(); return tmp[random.nextInt(tmp.length)]; } @Override public String toString() { if (set != null) { return Arrays.toString(set); } return tag; } @Override public int hashCode() { if (set != null) { return Arrays.hashCode(set); } return tag.hashCode(); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } StealPool other = (StealPool) obj; if ((set == null) != (other.set == null)) { return false; } if (set == null) { return tag.equals(other.tag); } if (other.set.length != set.length) { return false; } for (int i = 0; i < set.length; i++) { if (!set[i].equals(other.set[i])) { return false; } } return true; } }
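A short usage sketch of the semantics documented above: merging two tagged pools and checking overlap against a member, against WORLD, and against NONE. The pool tags are arbitrary examples.

public class StealPoolExample {
    public static void main(String[] args) {
        StealPool compute = new StealPool("compute");
        StealPool io = new StealPool("io");
        StealPool both = StealPool.merge(compute, io);

        System.out.println(both.overlap(compute));                   // true: "compute" is a member of the merged pool
        System.out.println(compute.overlap(io));                     // false: different tags
        System.out.println(StealPool.WORLD.overlap(both));           // true: WORLD overlaps everything except NONE
        System.out.println(StealPool.NONE.overlap(StealPool.NONE));  // false: NONE never overlaps, not even with itself
    }
}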
Forget ‘Yes’ and ‘No’ or ‘In’ and ‘Out’, the EU referendum question looks set to be a choice between ‘remain’ or ‘leave’. The Electoral Commission has recommended a change from the current question in the EU Referendum Bill: ‘Should the United Kingdom remain a member of the European Union?’ To which the response is either ‘Yes’ or ‘No’. This wording could be judged to be helpful for those who want Britain to remain in the EU, as the question was posed in a particularly negative way as well as involving a change from the status quo. The Commission has assessed this wording and has recommended the question should be changed to: ‘Should the United Kingdom remain a member of the European Union or leave the European Union?’ And the new responses would be ‘Remain a member of the European Union’ or ‘Leave the European Union’. The government has announced they will accept this new wording and the bill will be amended when Parliament returns next week. This new wording still has negative connotations for the Out-ers because the question is suggesting changing the status quo but ‘leave’ is certainly less negative than ‘no’ or ‘out’. The Eurosceptic camps will be pleased that the Electoral Commission is making their lives a little bit easier — but the question is not overwhelmingly in their favour. So, expect to see the ‘Leave’ camps bombarding voters over the coming months with positive messages about the life Britain could have outside the EU. And the ‘Remain’ camps will talk up the dangers if we change Britain’s current arrangement. UPDATE: Ukip’s Nigel Farage approves of the new wording:
/// Returns a future that will return a result for the command `cmd`. fn cmd( &mut self, cmd: <Self::ProcLoop as ProcLoop>::Cmd, ) -> SessFuture<'_, Result<<Self::ProcLoop as ProcLoop>::CmdRes, Self::Error>> { let mut input = In::from_cmd(cmd); Box::pin(async move { loop { let fib::Yielded(output) = self.fib().resume(input); input = match output { Out::Req(req) => In::from_req_res(self.run_req(req).await?), Out::CmdRes(res) => break Ok(res), } } }) }
import { compress } from './core'; export { compress }; export default compress;
def computeMoleFractions(targets, reactionModel, reactionSystem):
    """Return an array with the current mole fraction of each target species.

    Each entry of `targets` is a species label; its index in the reaction
    model is looked up and the corresponding entry of the solver state
    vector `reactionSystem.y` is copied into the result.
    """
    moleFractions = np.zeros(len(targets), np.float64)
    for i, label in enumerate(targets):
        targetIndex = searchTargetIndex(label, reactionModel)
        moleFractions[i] = reactionSystem.y[targetIndex]
    return moleFractions
/** * Unit test for {@link AbstractAction}. */ public class AbstractActionTest { @Test public void perform_noListener_outputReturned() { var action = new TestAction(); var output = action.perform(); assertThat(output).isEqualTo(action.getPerformCount()); } @Test public void perform_listener_eventFired() { var action = new TestAction(); var listenerNotified = new AtomicBoolean(false); action.addPerformListener(event -> { assertThat(event.getOutput()).isEqualTo(action.getPerformCount()); assertThat(event.getAction()).isSameAs(action); listenerNotified.set(true); }); action.perform(); assertThat(listenerNotified).isTrue(); } @Test(expected = IllegalStateException.class) public void perform_notPerformable_exceptionThrown() { var action = new TestAction(); action.setPerformable(false); action.perform(); } }
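The AbstractAction and TestAction classes under test are not included here. Below is one minimal sketch, inferred from the assertions, of what they might look like: a listener list, a performable flag guarding perform(), and a TestAction whose output equals its own perform count. The names, generics and listener type are assumptions rather than the original API.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

abstract class AbstractAction<T> {
    public static final class PerformEvent<T> {
        private final AbstractAction<T> action;
        private final T output;
        PerformEvent(AbstractAction<T> action, T output) {
            this.action = action;
            this.output = output;
        }
        public AbstractAction<T> getAction() { return action; }
        public T getOutput() { return output; }
    }

    private final List<Consumer<PerformEvent<T>>> listeners = new ArrayList<>();
    private boolean performable = true;

    public void addPerformListener(Consumer<PerformEvent<T>> listener) {
        listeners.add(listener);
    }

    public void setPerformable(boolean performable) {
        this.performable = performable;
    }

    public final T perform() {
        if (!performable) {
            throw new IllegalStateException("Action is not performable");
        }
        T output = doPerform();
        // Notify listeners after the output has been produced.
        listeners.forEach(listener -> listener.accept(new PerformEvent<>(this, output)));
        return output;
    }

    protected abstract T doPerform();
}

class TestAction extends AbstractAction<Integer> {
    private int performCount;

    public int getPerformCount() { return performCount; }

    @Override
    protected Integer doPerform() {
        // The returned output always equals the updated perform count.
        return ++performCount;
    }
}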
/** * This map implementation use an algorithm to maintains a fixed size into the map with the elements * least recently used (LRU). * @param <K> Key type. * @param <V> Value type. */ public class LruMap<K extends Object, V extends Object> implements Map<K,V> { private Integer maxSize; private final List<Key<K>> keys; private final Map<K,Key<K>> metadata; private final Map<K,V> mapInstance; private final List<RemoveOverflowListener<K, V>> listeners; public LruMap() { this(SystemProperties.getInteger(SystemProperties.HCJF_DEFAULT_LRU_MAP_SIZE)); } public LruMap(Integer maxSize) { this.keys = new ArrayList<>(); this.metadata = new HashMap<>(); this.mapInstance = new HashMap<>(); this.maxSize = maxSize; this.listeners = new ArrayList<>(); } /** * Add a listener remove overflow listener. * @param listener Listener instance. */ public final void addRemoveOverflowListener(RemoveOverflowListener<K,V> listener) { if(listener != null) { listeners.add(listener); } } /** * Returns the max size of the map. * @return Max size of the map. */ public final Integer getMaxSize() { return maxSize; } /** * Set the max size of the map. * @param maxSize Max size of the map. */ public synchronized final void setMaxSize(Integer maxSize) { this.maxSize = maxSize; removeOverflow(); } /** * Update the temporal component into the keys. * @param keys Keys to updateMetadata. */ private void updateMetadata(Key<K>... keys) { for(Key<K> key : keys) { key.update(); } Collections.sort(this.keys); } /** * This method remove the overflow elements into the map. */ private void removeOverflow() { for (int i = 0; i < keys.size() - maxSize; i++) { Key<K> key = keys.remove(keys.size() -1); metadata.remove(key.getKey()); V value = mapInstance.remove(key.getKey()); listeners.forEach(L -> L.onRemove(key.getKey(), value)); } } @Override public int size() { return mapInstance.size(); } @Override public boolean isEmpty() { return mapInstance.isEmpty(); } @Override public boolean containsKey(Object key) { return mapInstance.containsKey(key); } @Override public boolean containsValue(Object value) { return mapInstance.containsValue(value); } @Override public synchronized V get(Object key) { V result = null; if(mapInstance.containsKey(key)) { updateMetadata(metadata.get(key)); result = mapInstance.get(key); } return result; } @Override public synchronized V put(K key, V value) { V result = mapInstance.put(key, value); Key<K> temporalKey = new Key<>(key); keys.add(temporalKey); metadata.put(key, temporalKey); updateMetadata(); removeOverflow(); return result; } @Override public synchronized V remove(Object key) { V result = mapInstance.remove(key); keys.remove(metadata.remove(key)); updateMetadata(); return result; } @Override public synchronized void putAll(Map<? extends K, ? 
extends V> m) { mapInstance.putAll(m); Key<K> temporalKey; for(K key : m.keySet()) { temporalKey = new Key<>(key); keys.add(temporalKey); metadata.put(key, temporalKey); } updateMetadata(); removeOverflow(); } @Override public synchronized void clear() { mapInstance.clear(); metadata.clear(); keys.clear(); } @Override public Set<K> keySet() { return mapInstance.keySet(); } @Override public Collection<V> values() { return mapInstance.values(); } @Override public Set<Entry<K, V>> entrySet() { return mapInstance.entrySet(); } @Override public boolean equals(Object o) { return mapInstance.equals(o); } @Override public int hashCode() { return mapInstance.hashCode(); } @Override public V getOrDefault(Object key, V defaultValue) { return mapInstance.getOrDefault(key, defaultValue); } @Override public void forEach(BiConsumer<? super K, ? super V> action) { mapInstance.forEach(action); } @Override public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { mapInstance.replaceAll(function); } @Override public V putIfAbsent(K key, V value) { return mapInstance.putIfAbsent(key, value); } @Override public boolean remove(Object key, Object value) { return mapInstance.remove(key, value); } @Override public boolean replace(K key, V oldValue, V newValue) { return mapInstance.replace(key, oldValue, newValue); } @Override public V replace(K key, V value) { return mapInstance.replace(key, value); } @Override public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { return mapInstance.computeIfAbsent(key, mappingFunction); } @Override public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { return mapInstance.computeIfPresent(key, remappingFunction); } @Override public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { return mapInstance.compute(key, remappingFunction); } @Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { return mapInstance.merge(key, value, remappingFunction); } /** * This class represents a key with a temporal component in order to knows what key is older than other. * @param <K> Key type. */ private static final class Key<K extends Object> implements Comparable<Key> { private final K key; private Long lastUpdate; public Key(K key) { this.key = key; this.lastUpdate = System.currentTimeMillis(); } public K getKey() { return key; } public void update() { lastUpdate = System.currentTimeMillis(); } @Override public int hashCode() { return key.hashCode(); } @Override public boolean equals(Object obj) { boolean result = false; if(obj instanceof Key) { result = key.equals(((Key)obj).key); } return result; } @Override public String toString() { return key.toString(); } @Override public int compareTo(Key o) { return lastUpdate.compareTo(o.lastUpdate) * -1; } } /** * This interface provides the method to listener when an object is deleted because is part fo the overflow. * @param <K> Expected key data type. * @param <V> Expected value data type. */ public interface RemoveOverflowListener<K extends Object, V extends Object> { void onRemove(K key, V value); } }
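A brief usage sketch of the map above: with a maxSize of 2, reading "a" refreshes its timestamp, so inserting a third entry evicts "b" and notifies the overflow listener. The short sleeps are only there because the implementation orders keys by millisecond timestamps; the values are arbitrary.

public class LruMapExample {
    public static void main(String[] args) throws InterruptedException {
        LruMap<String, Integer> map = new LruMap<>(2);
        map.addRemoveOverflowListener((key, value) ->
                System.out.println("evicted " + key + "=" + value));

        map.put("a", 1);
        Thread.sleep(5);            // ensure distinct millisecond timestamps
        map.put("b", 2);
        Thread.sleep(5);
        map.get("a");               // refresh "a", so "b" is now the least recently used entry
        Thread.sleep(5);
        map.put("c", 3);            // exceeds maxSize, evicts "b" and prints "evicted b=2"

        System.out.println(map.keySet());   // contains "a" and "c"
    }
}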
<filename>linear_acc [old]/CtrlGig_Mega/CtrlGigMega/ArduinoCore/include/defines.h #ifndef DEFINES #define DEFINES #include <Arduino.h> #define F_CPU 16000000UL #define _DEBUG //#define _DEBUG_COM_WHILE_SPEED_CTRL //#define max(a,b) \ //({ __typeof__ (a) _a = (a); \ // __typeof__ (b) _b = (b); \ //_a > _b ? _a : _b; }) // //#define min(a,b) \ //({ __typeof__ (a) _a = (a); \ // __typeof__ (b) _b = (b); \ //_a < _b ? _a : _b; }) //Casting and masks for 8 bits for different variables sizes and bytes positions. #define lo8(x) ((uint8_t)((x) & 0xff)) #define hi8(x) ((uint8_t)((x & 0xff00)>>8)) #define hi16(x) ((uint8_t)((x & 0xff0000)>>16)) #define hi24(x) ((uint8_t)((x & 0xff000000)>>24)) #define UART_HEADER_1 'S' #define UART_HEADER_COM 'c' #define UART_HEADER_START 't' #define REC_VECTOR_SIZE 400 const uint8_t END_MSG[] = {0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00}; #define MBASE_DIR 22 #define MBASE_STEP 24 #define MBASE_RESET 26 #define MBASE_MS3 2 #define MBASE_MS2 3 #define MBASE_MS1 4 #define MBASE_ENABLE 5 #define MTOP_DIR 6 #define MTOP_STEP 7 #define MTOP_RESET 8 #define MTOP_MS3 9 #define MTOP_MS2 10 #define MTOP_MS1 11 #define MTOP_ENABLE 12 #define PIN_LED_NATIVE 13 #define PIN_LED_BASE_LOCK 25 #define SERIAL_BAUD 57600 #define DEFAULT_MS 16 //the microsteping that is used here #define FS_REVOLUTION 200 //full steps per revolution #define MAX_STEPS_SINGLE_DIR_BASE ((int32_t)(DEFAULT_MS*FS_REVOLUTION*2)) //this is 16*200*2, two turns of the top motor 6400 steps #define INITIAL_SPEED 2 //Initial speed in degrees per second #define MAX_SPEED 720 //Max angular speed (2 turns per second) #define INITIAL_SPEED_PERIOD ((float)((1E6*360)/(INITIAL_SPEED*DEFAULT_MS*FS_REVOLUTION))) //Maximum period which corresponds to the minimum speed #define MAX_SPEED_PERIOD ((float)((1E6*360)/(MAX_SPEED*DEFAULT_MS*FS_REVOLUTION))) //Minimum period which corresponds to the maximum speed #define ACC_MOD .25 //degrees per second square #define INV_ACC_MOD 4 #define BREAKING_MOD .25 #define INV_BREAKING_MOD 4 #define RATIO_ACC_BREAKING 0.5 //this is the % of total steps that will be dedicated to acceleration. if the acceleration is twice slower than breaking, than this ratio should be .666 for example typedef enum{ FULL_STEP, HALF_STEP, QUARTER_STEP, EIGHTH_STEP, SIXTEENTH_STEP, }TYPE_MICRO_STEP; typedef enum{ IDLE, ROTATING, RECEIVING, }TYPE_SYSTEM_STATE; //direction definitions typedef enum{ DIR_NEGATIVE = 0, //CW DIR_POSITIVE = 1, //CCW }TYPE_DIRECTION; //PIN DEFINES: FOR ARDUINO MEGA -- those defines improve the performance of a key part of the code #define CLEAR_NATIVE_LED PORTB &= ~(1<<(PB7)); #define SET_NATIVE_LED PORTB |= (1<<(PB7)); #define TOGGLE_NATIVE_LED PINB |= (1<<(PB7)); #define CLEAR_MTOP_STEP_PIN PORTH &= ~(1<<(PH4)); #define SET_MTOP_STEP_PIN PORTH |= (1<<(PH4)); #define CLEAR_MBASE_STEP_PIN PORTA &= ~(1<<(PA2)); #define SET_MBASE_STEP_PIN PORTA |= (1<<(PA2)); #endif