Dataset columns:

    diff                 string, lengths 262 to 553k
    is_single_chunk      bool, 2 classes
    is_single_function   bool, 1 class
    buggy_function       string, lengths 20 to 391k
    fixed_function       string, lengths 0 to 392k
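The rows below each pair a unified git diff with the buggy and fixed versions of the changed function, plus the two boolean flags. As a rough sketch of how a consumer might model one row in Java (field semantics are inferred from the column names above; the actual on-disk serialization of the dataset is not specified here):

// Illustrative sketch only (Java 16+ record). Field names mirror the schema;
// the comments restate the column statistics listed above, nothing more.
public record BugFixRow(
        String diff,              // unified git diff, 262 to ~553k characters
        boolean isSingleChunk,    // "is_single_chunk" flag (2 classes observed)
        boolean isSingleFunction, // "is_single_function" flag (1 class observed)
        String buggyFunction,     // function before the fix, 20 to ~391k characters
        String fixedFunction) {   // function after the fix, may be empty (0 to ~392k characters)
}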
diff --git a/test/src/main/java/org/jvnet/hudson/test/WarExploder.java b/test/src/main/java/org/jvnet/hudson/test/WarExploder.java index 11311475a..3df822581 100644 --- a/test/src/main/java/org/jvnet/hudson/test/WarExploder.java +++ b/test/src/main/java/org/jvnet/hudson/test/WarExploder.java @@ -1,53 +1,53 @@ package org.jvnet.hudson.test; import hudson.remoting.Which; import hudson.FilePath; import java.io.File; import java.io.IOException; import java.io.FileOutputStream; import java.net.URL; /** * Ensures that <tt>hudson.war</tt> is exploded. * * @author Kohsuke Kawaguchi */ final class WarExploder { public static final File EXPLODE_DIR = explode(); /** * Explodes a war, if necesasry, and returns its root dir. */ private static File explode() { try { // locate hudson.war URL winstone = WarExploder.class.getResource("/winstone.jar"); if(winstone==null) // impossible, since the test harness pulls in hudson.war throw new AssertionError("hudson.war is not in the classpath."); File war = Which.jarFile(Class.forName("executable.Executable")); File explodeDir = new File("./target/hudson-for-test"); File timestamp = new File(explodeDir,".timestamp"); - if(!timestamp.exists() || (timestamp.lastModified()==war.lastModified())) { + if(!timestamp.exists() || (timestamp.lastModified()!=war.lastModified())) { System.out.println("Exploding hudson.war at "+war); new FilePath(explodeDir).deleteRecursive(); new FilePath(war).unzip(new FilePath(explodeDir)); new FileOutputStream(timestamp).close(); timestamp.setLastModified(war.lastModified()); } else { System.out.println("Picking up existing exploded hudson.war at "+explodeDir.getAbsolutePath()); } return explodeDir; } catch (IOException e) { throw new Error(e); } catch (InterruptedException e) { throw new Error(e); } catch (ClassNotFoundException e) { throw new Error(e); } } }
true
true
private static File explode() { try { // locate hudson.war URL winstone = WarExploder.class.getResource("/winstone.jar"); if(winstone==null) // impossible, since the test harness pulls in hudson.war throw new AssertionError("hudson.war is not in the classpath."); File war = Which.jarFile(Class.forName("executable.Executable")); File explodeDir = new File("./target/hudson-for-test"); File timestamp = new File(explodeDir,".timestamp"); if(!timestamp.exists() || (timestamp.lastModified()==war.lastModified())) { System.out.println("Exploding hudson.war at "+war); new FilePath(explodeDir).deleteRecursive(); new FilePath(war).unzip(new FilePath(explodeDir)); new FileOutputStream(timestamp).close(); timestamp.setLastModified(war.lastModified()); } else { System.out.println("Picking up existing exploded hudson.war at "+explodeDir.getAbsolutePath()); } return explodeDir; } catch (IOException e) { throw new Error(e); } catch (InterruptedException e) { throw new Error(e); } catch (ClassNotFoundException e) { throw new Error(e); } }
private static File explode() { try { // locate hudson.war URL winstone = WarExploder.class.getResource("/winstone.jar"); if(winstone==null) // impossible, since the test harness pulls in hudson.war throw new AssertionError("hudson.war is not in the classpath."); File war = Which.jarFile(Class.forName("executable.Executable")); File explodeDir = new File("./target/hudson-for-test"); File timestamp = new File(explodeDir,".timestamp"); if(!timestamp.exists() || (timestamp.lastModified()!=war.lastModified())) { System.out.println("Exploding hudson.war at "+war); new FilePath(explodeDir).deleteRecursive(); new FilePath(war).unzip(new FilePath(explodeDir)); new FileOutputStream(timestamp).close(); timestamp.setLastModified(war.lastModified()); } else { System.out.println("Picking up existing exploded hudson.war at "+explodeDir.getAbsolutePath()); } return explodeDir; } catch (IOException e) { throw new Error(e); } catch (InterruptedException e) { throw new Error(e); } catch (ClassNotFoundException e) { throw new Error(e); } }
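Row above: the WarExploder fix inverts the staleness test, so the archive is re-exploded when the cached timestamp differs from the war's modification time rather than when it matches (the buggy version never picked up a newer war). A minimal sketch of that check in isolation; the class name and paths below are hypothetical, not the ones used by the test harness:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

// Illustrative only: the corrected staleness check from the row above, reduced
// to the marker-file pattern. Paths are placeholders.
public class StalenessCheckSketch {
    public static void main(String[] args) throws IOException {
        File war = new File("build/sample.war");               // hypothetical source archive
        File explodeDir = new File("build/sample-exploded");
        File timestamp = new File(explodeDir, ".timestamp");

        // Rebuild when there is no marker yet, or when the marker's time no
        // longer matches the archive's time (i.e. the archive changed).
        boolean stale = !timestamp.exists()
                || timestamp.lastModified() != war.lastModified();

        if (stale) {
            explodeDir.mkdirs();
            // ... unzip the archive into explodeDir here ...
            new FileOutputStream(timestamp).close();           // create/refresh the marker
            timestamp.setLastModified(war.lastModified());      // record the archive's time
        }
        System.out.println(stale ? "re-exploded" : "reused existing directory");
    }
}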
diff --git a/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java b/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java index 48269fd9a..9a2652542 100644 --- a/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java +++ b/module/com.noelios.restlet/src/com/noelios/restlet/Factory.java @@ -1,670 +1,672 @@ /* * Copyright 2005-2006 Noelios Consulting. * * The contents of this file are subject to the terms of the Common Development * and Distribution License (the "License"). You may not use this file except in * compliance with the License. * * You can obtain a copy of the license at * http://www.opensource.org/licenses/cddl1.txt See the License for the specific * language governing permissions and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each file and * include the License file at http://www.opensource.org/licenses/cddl1.txt If * applicable, add the following below this CDDL HEADER, with the fields * enclosed by brackets "[]" replaced with your own identifying information: * Portions Copyright [yyyy] [name of copyright owner] */ package com.noelios.restlet; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; import java.net.URL; import java.util.ArrayList; import java.util.Enumeration; import java.util.Iterator; import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; import org.restlet.Application; import org.restlet.Client; import org.restlet.Container; import org.restlet.Context; import org.restlet.Directory; import org.restlet.Restlet; import org.restlet.Router; import org.restlet.Scorer; import org.restlet.Server; import org.restlet.data.ClientInfo; import org.restlet.data.Form; import org.restlet.data.Language; import org.restlet.data.MediaType; import org.restlet.data.Parameter; import org.restlet.data.Preference; import org.restlet.data.Protocol; import org.restlet.data.Request; import org.restlet.resource.Representation; import org.restlet.resource.Resource; import org.restlet.util.Helper; import com.noelios.restlet.application.ApplicationHelper; import com.noelios.restlet.container.ContainerHelper; import com.noelios.restlet.local.DirectoryResource; import com.noelios.restlet.util.FormUtils; /** * Restlet factory supported by the engine. * * @author Jerome Louvel ([email protected]) */ public class Factory extends org.restlet.util.Factory { /** Obtain a suitable logger. */ private static Logger logger = Logger.getLogger(Factory.class .getCanonicalName()); public static final String VERSION_LONG = org.restlet.util.Factory.VERSION_LONG; public static final String VERSION_SHORT = org.restlet.util.Factory.VERSION_SHORT; public static final String VERSION_HEADER = "Noelios-Restlet-Engine/" + VERSION_SHORT; /** * Registers a new Noelios Restlet Engine. */ public static void register() { Factory.setInstance(new Factory()); } /** List of available client connectors. */ private List<ConnectorHelper> registeredClients; /** List of available server connectors. */ private List<ConnectorHelper> registeredServers; /** * Constructor that will automatically attempt to discover connectors. */ @SuppressWarnings("unchecked") public Factory() { this(true); } /** * Constructor. * * @param discoverConnectors * True if connectors should be automatically discovered. 
*/ @SuppressWarnings("unchecked") public Factory(boolean discoverConnectors) { if (discoverConnectors) { // Find the factory class name String line = null; String provider = null; // Find the factory class name ClassLoader cl = org.restlet.util.Factory.getClassLoader(); URL configURL; // Register the client connector providers try { for (Enumeration<URL> configUrls = cl .getResources("META-INF/services/com.noelios.restlet.ClientHelper"); configUrls .hasMoreElements();) { configURL = configUrls.nextElement(); BufferedReader reader = null; try { reader = new BufferedReader(new InputStreamReader( configURL.openStream(), "utf-8")); line = reader.readLine(); while (line != null) { provider = getProviderClassName(line); if ((provider != null) && (!provider.equals(""))) { // Instantiate the factory try { Class<? extends ConnectorHelper> providerClass = (Class<? extends ConnectorHelper>) Class .forName(provider); getRegisteredClients().add( providerClass.getConstructor( Client.class).newInstance( (Client) null)); } catch (Exception e) { logger.log(Level.SEVERE, "Unable to register the client connector " + provider, e); } } line = reader.readLine(); } } catch (IOException e) { logger.log(Level.SEVERE, "Unable to read the provider descriptor: " + configURL.toString()); } finally { if (reader != null) reader.close(); } } } catch (IOException ioe) { logger .log( Level.SEVERE, "Exception while detecting the client connectors.", ioe); } // Register the server connector providers try { for (Enumeration<URL> configUrls = cl .getResources("META-INF/services/com.noelios.restlet.ServerHelper"); configUrls .hasMoreElements();) { configURL = configUrls.nextElement(); BufferedReader reader = null; try { reader = new BufferedReader(new InputStreamReader( configURL.openStream(), "utf-8")); line = reader.readLine(); while (line != null) { provider = getProviderClassName(line); if ((provider != null) && (!provider.equals(""))) { // Instantiate the factory try { Class<? extends ConnectorHelper> providerClass = (Class<? extends ConnectorHelper>) Class .forName(provider); getRegisteredServers().add( providerClass.getConstructor( Server.class).newInstance( (Server) null)); } catch (Exception e) { logger.log(Level.SEVERE, "Unable to register the server connector " + provider, e); } } line = reader.readLine(); } } catch (IOException e) { logger.log(Level.SEVERE, "Unable to read the provider descriptor: " + configURL.toString()); } finally { if (reader != null) reader.close(); } } } catch (IOException ioe) { logger .log( Level.SEVERE, "Exception while detecting the client connectors.", ioe); } } } /** * Returns the list of available client connectors. * * @return The list of available client connectors. */ public List<ConnectorHelper> getRegisteredClients() { if (this.registeredClients == null) this.registeredClients = new ArrayList<ConnectorHelper>(); return this.registeredClients; } /** * Returns the list of available server connectors. * * @return The list of available server connectors. */ public List<ConnectorHelper> getRegisteredServers() { if (this.registeredServers == null) this.registeredServers = new ArrayList<ConnectorHelper>(); return this.registeredServers; } /** * Creates a directory resource. * * @param handler * The parent directory handler. * @param request * The handled call. * @return A new directory resource. 
* @throws IOException */ public Resource createDirectoryResource(Directory handler, Request request) throws IOException { return new DirectoryResource(handler, request); } /** * Creates a new helper for a given container. * * @param application * The application to help. * @param parentContext * The parent context, typically the container's context. * @return The new helper. */ public Helper createHelper(Application application, Context parentContext) { return new ApplicationHelper(application, parentContext); } /** * Creates a new helper for a given client connector. * * @param client * The client to help. * @return The new helper. */ public Helper createHelper(Client client) { for (ConnectorHelper registeredClient : getRegisteredClients()) { if (registeredClient.getProtocols().containsAll( client.getProtocols())) { try { return registeredClient.getClass().getConstructor( Client.class).newInstance(client); } catch (Exception e) { logger .log( Level.SEVERE, "Exception while instantiation the client connector.", e); } return registeredClient; } } logger.log(Level.WARNING, "No available client connector supports the required protocols: " + client.getProtocols()); return null; } /** * Creates a new helper for a given container. * * @param container * The container to help. * @return The new helper. */ public Helper createHelper(Container container) { return new ContainerHelper(container); } /** * Creates a new helper for a given server connector. * * @param server * The server to help. * @return The new helper. */ public Helper createHelper(Server server) { Helper result = null; if (server.getProtocols().size() > 0) { for (ConnectorHelper registeredServer : getRegisteredServers()) { if (registeredServer.getProtocols().containsAll( server.getProtocols())) { try { result = registeredServer.getClass().getConstructor( Server.class).newInstance(server); } catch (Exception e) { logger .log( Level.SEVERE, "Exception while instantiation the server connector.", e); } } } if (result == null) { // Couldn't find a matching connector StringBuilder sb = new StringBuilder(); sb .append("No available server connector supports the required protocols: "); for (Protocol p : server.getProtocols()) { sb.append(p.getName()).append(" "); } logger.log(Level.WARNING, sb.toString()); } } return result; } /** * Creates a URI-based Restlet attachment that will score target instance * shared by all calls. The score will be proportional to the number of * chararacters matched by the pattern, from the start of the context * resource path. * * @param router * The parent router. * @param pattern * The URI pattern used to map calls (see * {@link java.util.regex.Pattern} for the syntax). * @param target * The target Restlet to attach. * @see java.util.regex.Pattern */ public Scorer createScorer(Router router, String pattern, Restlet target) { return new PatternScorer(router, pattern, target); } /** * Returns the preferred variant representation for a given resource * according the the client preferences. * * @param client * The client preferences. * @param variants * The list of variants to compare. * @return The best variant representation. 
* @see <a * href="http://httpd.apache.org/docs/2.2/en/content-negotiation.html#algorithm">Apache * content negotiation algorithm</a> */ public Representation getPreferredVariant(ClientInfo client, List<Representation> variants) { if (variants == null) { return null; } else { Parameter currentParam = null; Language currentLanguage = null; Language variantLanguage = null; MediaType currentMediaType = null; MediaType variantMediaType = null; boolean compatiblePref = false; boolean compatibleLanguage = false; boolean compatibleMediaType = false; Representation currentVariant = null; Representation bestVariant = null; Preference<Language> currentLanguagePref = null; Preference<Language> bestLanguagePref = null; Preference<MediaType> currentMediaTypePref = null; Preference<MediaType> bestMediaTypePref = null; float bestQuality = 0; float currentScore = 0; float bestLanguageScore = 0; float bestMediaTypeScore = 0; // For each available variant, we will compute the negotiation score // which is dependant on the language score and on the media type // score for (Iterator iter1 = variants.iterator(); iter1.hasNext();) { currentVariant = (Representation) iter1.next(); variantLanguage = currentVariant.getLanguage(); variantMediaType = currentVariant.getMediaType(); // If no language preference is defined, assume that all // languages are acceptable List<Preference<Language>> languagePrefs = client .getAcceptedLanguages(); if (languagePrefs.size() == 0) languagePrefs.add(new Preference<Language>(Language.ALL)); // For each language preference defined in the call // Calculate the score and remember the best scoring preference for (Iterator<Preference<Language>> iter2 = languagePrefs .iterator(); (variantLanguage != null) && iter2.hasNext();) { currentLanguagePref = iter2.next(); currentLanguage = currentLanguagePref.getMetadata(); compatiblePref = true; currentScore = 0; // 1) Compare the main tag if (variantLanguage.getPrimaryTag().equalsIgnoreCase( currentLanguage.getPrimaryTag())) { currentScore += 100; } else if (!currentLanguage.getPrimaryTag().equals("*")) { compatiblePref = false; } else if (!currentLanguage.getSubTags().isEmpty()) { // Only "*" is an acceptable language range compatiblePref = false; } else { // The valid "*" range has the lowest valid score currentScore++; } if (compatiblePref) { // 2) Compare the sub tags if ((currentLanguage.getSubTags().isEmpty()) || (variantLanguage.getSubTags().isEmpty())) { if (variantLanguage.getSubTags().isEmpty() && currentLanguage.getSubTags().isEmpty()) { currentScore += 10; } else { // Don't change the score } } else { int maxSize = Math.min(currentLanguage.getSubTags() .size(), variantLanguage.getSubTags() .size()); for (int i = 0; i < maxSize && compatiblePref; i++) { if (currentLanguage.getSubTags().get(i) .equalsIgnoreCase( variantLanguage.getSubTags() .get(i))) { + // Each subtag contribution to the score is + // getting less and less important currentScore += Math.pow(10, 1 - i); } else { // SubTags are different compatiblePref = false; } } } // 3) Do we have a better preference? // currentScore *= currentPref.getQuality(); if (compatiblePref && ((bestLanguagePref == null) || (currentScore > bestLanguageScore))) { bestLanguagePref = currentLanguagePref; bestLanguageScore = currentScore; } } } // Are the preferences compatible with the current variant // language? 
compatibleLanguage = (variantLanguage == null) || (bestLanguagePref != null); // If no media type preference is defined, assume that all media // types are acceptable List<Preference<MediaType>> mediaTypePrefs = client .getAcceptedMediaTypes(); if (mediaTypePrefs.size() == 0) mediaTypePrefs .add(new Preference<MediaType>(MediaType.ALL)); // For each media range preference defined in the call // Calculate the score and remember the best scoring preference for (Iterator<Preference<MediaType>> iter2 = mediaTypePrefs .iterator(); compatibleLanguage && iter2.hasNext();) { currentMediaTypePref = iter2.next(); currentMediaType = currentMediaTypePref.getMetadata(); compatiblePref = true; currentScore = 0; // 1) Compare the main types if (currentMediaType.getMainType().equals( variantMediaType.getMainType())) { currentScore += 1000; } else if (!currentMediaType.getMainType().equals("*")) { compatiblePref = false; } else if (!currentMediaType.getSubType().equals("*")) { // Ranges such as "*/html" are not supported // Only "*/*" is acceptable in this case compatiblePref = false; } if (compatiblePref) { // 2) Compare the sub types if (variantMediaType.getSubType().equals( currentMediaType.getSubType())) { currentScore += 100; } else if (!currentMediaType.getSubType().equals("*")) { // Subtype are different compatiblePref = false; } if (compatiblePref && (variantMediaType.getParameters() != null)) { // 3) Compare the parameters // If current media type is compatible with the // current // media range then the parameters need to be // checked too for (Iterator iter3 = variantMediaType .getParameters().iterator(); iter3 .hasNext();) { currentParam = (Parameter) iter3.next(); if (isParameterFound(currentParam, currentMediaType)) { currentScore++; } } } // 3) Do we have a better preference? // currentScore *= currentPref.getQuality(); if (compatiblePref && ((bestMediaTypePref == null) || (currentScore > bestMediaTypeScore))) { bestMediaTypePref = currentMediaTypePref; bestMediaTypeScore = currentScore; } } } // Are the preferences compatible with the current media type? compatibleMediaType = (variantMediaType == null) || (bestMediaTypePref != null); if (compatibleLanguage && compatibleMediaType) { // Do we have a compatible media type? float currentQuality = 0; if (bestLanguagePref != null) { currentQuality += (bestLanguagePref.getQuality() * 10F); } else if (variantLanguage != null) { currentQuality += 0.1F * 10F; } if (bestMediaTypePref != null) { // So, let's conclude on the current variant, its // quality currentQuality += bestMediaTypePref.getQuality(); } if (bestVariant == null) { bestVariant = currentVariant; bestQuality = currentQuality; } else if (currentQuality > bestQuality) { bestVariant = currentVariant; bestQuality = currentQuality; } } // Reset the preference variables bestLanguagePref = null; bestLanguageScore = 0; bestMediaTypePref = null; bestMediaTypeScore = 0; } return bestVariant; } } /** * Parses a line to extract the provider class name. * * @param line * The line to parse. * @return The provider's class name or an empty string. */ private String getProviderClassName(String line) { int index = line.indexOf('#'); if (index != -1) line = line.substring(0, index); return line.trim(); } /** * Indicates if the searched parameter is specified in the given media * range. * * @param searchedParam * The searched parameter. * @param mediaRange * The media range to inspect. * @return True if the searched parameter is specified in the given media * range. 
*/ private boolean isParameterFound(Parameter searchedParam, MediaType mediaRange) { boolean result = false; for (Iterator iter = mediaRange.getParameters().iterator(); !result && iter.hasNext();) { result = searchedParam.equals((Parameter) iter.next()); } return result; } /** * Parses an URL encoded Web form. * * @param logger * The logger to use. * @param form * The target form. * @param webForm * The posted form. */ public void parse(Logger logger, Form form, Representation webForm) { if (webForm != null) { FormUtils.parsePost(logger, form, webForm); } } /** * Parses an URL encoded query string into a given form. * * @param logger * The logger to use. * @param form * The target form. * @param queryString * Query string. */ public void parse(Logger logger, Form form, String queryString) { if ((queryString != null) && !queryString.equals("")) { FormUtils.parseQuery(logger, form, queryString); } } }
true
true
public Representation getPreferredVariant(ClientInfo client, List<Representation> variants) { if (variants == null) { return null; } else { Parameter currentParam = null; Language currentLanguage = null; Language variantLanguage = null; MediaType currentMediaType = null; MediaType variantMediaType = null; boolean compatiblePref = false; boolean compatibleLanguage = false; boolean compatibleMediaType = false; Representation currentVariant = null; Representation bestVariant = null; Preference<Language> currentLanguagePref = null; Preference<Language> bestLanguagePref = null; Preference<MediaType> currentMediaTypePref = null; Preference<MediaType> bestMediaTypePref = null; float bestQuality = 0; float currentScore = 0; float bestLanguageScore = 0; float bestMediaTypeScore = 0; // For each available variant, we will compute the negotiation score // which is dependant on the language score and on the media type // score for (Iterator iter1 = variants.iterator(); iter1.hasNext();) { currentVariant = (Representation) iter1.next(); variantLanguage = currentVariant.getLanguage(); variantMediaType = currentVariant.getMediaType(); // If no language preference is defined, assume that all // languages are acceptable List<Preference<Language>> languagePrefs = client .getAcceptedLanguages(); if (languagePrefs.size() == 0) languagePrefs.add(new Preference<Language>(Language.ALL)); // For each language preference defined in the call // Calculate the score and remember the best scoring preference for (Iterator<Preference<Language>> iter2 = languagePrefs .iterator(); (variantLanguage != null) && iter2.hasNext();) { currentLanguagePref = iter2.next(); currentLanguage = currentLanguagePref.getMetadata(); compatiblePref = true; currentScore = 0; // 1) Compare the main tag if (variantLanguage.getPrimaryTag().equalsIgnoreCase( currentLanguage.getPrimaryTag())) { currentScore += 100; } else if (!currentLanguage.getPrimaryTag().equals("*")) { compatiblePref = false; } else if (!currentLanguage.getSubTags().isEmpty()) { // Only "*" is an acceptable language range compatiblePref = false; } else { // The valid "*" range has the lowest valid score currentScore++; } if (compatiblePref) { // 2) Compare the sub tags if ((currentLanguage.getSubTags().isEmpty()) || (variantLanguage.getSubTags().isEmpty())) { if (variantLanguage.getSubTags().isEmpty() && currentLanguage.getSubTags().isEmpty()) { currentScore += 10; } else { // Don't change the score } } else { int maxSize = Math.min(currentLanguage.getSubTags() .size(), variantLanguage.getSubTags() .size()); for (int i = 0; i < maxSize && compatiblePref; i++) { if (currentLanguage.getSubTags().get(i) .equalsIgnoreCase( variantLanguage.getSubTags() .get(i))) { currentScore += Math.pow(10, 1 - i); } else { // SubTags are different compatiblePref = false; } } } // 3) Do we have a better preference? // currentScore *= currentPref.getQuality(); if (compatiblePref && ((bestLanguagePref == null) || (currentScore > bestLanguageScore))) { bestLanguagePref = currentLanguagePref; bestLanguageScore = currentScore; } } } // Are the preferences compatible with the current variant // language? 
compatibleLanguage = (variantLanguage == null) || (bestLanguagePref != null); // If no media type preference is defined, assume that all media // types are acceptable List<Preference<MediaType>> mediaTypePrefs = client .getAcceptedMediaTypes(); if (mediaTypePrefs.size() == 0) mediaTypePrefs .add(new Preference<MediaType>(MediaType.ALL)); // For each media range preference defined in the call // Calculate the score and remember the best scoring preference for (Iterator<Preference<MediaType>> iter2 = mediaTypePrefs .iterator(); compatibleLanguage && iter2.hasNext();) { currentMediaTypePref = iter2.next(); currentMediaType = currentMediaTypePref.getMetadata(); compatiblePref = true; currentScore = 0; // 1) Compare the main types if (currentMediaType.getMainType().equals( variantMediaType.getMainType())) { currentScore += 1000; } else if (!currentMediaType.getMainType().equals("*")) { compatiblePref = false; } else if (!currentMediaType.getSubType().equals("*")) { // Ranges such as "*/html" are not supported // Only "*/*" is acceptable in this case compatiblePref = false; } if (compatiblePref) { // 2) Compare the sub types if (variantMediaType.getSubType().equals( currentMediaType.getSubType())) { currentScore += 100; } else if (!currentMediaType.getSubType().equals("*")) { // Subtype are different compatiblePref = false; } if (compatiblePref && (variantMediaType.getParameters() != null)) { // 3) Compare the parameters // If current media type is compatible with the // current // media range then the parameters need to be // checked too for (Iterator iter3 = variantMediaType .getParameters().iterator(); iter3 .hasNext();) { currentParam = (Parameter) iter3.next(); if (isParameterFound(currentParam, currentMediaType)) { currentScore++; } } } // 3) Do we have a better preference? // currentScore *= currentPref.getQuality(); if (compatiblePref && ((bestMediaTypePref == null) || (currentScore > bestMediaTypeScore))) { bestMediaTypePref = currentMediaTypePref; bestMediaTypeScore = currentScore; } } } // Are the preferences compatible with the current media type? compatibleMediaType = (variantMediaType == null) || (bestMediaTypePref != null); if (compatibleLanguage && compatibleMediaType) { // Do we have a compatible media type? float currentQuality = 0; if (bestLanguagePref != null) { currentQuality += (bestLanguagePref.getQuality() * 10F); } else if (variantLanguage != null) { currentQuality += 0.1F * 10F; } if (bestMediaTypePref != null) { // So, let's conclude on the current variant, its // quality currentQuality += bestMediaTypePref.getQuality(); } if (bestVariant == null) { bestVariant = currentVariant; bestQuality = currentQuality; } else if (currentQuality > bestQuality) { bestVariant = currentVariant; bestQuality = currentQuality; } } // Reset the preference variables bestLanguagePref = null; bestLanguageScore = 0; bestMediaTypePref = null; bestMediaTypeScore = 0; } return bestVariant; } }
public Representation getPreferredVariant(ClientInfo client, List<Representation> variants) { if (variants == null) { return null; } else { Parameter currentParam = null; Language currentLanguage = null; Language variantLanguage = null; MediaType currentMediaType = null; MediaType variantMediaType = null; boolean compatiblePref = false; boolean compatibleLanguage = false; boolean compatibleMediaType = false; Representation currentVariant = null; Representation bestVariant = null; Preference<Language> currentLanguagePref = null; Preference<Language> bestLanguagePref = null; Preference<MediaType> currentMediaTypePref = null; Preference<MediaType> bestMediaTypePref = null; float bestQuality = 0; float currentScore = 0; float bestLanguageScore = 0; float bestMediaTypeScore = 0; // For each available variant, we will compute the negotiation score // which is dependant on the language score and on the media type // score for (Iterator iter1 = variants.iterator(); iter1.hasNext();) { currentVariant = (Representation) iter1.next(); variantLanguage = currentVariant.getLanguage(); variantMediaType = currentVariant.getMediaType(); // If no language preference is defined, assume that all // languages are acceptable List<Preference<Language>> languagePrefs = client .getAcceptedLanguages(); if (languagePrefs.size() == 0) languagePrefs.add(new Preference<Language>(Language.ALL)); // For each language preference defined in the call // Calculate the score and remember the best scoring preference for (Iterator<Preference<Language>> iter2 = languagePrefs .iterator(); (variantLanguage != null) && iter2.hasNext();) { currentLanguagePref = iter2.next(); currentLanguage = currentLanguagePref.getMetadata(); compatiblePref = true; currentScore = 0; // 1) Compare the main tag if (variantLanguage.getPrimaryTag().equalsIgnoreCase( currentLanguage.getPrimaryTag())) { currentScore += 100; } else if (!currentLanguage.getPrimaryTag().equals("*")) { compatiblePref = false; } else if (!currentLanguage.getSubTags().isEmpty()) { // Only "*" is an acceptable language range compatiblePref = false; } else { // The valid "*" range has the lowest valid score currentScore++; } if (compatiblePref) { // 2) Compare the sub tags if ((currentLanguage.getSubTags().isEmpty()) || (variantLanguage.getSubTags().isEmpty())) { if (variantLanguage.getSubTags().isEmpty() && currentLanguage.getSubTags().isEmpty()) { currentScore += 10; } else { // Don't change the score } } else { int maxSize = Math.min(currentLanguage.getSubTags() .size(), variantLanguage.getSubTags() .size()); for (int i = 0; i < maxSize && compatiblePref; i++) { if (currentLanguage.getSubTags().get(i) .equalsIgnoreCase( variantLanguage.getSubTags() .get(i))) { // Each subtag contribution to the score is // getting less and less important currentScore += Math.pow(10, 1 - i); } else { // SubTags are different compatiblePref = false; } } } // 3) Do we have a better preference? // currentScore *= currentPref.getQuality(); if (compatiblePref && ((bestLanguagePref == null) || (currentScore > bestLanguageScore))) { bestLanguagePref = currentLanguagePref; bestLanguageScore = currentScore; } } } // Are the preferences compatible with the current variant // language? 
compatibleLanguage = (variantLanguage == null) || (bestLanguagePref != null); // If no media type preference is defined, assume that all media // types are acceptable List<Preference<MediaType>> mediaTypePrefs = client .getAcceptedMediaTypes(); if (mediaTypePrefs.size() == 0) mediaTypePrefs .add(new Preference<MediaType>(MediaType.ALL)); // For each media range preference defined in the call // Calculate the score and remember the best scoring preference for (Iterator<Preference<MediaType>> iter2 = mediaTypePrefs .iterator(); compatibleLanguage && iter2.hasNext();) { currentMediaTypePref = iter2.next(); currentMediaType = currentMediaTypePref.getMetadata(); compatiblePref = true; currentScore = 0; // 1) Compare the main types if (currentMediaType.getMainType().equals( variantMediaType.getMainType())) { currentScore += 1000; } else if (!currentMediaType.getMainType().equals("*")) { compatiblePref = false; } else if (!currentMediaType.getSubType().equals("*")) { // Ranges such as "*/html" are not supported // Only "*/*" is acceptable in this case compatiblePref = false; } if (compatiblePref) { // 2) Compare the sub types if (variantMediaType.getSubType().equals( currentMediaType.getSubType())) { currentScore += 100; } else if (!currentMediaType.getSubType().equals("*")) { // Subtype are different compatiblePref = false; } if (compatiblePref && (variantMediaType.getParameters() != null)) { // 3) Compare the parameters // If current media type is compatible with the // current // media range then the parameters need to be // checked too for (Iterator iter3 = variantMediaType .getParameters().iterator(); iter3 .hasNext();) { currentParam = (Parameter) iter3.next(); if (isParameterFound(currentParam, currentMediaType)) { currentScore++; } } } // 3) Do we have a better preference? // currentScore *= currentPref.getQuality(); if (compatiblePref && ((bestMediaTypePref == null) || (currentScore > bestMediaTypeScore))) { bestMediaTypePref = currentMediaTypePref; bestMediaTypeScore = currentScore; } } } // Are the preferences compatible with the current media type? compatibleMediaType = (variantMediaType == null) || (bestMediaTypePref != null); if (compatibleLanguage && compatibleMediaType) { // Do we have a compatible media type? float currentQuality = 0; if (bestLanguagePref != null) { currentQuality += (bestLanguagePref.getQuality() * 10F); } else if (variantLanguage != null) { currentQuality += 0.1F * 10F; } if (bestMediaTypePref != null) { // So, let's conclude on the current variant, its // quality currentQuality += bestMediaTypePref.getQuality(); } if (bestVariant == null) { bestVariant = currentVariant; bestQuality = currentQuality; } else if (currentQuality > bestQuality) { bestVariant = currentVariant; bestQuality = currentQuality; } } // Reset the preference variables bestLanguagePref = null; bestLanguageScore = 0; bestMediaTypePref = null; bestMediaTypeScore = 0; } return bestVariant; } }
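Row above: the only change is the added comment on the language-subtag loop, noting that each further matching subtag contributes Math.pow(10, 1 - i) to the negotiation score, i.e. progressively less. A tiny illustrative printout of those weights (class name is hypothetical):

// Illustrative only: shows how Math.pow(10, 1 - i) weights successive
// matching language subtags in the negotiation score of the row above.
public class SubtagWeightSketch {
    public static void main(String[] args) {
        for (int i = 0; i < 4; i++) {
            // i = 0 -> 10.0, i = 1 -> 1.0, i = 2 -> 0.1, i = 3 -> 0.01
            System.out.println("subtag " + i + " adds " + Math.pow(10, 1 - i));
        }
    }
}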
diff --git a/intelligence/src/test/java/de/spektrumprojekt/informationextraction/extractors/JerichoTextCleanerTest.java b/intelligence/src/test/java/de/spektrumprojekt/informationextraction/extractors/JerichoTextCleanerTest.java index 530f43b..dc093f5 100644 --- a/intelligence/src/test/java/de/spektrumprojekt/informationextraction/extractors/JerichoTextCleanerTest.java +++ b/intelligence/src/test/java/de/spektrumprojekt/informationextraction/extractors/JerichoTextCleanerTest.java @@ -1,62 +1,62 @@ package de.spektrumprojekt.informationextraction.extractors; import static org.junit.Assert.assertEquals; import java.util.Date; import junit.framework.Assert; import org.junit.Test; import de.spektrumprojekt.datamodel.common.MimeType; import de.spektrumprojekt.datamodel.message.Message; import de.spektrumprojekt.datamodel.message.MessagePart; import de.spektrumprojekt.datamodel.message.MessageType; import de.spektrumprojekt.datamodel.subscription.status.StatusType; import de.spektrumprojekt.informationextraction.InformationExtractionContext; import de.spektrumprojekt.persistence.simple.PersistenceMock; public class JerichoTextCleanerTest { @Test public void testTextCleaner() { JerichoTextCleanerCommand textCleaner = new JerichoTextCleanerCommand(false); Message message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); String text = "<!-- comment --><p>the <i>quick</i> brown fox jumps over the lazy dog.</p>"; MessagePart messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); InformationExtractionContext context = new InformationExtractionContext( new PersistenceMock(), message, messagePart); textCleaner.process(context); - assertEquals("the quick brown fox jumps over the lazy dog.", context.getCleanText()); + assertEquals("the quick brown fox jumps over the lazy dog", context.getCleanText()); message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); text = "<p>In <a href=\"http://arstechnica.com/apple/2012/07/the-server-simplified-a-power-users-guide-to-os-x-server/\">our review of OS X Server</a>, we found that Mountain Lion has a lot to offer home users or Mac-centric small businesses. Enterprise-level features, however, have fallen by the wayside. Luckily, some great first- and third-party tools exist to help close the gap between Apple's server product and more robust enterprise management systems from the likes of Microsoft and Dell.</p><p>Some of the products are free open-source programs, and some are strong, for-pay products intended for use with hundreds if not thousands of Macs. Whatever your needs are, this list of applications should point you in the right direction if you're looking to extend OS X Server's capabilities.</p><h2>Apple Remote Desktop</h2><div class=\"image center full-width\" style=\"width:631px\"><img src=\"http://cdn.arstechnica.net/wp-content/uploads/2012/08/ARD-UNIX-command.png\" width=\"631\" height=\"547\"><div class=\"caption\"><div class=\"caption-text\">Sending a Software Update UNIX command with Apple Remote Desktop.</div> </div></div><p>One of OS X Server's most glaring blind spots relative to Windows Server and Active Directory is software management. There's no way to install third-party applications on Macs that are already out in the field. 
And if you use a program like DeployStudio to install applications when you set up your Mac, it isn't much help to you once the Mac is off your desk and out in the field.</p><p><a href=\"http://arstechnica.com/information-technology/2012/08/filling-in-the-gaps-great-add-ons-for-os-x-server/\">Read 17 remaining paragraphs</a> | <a href=\"http://arstechnica.com/information-technology/2012/08/filling-in-the-gaps-great-add-ons-for-os-x-server/?comments=1#comments-bar\">Comments</a></p><p><a href=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/0/da\"><img src=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/0/di\" border=\"0\" ismap=\"true\"></img></a><br/><a href=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/1/da\"><img src=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/1/di\" border=\"0\" ismap=\"true\"></img></a></p><div class=\"feedflare\"><a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:V_sGLiPBpWU\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?i=gqo4KE45VGA:EF43jNJcvAM:V_sGLiPBpWU\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:F7zBnMyn0Lo\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?i=gqo4KE45VGA:EF43jNJcvAM:F7zBnMyn0Lo\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:qj6IDK7rITs\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?d=qj6IDK7rITs\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:yIl2AUoC8zA\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?d=yIl2AUoC8zA\" border=\"0\"></img></a></div><img src=\"http://feeds.feedburner.com/~r/arstechnica/index/~4/gqo4KE45VGA\" height=\"1\" width=\"1\"/>"; messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); context = new InformationExtractionContext(new PersistenceMock(), message, messagePart); textCleaner.process(context); - assertEquals("Extracted text is: " + context.getCleanText(), 1180, context.getCleanText() + assertEquals("Extracted text is: " + context.getCleanText(), 1171, context.getCleanText() .length()); message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); text = "<p>Test1</p><p>Test2</p><ul><li>Test3</li><li>Test4</li><li>Test5 Test6&#160; Test7</li><ul><li>test17</li></ul><li>Test8</li></ul><ol><li>Test9</li><ol><li>Test10</li><li>Test11</li></ol><li><u>Test12</u></li><li><b>Test13</b></li><li>Test14</li><ol><li><i>Test15</i></li><li><a href=\"http://www.communote.com\" target=\"_blank\">Test16</a></li><ol><li><u><i><b>Test18</b></i></u>esdfzisdfzsdf</li></ol><li>#test19</li><li>#test20</li></ol></ol><p>&#8203;</p><p>&#8203;</p><p><br/><br/>test21 test22 test23</p><p>test24 test25 test26 test27 test28<br/>test29 test30</p><p>&#8203;</p>"; messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); context = new InformationExtractionContext(new PersistenceMock(), message, messagePart); textCleaner.process(context); String lowerText = context.getCleanText().toLowerCase(); for (int i = 0; i < lowerText.length(); i++) { System.out.println("'" + lowerText.charAt(i) + "' = " + (double) lowerText.charAt(i) + " isWhiteSpace=" + Character.isWhitespace(lowerText.charAt(i))); } char ch = '\0'; System.out.println("'" + ch + "' = " + (int) ch); for (int i = 1; i <= 30; i++) { 
Assert.assertTrue("Text should contain test" + i, lowerText.contains("test" + i)); } } }
false
true
public void testTextCleaner() { JerichoTextCleanerCommand textCleaner = new JerichoTextCleanerCommand(false); Message message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); String text = "<!-- comment --><p>the <i>quick</i> brown fox jumps over the lazy dog.</p>"; MessagePart messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); InformationExtractionContext context = new InformationExtractionContext( new PersistenceMock(), message, messagePart); textCleaner.process(context); assertEquals("the quick brown fox jumps over the lazy dog.", context.getCleanText()); message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); text = "<p>In <a href=\"http://arstechnica.com/apple/2012/07/the-server-simplified-a-power-users-guide-to-os-x-server/\">our review of OS X Server</a>, we found that Mountain Lion has a lot to offer home users or Mac-centric small businesses. Enterprise-level features, however, have fallen by the wayside. Luckily, some great first- and third-party tools exist to help close the gap between Apple's server product and more robust enterprise management systems from the likes of Microsoft and Dell.</p><p>Some of the products are free open-source programs, and some are strong, for-pay products intended for use with hundreds if not thousands of Macs. Whatever your needs are, this list of applications should point you in the right direction if you're looking to extend OS X Server's capabilities.</p><h2>Apple Remote Desktop</h2><div class=\"image center full-width\" style=\"width:631px\"><img src=\"http://cdn.arstechnica.net/wp-content/uploads/2012/08/ARD-UNIX-command.png\" width=\"631\" height=\"547\"><div class=\"caption\"><div class=\"caption-text\">Sending a Software Update UNIX command with Apple Remote Desktop.</div> </div></div><p>One of OS X Server's most glaring blind spots relative to Windows Server and Active Directory is software management. There's no way to install third-party applications on Macs that are already out in the field. 
And if you use a program like DeployStudio to install applications when you set up your Mac, it isn't much help to you once the Mac is off your desk and out in the field.</p><p><a href=\"http://arstechnica.com/information-technology/2012/08/filling-in-the-gaps-great-add-ons-for-os-x-server/\">Read 17 remaining paragraphs</a> | <a href=\"http://arstechnica.com/information-technology/2012/08/filling-in-the-gaps-great-add-ons-for-os-x-server/?comments=1#comments-bar\">Comments</a></p><p><a href=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/0/da\"><img src=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/0/di\" border=\"0\" ismap=\"true\"></img></a><br/><a href=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/1/da\"><img src=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/1/di\" border=\"0\" ismap=\"true\"></img></a></p><div class=\"feedflare\"><a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:V_sGLiPBpWU\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?i=gqo4KE45VGA:EF43jNJcvAM:V_sGLiPBpWU\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:F7zBnMyn0Lo\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?i=gqo4KE45VGA:EF43jNJcvAM:F7zBnMyn0Lo\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:qj6IDK7rITs\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?d=qj6IDK7rITs\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:yIl2AUoC8zA\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?d=yIl2AUoC8zA\" border=\"0\"></img></a></div><img src=\"http://feeds.feedburner.com/~r/arstechnica/index/~4/gqo4KE45VGA\" height=\"1\" width=\"1\"/>"; messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); context = new InformationExtractionContext(new PersistenceMock(), message, messagePart); textCleaner.process(context); assertEquals("Extracted text is: " + context.getCleanText(), 1180, context.getCleanText() .length()); message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); text = "<p>Test1</p><p>Test2</p><ul><li>Test3</li><li>Test4</li><li>Test5 Test6&#160; Test7</li><ul><li>test17</li></ul><li>Test8</li></ul><ol><li>Test9</li><ol><li>Test10</li><li>Test11</li></ol><li><u>Test12</u></li><li><b>Test13</b></li><li>Test14</li><ol><li><i>Test15</i></li><li><a href=\"http://www.communote.com\" target=\"_blank\">Test16</a></li><ol><li><u><i><b>Test18</b></i></u>esdfzisdfzsdf</li></ol><li>#test19</li><li>#test20</li></ol></ol><p>&#8203;</p><p>&#8203;</p><p><br/><br/>test21 test22 test23</p><p>test24 test25 test26 test27 test28<br/>test29 test30</p><p>&#8203;</p>"; messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); context = new InformationExtractionContext(new PersistenceMock(), message, messagePart); textCleaner.process(context); String lowerText = context.getCleanText().toLowerCase(); for (int i = 0; i < lowerText.length(); i++) { System.out.println("'" + lowerText.charAt(i) + "' = " + (double) lowerText.charAt(i) + " isWhiteSpace=" + Character.isWhitespace(lowerText.charAt(i))); } char ch = '\0'; System.out.println("'" + ch + "' = " + (int) ch); for (int i = 1; i <= 30; i++) { Assert.assertTrue("Text should contain test" + i, lowerText.contains("test" + i)); } }
public void testTextCleaner() { JerichoTextCleanerCommand textCleaner = new JerichoTextCleanerCommand(false); Message message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); String text = "<!-- comment --><p>the <i>quick</i> brown fox jumps over the lazy dog.</p>"; MessagePart messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); InformationExtractionContext context = new InformationExtractionContext( new PersistenceMock(), message, messagePart); textCleaner.process(context); assertEquals("the quick brown fox jumps over the lazy dog", context.getCleanText()); message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); text = "<p>In <a href=\"http://arstechnica.com/apple/2012/07/the-server-simplified-a-power-users-guide-to-os-x-server/\">our review of OS X Server</a>, we found that Mountain Lion has a lot to offer home users or Mac-centric small businesses. Enterprise-level features, however, have fallen by the wayside. Luckily, some great first- and third-party tools exist to help close the gap between Apple's server product and more robust enterprise management systems from the likes of Microsoft and Dell.</p><p>Some of the products are free open-source programs, and some are strong, for-pay products intended for use with hundreds if not thousands of Macs. Whatever your needs are, this list of applications should point you in the right direction if you're looking to extend OS X Server's capabilities.</p><h2>Apple Remote Desktop</h2><div class=\"image center full-width\" style=\"width:631px\"><img src=\"http://cdn.arstechnica.net/wp-content/uploads/2012/08/ARD-UNIX-command.png\" width=\"631\" height=\"547\"><div class=\"caption\"><div class=\"caption-text\">Sending a Software Update UNIX command with Apple Remote Desktop.</div> </div></div><p>One of OS X Server's most glaring blind spots relative to Windows Server and Active Directory is software management. There's no way to install third-party applications on Macs that are already out in the field. 
And if you use a program like DeployStudio to install applications when you set up your Mac, it isn't much help to you once the Mac is off your desk and out in the field.</p><p><a href=\"http://arstechnica.com/information-technology/2012/08/filling-in-the-gaps-great-add-ons-for-os-x-server/\">Read 17 remaining paragraphs</a> | <a href=\"http://arstechnica.com/information-technology/2012/08/filling-in-the-gaps-great-add-ons-for-os-x-server/?comments=1#comments-bar\">Comments</a></p><p><a href=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/0/da\"><img src=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/0/di\" border=\"0\" ismap=\"true\"></img></a><br/><a href=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/1/da\"><img src=\"http://feedads.g.doubleclick.net/~at/kFKNQf0ovGoFHtbBLQkks4z89Q8/1/di\" border=\"0\" ismap=\"true\"></img></a></p><div class=\"feedflare\"><a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:V_sGLiPBpWU\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?i=gqo4KE45VGA:EF43jNJcvAM:V_sGLiPBpWU\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:F7zBnMyn0Lo\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?i=gqo4KE45VGA:EF43jNJcvAM:F7zBnMyn0Lo\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:qj6IDK7rITs\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?d=qj6IDK7rITs\" border=\"0\"></img></a> <a href=\"http://feeds.arstechnica.com/~ff/arstechnica/index?a=gqo4KE45VGA:EF43jNJcvAM:yIl2AUoC8zA\"><img src=\"http://feeds.feedburner.com/~ff/arstechnica/index?d=yIl2AUoC8zA\" border=\"0\"></img></a></div><img src=\"http://feeds.feedburner.com/~r/arstechnica/index/~4/gqo4KE45VGA\" height=\"1\" width=\"1\"/>"; messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); context = new InformationExtractionContext(new PersistenceMock(), message, messagePart); textCleaner.process(context); assertEquals("Extracted text is: " + context.getCleanText(), 1171, context.getCleanText() .length()); message = new Message(MessageType.CONTENT, StatusType.OK, new Date()); text = "<p>Test1</p><p>Test2</p><ul><li>Test3</li><li>Test4</li><li>Test5 Test6&#160; Test7</li><ul><li>test17</li></ul><li>Test8</li></ul><ol><li>Test9</li><ol><li>Test10</li><li>Test11</li></ol><li><u>Test12</u></li><li><b>Test13</b></li><li>Test14</li><ol><li><i>Test15</i></li><li><a href=\"http://www.communote.com\" target=\"_blank\">Test16</a></li><ol><li><u><i><b>Test18</b></i></u>esdfzisdfzsdf</li></ol><li>#test19</li><li>#test20</li></ol></ol><p>&#8203;</p><p>&#8203;</p><p><br/><br/>test21 test22 test23</p><p>test24 test25 test26 test27 test28<br/>test29 test30</p><p>&#8203;</p>"; messagePart = new MessagePart(MimeType.TEXT_PLAIN, text); message.addMessagePart(messagePart); context = new InformationExtractionContext(new PersistenceMock(), message, messagePart); textCleaner.process(context); String lowerText = context.getCleanText().toLowerCase(); for (int i = 0; i < lowerText.length(); i++) { System.out.println("'" + lowerText.charAt(i) + "' = " + (double) lowerText.charAt(i) + " isWhiteSpace=" + Character.isWhitespace(lowerText.charAt(i))); } char ch = '\0'; System.out.println("'" + ch + "' = " + (int) ch); for (int i = 1; i <= 30; i++) { Assert.assertTrue("Text should contain test" + i, lowerText.contains("test" + i)); } }
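Row above: the fix only updates the test's expected values (the trailing period disappears from the first assertion and the expected length drops from 1180 to 1171), keeping the self-describing assertion message that embeds the actual cleaned text. A small sketch of that assertion-message pattern, assuming JUnit 4 on the classpath and a hypothetical cleaned string:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

// Illustrative only: the same pattern as the row above -- embedding the actual
// value in the assertion message so a mismatch is diagnosable from the failure
// output alone. The string here is a placeholder, not real cleaner output.
public class AssertionMessageSketch {
    @Test
    public void lengthWithSelfDescribingMessage() {
        String clean = "example cleaned text";
        assertEquals("Extracted text is: " + clean, 20, clean.length());
    }
}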
diff --git a/src/main/battlecode/world/Radio.java b/src/main/battlecode/world/Radio.java index aef3d474..ea29b5e9 100644 --- a/src/main/battlecode/world/Radio.java +++ b/src/main/battlecode/world/Radio.java @@ -1,49 +1,49 @@ package battlecode.world; import battlecode.common.BroadcastController; import battlecode.common.ComponentType; import battlecode.common.GameActionException; import battlecode.common.Message; import battlecode.world.signal.BroadcastSignal; import battlecode.world.signal.TurnOnSignal; import com.google.common.primitives.Ints; import java.util.Arrays; import java.util.ArrayList; public class Radio extends BaseComponent implements BroadcastController { public Radio(ComponentType type, InternalRobot robot) { super(type,robot); } public void broadcast(Message m) throws GameActionException { assertInactive(); assertNotNull(m); activate(new BroadcastSignal(robot,type().range,m)); } public void broadcastTurnOn(int [] ids) throws GameActionException { assertInactive(); assertNotNull(ids); int [] turnOnIDs = new int [ ids.length ]; int i, j; for(i = 0, j = 0; i<ids.length; i++) { - InternalRobot ir = gameWorld.getRobotByID(i); + InternalRobot ir = gameWorld.getRobotByID(ids[i]); if(ir!=null&&ir.getTeam()==robot.getTeam()&&checkWithinRange(ir.getLocation())) turnOnIDs[j++] = ir.getID(); } gameWorld.visitSignal(new TurnOnSignal(Arrays.copyOf(turnOnIDs,j),robot,true)); } public void broadcastTurnOnAll() throws GameActionException { assertInactive(); ArrayList<Integer> ids = new ArrayList<Integer>(); for(InternalObject o : gameWorld.allObjects()) { if(o.getTeam()==robot.getTeam()&&checkWithinRange(o)) ids.add(o.getID()); } gameWorld.visitSignal(new TurnOnSignal(Ints.toArray(ids),robot,true)); } }
true
true
public void broadcastTurnOn(int [] ids) throws GameActionException { assertInactive(); assertNotNull(ids); int [] turnOnIDs = new int [ ids.length ]; int i, j; for(i = 0, j = 0; i<ids.length; i++) { InternalRobot ir = gameWorld.getRobotByID(i); if(ir!=null&&ir.getTeam()==robot.getTeam()&&checkWithinRange(ir.getLocation())) turnOnIDs[j++] = ir.getID(); } gameWorld.visitSignal(new TurnOnSignal(Arrays.copyOf(turnOnIDs,j),robot,true)); }
public void broadcastTurnOn(int [] ids) throws GameActionException { assertInactive(); assertNotNull(ids); int [] turnOnIDs = new int [ ids.length ]; int i, j; for(i = 0, j = 0; i<ids.length; i++) { InternalRobot ir = gameWorld.getRobotByID(ids[i]); if(ir!=null&&ir.getTeam()==robot.getTeam()&&checkWithinRange(ir.getLocation())) turnOnIDs[j++] = ir.getID(); } gameWorld.visitSignal(new TurnOnSignal(Arrays.copyOf(turnOnIDs,j),robot,true)); }
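Row above: the buggy loop looked robots up by the loop index i instead of by the requested ID ids[i], so almost all IDs passed by the caller were ignored. A minimal sketch of the corrected lookup-by-element pattern, using a hypothetical in-memory registry instead of the game world:

import java.util.Arrays;
import java.util.Map;

// Illustrative only: look entities up by the element at each index, not by the
// index itself. The registry and IDs below are placeholders.
public class LookupByElementSketch {
    public static void main(String[] args) {
        Map<Integer, String> robotsById = Map.of(101, "alpha", 205, "beta");
        int[] ids = {101, 205};

        String[] found = new String[ids.length];
        int j = 0;
        for (int i = 0; i < ids.length; i++) {
            // Correct: resolve the requested ID ids[i], not the loop counter i.
            String robot = robotsById.get(ids[i]);
            if (robot != null) {
                found[j++] = robot;
            }
        }
        System.out.println(Arrays.toString(Arrays.copyOf(found, j)));  // [alpha, beta]
    }
}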
diff --git a/gerrit-httpd/src/main/java/com/google/gerrit/httpd/rpc/ChangeListServiceImpl.java b/gerrit-httpd/src/main/java/com/google/gerrit/httpd/rpc/ChangeListServiceImpl.java index d26e6b850..d71a7811c 100644 --- a/gerrit-httpd/src/main/java/com/google/gerrit/httpd/rpc/ChangeListServiceImpl.java +++ b/gerrit-httpd/src/main/java/com/google/gerrit/httpd/rpc/ChangeListServiceImpl.java @@ -1,616 +1,616 @@ // Copyright (C) 2008 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.gerrit.httpd.rpc; import static com.google.gerrit.reviewdb.AccountExternalId.SCHEME_USERNAME; import com.google.gerrit.common.data.AccountDashboardInfo; import com.google.gerrit.common.data.ChangeInfo; import com.google.gerrit.common.data.ChangeListService; import com.google.gerrit.common.data.SingleListChangeInfo; import com.google.gerrit.common.data.ToggleStarRequest; import com.google.gerrit.common.errors.NoSuchEntityException; import com.google.gerrit.reviewdb.Account; import com.google.gerrit.reviewdb.AccountExternalId; import com.google.gerrit.reviewdb.Change; import com.google.gerrit.reviewdb.ChangeAccess; import com.google.gerrit.reviewdb.PatchLineComment; import com.google.gerrit.reviewdb.PatchSet; import com.google.gerrit.reviewdb.PatchSetApproval; import com.google.gerrit.reviewdb.Project; import com.google.gerrit.reviewdb.RevId; import com.google.gerrit.reviewdb.ReviewDb; import com.google.gerrit.reviewdb.StarredChange; import com.google.gerrit.reviewdb.TrackingId; import com.google.gerrit.server.CurrentUser; import com.google.gerrit.server.account.AccountInfoCacheFactory; import com.google.gerrit.server.project.ChangeControl; import com.google.gerrit.server.project.NoSuchChangeException; import com.google.gwt.user.client.rpc.AsyncCallback; import com.google.gwtjsonrpc.client.VoidResult; import com.google.gwtorm.client.OrmException; import com.google.gwtorm.client.ResultSet; import com.google.gwtorm.client.impl.ListResultSet; import com.google.inject.Inject; import com.google.inject.Provider; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; public class ChangeListServiceImpl extends BaseServiceImplementation implements ChangeListService { private static final Comparator<ChangeInfo> ID_COMP = new Comparator<ChangeInfo>() { public int compare(final ChangeInfo o1, final ChangeInfo o2) { return o1.getId().get() - o2.getId().get(); } }; private static final Comparator<ChangeInfo> SORT_KEY_COMP = new Comparator<ChangeInfo>() { public int compare(final ChangeInfo o1, final ChangeInfo o2) { return o2.getSortKey().compareTo(o1.getSortKey()); } }; private static final Comparator<Change> QUERY_PREV = new Comparator<Change>() { public int compare(final Change a, final Change b) { return a.getSortKey().compareTo(b.getSortKey()); } }; private static final Comparator<Change> QUERY_NEXT = new Comparator<Change>() { public int compare(final Change a, final Change b) { 
return b.getSortKey().compareTo(a.getSortKey()); } }; private static final int MAX_PER_PAGE = 100; private static int safePageSize(final int pageSize) { return 0 < pageSize && pageSize <= MAX_PER_PAGE ? pageSize : MAX_PER_PAGE; } private final Provider<CurrentUser> currentUser; private final ChangeControl.Factory changeControlFactory; private final AccountInfoCacheFactory.Factory accountInfoCacheFactory; @Inject ChangeListServiceImpl(final Provider<ReviewDb> schema, final Provider<CurrentUser> currentUser, final ChangeControl.Factory changeControlFactory, final AccountInfoCacheFactory.Factory accountInfoCacheFactory) { super(schema, currentUser); this.currentUser = currentUser; this.changeControlFactory = changeControlFactory; this.accountInfoCacheFactory = accountInfoCacheFactory; } private boolean canRead(final Change c) { try { return changeControlFactory.controlFor(c).isVisible(); } catch (NoSuchChangeException e) { return false; } } public void allOpenPrev(final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryPrev(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int slim, String sortKey) throws OrmException { return db.changes().allOpenPrev(sortKey, slim); } }); } public void allOpenNext(final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryNext(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int slim, String sortKey) throws OrmException { return db.changes().allOpenNext(sortKey, slim); } }); } public void byProjectOpenPrev(final Project.NameKey project, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryPrev(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int slim, String sortKey) throws OrmException { return db.changes().byProjectOpenPrev(project, sortKey, slim); } }); } public void byProjectOpenNext(final Project.NameKey project, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryNext(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int slim, String sortKey) throws OrmException { return db.changes().byProjectOpenNext(project, sortKey, slim); } }); } public void byProjectClosedPrev(final Project.NameKey project, final Change.Status s, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryPrev(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int slim, String sortKey) throws OrmException { return db.changes().byProjectClosedPrev(s.getCode(), project, sortKey, slim); } }); } public void byProjectClosedNext(final Project.NameKey project, final Change.Status s, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryNext(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int slim, String sortKey) throws OrmException { return db.changes().byProjectClosedNext(s.getCode(), project, sortKey, slim); } }); } public void allClosedPrev(final Change.Status s, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryPrev(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int lim, String key) throws OrmException { return db.changes().allClosedPrev(s.getCode(), key, lim); } }); } public void allClosedNext(final Change.Status s, final String pos, final int pageSize, final 
AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryNext(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int lim, String key) throws OrmException { return db.changes().allClosedNext(s.getCode(), key, lim); } }); } @Override public void allQueryPrev(final String query, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryPrev(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int lim, String key) throws OrmException { return searchQuery(db, query, lim, key, QUERY_PREV); } }); } @Override public void allQueryNext(final String query, final String pos, final int pageSize, final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new QueryNext(pageSize, pos) { @Override ResultSet<Change> query(ReviewDb db, int lim, String key) throws OrmException { return searchQuery(db, query, lim, key, QUERY_NEXT); } }); } private ResultSet<Change> searchQuery(final ReviewDb db, String query, final int limit, final String key, final Comparator<Change> cmp) throws OrmException { List<Change> result = new ArrayList<Change>(); final HashSet<Change.Id> want = new HashSet<Change.Id>(); query = query.trim(); if (query.matches("^[1-9][0-9]*$")) { want.add(Change.Id.parse(query)); } else if (query.matches("^[iI][0-9a-f]{4,}.*$")) { if (query.startsWith("i")) { query = "I" + query.substring(1); } final Change.Key a = new Change.Key(query); final Change.Key b = a.max(); filterBySortKey(result, db.changes().byKeyRange(a, b), cmp, key); Collections.sort(result, cmp); if (limit < result.size()) { result = result.subList(0, limit); } } else if (query.matches("^([0-9a-fA-F]{4," + RevId.LEN + "})$")) { final RevId id = new RevId(query); final ResultSet<PatchSet> patches; if (id.isComplete()) { patches = db.patchSets().byRevision(id); } else { patches = db.patchSets().byRevisionRange(id, id.max()); } for (PatchSet p : patches) { want.add(p.getId().getParentKey()); } } else if (query.contains("owner:")) { String[] parsedQuery = query.split(":"); if (parsedQuery.length > 1) { filterBySortKey(result, changesCreatedBy(db, parsedQuery[1]), cmp, key); } } else if (query.contains("reviewer:")) { String[] parsedQuery = query.split(":"); if (parsedQuery.length > 1) { want.addAll(changesReviewedBy(db, parsedQuery[1])); } - } else if (query.contains("tr:")) { + } else if (query.contains("bug:") || query.contains("tr:")) { String[] parsedQuery = query.split(":"); if (parsedQuery.length > 1) { want.addAll(changesReferencingTr(db, parsedQuery[1])); } } if (result.isEmpty() && want.isEmpty()) { return new ListResultSet<Change>(Collections.<Change> emptyList()); } filterBySortKey(result, db.changes().get(want), cmp, key); Collections.sort(result, cmp); if (limit < result.size()) { result = result.subList(0, limit); } return new ListResultSet<Change>(result); } private static void filterBySortKey(final List<Change> dst, final Iterable<Change> src, final Comparator<Change> cmp, final String key) { if (cmp == QUERY_PREV) { for (Change c : src) { if (c.getSortKey().compareTo(key) > 0) { dst.add(c); } } } else /* cmp == QUERY_NEXT */{ for (Change c : src) { if (c.getSortKey().compareTo(key) < 0) { dst.add(c); } } } } public void forAccount(final Account.Id id, final AsyncCallback<AccountDashboardInfo> callback) { final Account.Id me = getAccountId(); final Account.Id target = id != null ? 
id : me; if (target == null) { callback.onFailure(new NoSuchEntityException()); return; } run(callback, new Action<AccountDashboardInfo>() { public AccountDashboardInfo run(final ReviewDb db) throws OrmException, Failure { final AccountInfoCacheFactory ac = accountInfoCacheFactory.create(); final Account user = ac.get(target); if (user == null) { throw new Failure(new NoSuchEntityException()); } final Set<Change.Id> stars = currentUser.get().getStarredChanges(); final ChangeAccess changes = db.changes(); final AccountDashboardInfo d; final Set<Change.Id> openReviews = new HashSet<Change.Id>(); final Set<Change.Id> closedReviews = new HashSet<Change.Id>(); for (final PatchSetApproval ca : db.patchSetApprovals().openByUser(id)) { openReviews.add(ca.getPatchSetId().getParentKey()); } for (final PatchSetApproval ca : db.patchSetApprovals() .closedByUser(id)) { closedReviews.add(ca.getPatchSetId().getParentKey()); } d = new AccountDashboardInfo(target); d.setByOwner(filter(changes.byOwnerOpen(target), stars, ac)); d.setClosed(filter(changes.byOwnerClosed(target), stars, ac)); for (final ChangeInfo c : d.getByOwner()) { openReviews.remove(c.getId()); } d.setForReview(filter(changes.get(openReviews), stars, ac)); Collections.sort(d.getForReview(), ID_COMP); for (final ChangeInfo c : d.getClosed()) { closedReviews.remove(c.getId()); } if (!closedReviews.isEmpty()) { d.getClosed().addAll(filter(changes.get(closedReviews), stars, ac)); Collections.sort(d.getClosed(), SORT_KEY_COMP); } d.setAccounts(ac.create()); return d; } }); } public void myStarredChanges( final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new Action<SingleListChangeInfo>() { public SingleListChangeInfo run(final ReviewDb db) throws OrmException { final AccountInfoCacheFactory ac = accountInfoCacheFactory.create(); final SingleListChangeInfo d = new SingleListChangeInfo(); final Set<Change.Id> starred = currentUser.get().getStarredChanges(); d.setChanges(filter(db.changes().get(starred), starred, ac)); Collections.sort(d.getChanges(), new Comparator<ChangeInfo>() { public int compare(final ChangeInfo o1, final ChangeInfo o2) { return o1.getLastUpdatedOn().compareTo(o2.getLastUpdatedOn()); } }); d.setAccounts(ac.create()); return d; } }); } public void myDraftChanges(final AsyncCallback<SingleListChangeInfo> callback) { run(callback, new Action<SingleListChangeInfo>() { public SingleListChangeInfo run(final ReviewDb db) throws OrmException { final Account.Id me = getAccountId(); final AccountInfoCacheFactory ac = accountInfoCacheFactory.create(); final SingleListChangeInfo d = new SingleListChangeInfo(); final Set<Change.Id> starred = currentUser.get().getStarredChanges(); final Set<Change.Id> drafted = draftedBy(db, me); d.setChanges(filter(db.changes().get(drafted), starred, ac)); Collections.sort(d.getChanges(), new Comparator<ChangeInfo>() { public int compare(final ChangeInfo o1, final ChangeInfo o2) { return o1.getLastUpdatedOn().compareTo(o2.getLastUpdatedOn()); } }); d.setAccounts(ac.create()); return d; } }); } public void toggleStars(final ToggleStarRequest req, final AsyncCallback<VoidResult> callback) { run(callback, new Action<VoidResult>() { public VoidResult run(final ReviewDb db) throws OrmException { final Account.Id me = getAccountId(); final Set<Change.Id> existing = currentUser.get().getStarredChanges(); List<StarredChange> add = new ArrayList<StarredChange>(); List<StarredChange.Key> remove = new ArrayList<StarredChange.Key>(); if (req.getAddSet() != null) { for (final Change.Id id : 
req.getAddSet()) { if (!existing.contains(id)) { add.add(new StarredChange(new StarredChange.Key(me, id))); } } } if (req.getRemoveSet() != null) { for (final Change.Id id : req.getRemoveSet()) { remove.add(new StarredChange.Key(me, id)); } } db.starredChanges().insert(add); db.starredChanges().deleteKeys(remove); return VoidResult.INSTANCE; } }); } public void myStarredChangeIds(final AsyncCallback<Set<Change.Id>> callback) { callback.onSuccess(currentUser.get().getStarredChanges()); } private List<ChangeInfo> filter(final ResultSet<Change> rs, final Set<Change.Id> starred, final AccountInfoCacheFactory accts) { final ArrayList<ChangeInfo> r = new ArrayList<ChangeInfo>(); for (final Change c : rs) { if (canRead(c)) { final ChangeInfo ci = new ChangeInfo(c); accts.want(ci.getOwner()); ci.setStarred(starred.contains(ci.getId())); r.add(ci); } } return r; } private static Set<Change.Id> draftedBy(final ReviewDb db, final Account.Id me) throws OrmException { final Set<Change.Id> existing = new HashSet<Change.Id>(); if (me != null) { for (final PatchLineComment sc : db.patchComments().draftByAuthor(me)) { final Change.Id c = sc.getKey().getParentKey().getParentKey().getParentKey(); existing.add(c); } } return existing; } /** * @return a set of all the account ID's matching the given user name in * either of the following columns: ssh name, email address, full name */ private static Set<Account.Id> getAccountSources(final ReviewDb db, final String userName) throws OrmException { Set<Account.Id> result = new HashSet<Account.Id>(); String a = userName; String b = userName + "\u9fa5"; addAll(result, db.accounts().suggestByFullName(a, b, 10)); for (AccountExternalId extId : db.accountExternalIds().suggestByKey( new AccountExternalId.Key(SCHEME_USERNAME, a), new AccountExternalId.Key(SCHEME_USERNAME, b), 10)) { result.add(extId.getAccountId()); } for (AccountExternalId extId : db.accountExternalIds() .suggestByEmailAddress(a, b, 10)) { result.add(extId.getAccountId()); } return result; } private static void addAll(Set<Account.Id> result, ResultSet<Account> rs) { for (Account account : rs) { result.add(account.getId()); } } /** * @return a set of all the changes created by userName. This method tries to * find userName in 1) the ssh user names, 2) the full names and 3) * the email addresses. The returned changes are unique and sorted by * time stamp, newer first. */ private List<Change> changesCreatedBy(final ReviewDb db, final String userName) throws OrmException { final List<Change> resultChanges = new ArrayList<Change>(); for (Account.Id account : getAccountSources(db, userName)) { for (Change change : db.changes().byOwnerOpen(account)) { resultChanges.add(change); } for (Change change : db.changes().byOwnerClosedAll(account)) { resultChanges.add(change); } } return resultChanges; } /** * @return a set of all the changes reviewed by userName. This method tries to * find userName in 1) the ssh user names, 2) the full names and the * email addresses. The returned changes are unique and sorted by time * stamp, newer first. 
*/ private Set<Change.Id> changesReviewedBy(final ReviewDb db, final String userName) throws OrmException { final Set<Change.Id> resultChanges = new HashSet<Change.Id>(); for (Account.Id account : getAccountSources(db, userName)) { for (PatchSetApproval a : db.patchSetApprovals().openByUser(account)) { resultChanges.add(a.getPatchSetId().getParentKey()); } for (PatchSetApproval a : db.patchSetApprovals().closedByUserAll(account)) { resultChanges.add(a.getPatchSetId().getParentKey()); } } return resultChanges; } /** * @return a set of all the changes referencing tracking id. This method find * all changes with a reference to the given external tracking id. * The returned changes are unique and sorted by time stamp, newer first. */ private Set<Change.Id> changesReferencingTr(final ReviewDb db, final String trackingId) throws OrmException { final Set<Change.Id> resultChanges = new HashSet<Change.Id>(); for (final TrackingId tr : db.trackingIds().byTrackingId( new TrackingId.Id(trackingId))) { resultChanges.add(tr.getChangeId()); } return resultChanges; } private abstract class QueryNext implements Action<SingleListChangeInfo> { protected final String pos; protected final int limit; protected final int slim; QueryNext(final int pageSize, final String pos) { this.pos = pos; this.limit = safePageSize(pageSize); this.slim = limit + 1; } public SingleListChangeInfo run(final ReviewDb db) throws OrmException { final AccountInfoCacheFactory ac = accountInfoCacheFactory.create(); final SingleListChangeInfo d = new SingleListChangeInfo(); final Set<Change.Id> starred = currentUser.get().getStarredChanges(); boolean results = true; String sortKey = pos; final ArrayList<ChangeInfo> list = new ArrayList<ChangeInfo>(); while (results && list.size() < slim) { results = false; final ResultSet<Change> rs = query(db, slim, sortKey); for (final Change c : rs) { results = true; if (canRead(c)) { final ChangeInfo ci = new ChangeInfo(c); ac.want(ci.getOwner()); ci.setStarred(starred.contains(ci.getId())); list.add(ci); if (list.size() == slim) { rs.close(); break; } } sortKey = c.getSortKey(); } } final boolean atEnd = finish(list); d.setChanges(list, atEnd); d.setAccounts(ac.create()); return d; } boolean finish(final ArrayList<ChangeInfo> list) { final boolean atEnd = list.size() <= limit; if (list.size() == slim) { list.remove(limit); } return atEnd; } abstract ResultSet<Change> query(final ReviewDb db, final int slim, String sortKey) throws OrmException; } private abstract class QueryPrev extends QueryNext { QueryPrev(int pageSize, String pos) { super(pageSize, pos); } @Override boolean finish(final ArrayList<ChangeInfo> list) { final boolean atEnd = super.finish(list); Collections.reverse(list); return atEnd; } } }
true
true
private ResultSet<Change> searchQuery(final ReviewDb db, String query,
    final int limit, final String key, final Comparator<Change> cmp)
    throws OrmException {
  List<Change> result = new ArrayList<Change>();
  final HashSet<Change.Id> want = new HashSet<Change.Id>();
  query = query.trim();
  if (query.matches("^[1-9][0-9]*$")) {
    want.add(Change.Id.parse(query));
  } else if (query.matches("^[iI][0-9a-f]{4,}.*$")) {
    if (query.startsWith("i")) {
      query = "I" + query.substring(1);
    }
    final Change.Key a = new Change.Key(query);
    final Change.Key b = a.max();
    filterBySortKey(result, db.changes().byKeyRange(a, b), cmp, key);
    Collections.sort(result, cmp);
    if (limit < result.size()) {
      result = result.subList(0, limit);
    }
  } else if (query.matches("^([0-9a-fA-F]{4," + RevId.LEN + "})$")) {
    final RevId id = new RevId(query);
    final ResultSet<PatchSet> patches;
    if (id.isComplete()) {
      patches = db.patchSets().byRevision(id);
    } else {
      patches = db.patchSets().byRevisionRange(id, id.max());
    }
    for (PatchSet p : patches) {
      want.add(p.getId().getParentKey());
    }
  } else if (query.contains("owner:")) {
    String[] parsedQuery = query.split(":");
    if (parsedQuery.length > 1) {
      filterBySortKey(result, changesCreatedBy(db, parsedQuery[1]), cmp, key);
    }
  } else if (query.contains("reviewer:")) {
    String[] parsedQuery = query.split(":");
    if (parsedQuery.length > 1) {
      want.addAll(changesReviewedBy(db, parsedQuery[1]));
    }
  } else if (query.contains("tr:")) {
    String[] parsedQuery = query.split(":");
    if (parsedQuery.length > 1) {
      want.addAll(changesReferencingTr(db, parsedQuery[1]));
    }
  }
  if (result.isEmpty() && want.isEmpty()) {
    return new ListResultSet<Change>(Collections.<Change> emptyList());
  }
  filterBySortKey(result, db.changes().get(want), cmp, key);
  Collections.sort(result, cmp);
  if (limit < result.size()) {
    result = result.subList(0, limit);
  }
  return new ListResultSet<Change>(result);
}
private ResultSet<Change> searchQuery(final ReviewDb db, String query,
    final int limit, final String key, final Comparator<Change> cmp)
    throws OrmException {
  List<Change> result = new ArrayList<Change>();
  final HashSet<Change.Id> want = new HashSet<Change.Id>();
  query = query.trim();
  if (query.matches("^[1-9][0-9]*$")) {
    want.add(Change.Id.parse(query));
  } else if (query.matches("^[iI][0-9a-f]{4,}.*$")) {
    if (query.startsWith("i")) {
      query = "I" + query.substring(1);
    }
    final Change.Key a = new Change.Key(query);
    final Change.Key b = a.max();
    filterBySortKey(result, db.changes().byKeyRange(a, b), cmp, key);
    Collections.sort(result, cmp);
    if (limit < result.size()) {
      result = result.subList(0, limit);
    }
  } else if (query.matches("^([0-9a-fA-F]{4," + RevId.LEN + "})$")) {
    final RevId id = new RevId(query);
    final ResultSet<PatchSet> patches;
    if (id.isComplete()) {
      patches = db.patchSets().byRevision(id);
    } else {
      patches = db.patchSets().byRevisionRange(id, id.max());
    }
    for (PatchSet p : patches) {
      want.add(p.getId().getParentKey());
    }
  } else if (query.contains("owner:")) {
    String[] parsedQuery = query.split(":");
    if (parsedQuery.length > 1) {
      filterBySortKey(result, changesCreatedBy(db, parsedQuery[1]), cmp, key);
    }
  } else if (query.contains("reviewer:")) {
    String[] parsedQuery = query.split(":");
    if (parsedQuery.length > 1) {
      want.addAll(changesReviewedBy(db, parsedQuery[1]));
    }
  } else if (query.contains("bug:") || query.contains("tr:")) {
    String[] parsedQuery = query.split(":");
    if (parsedQuery.length > 1) {
      want.addAll(changesReferencingTr(db, parsedQuery[1]));
    }
  }
  if (result.isEmpty() && want.isEmpty()) {
    return new ListResultSet<Change>(Collections.<Change> emptyList());
  }
  filterBySortKey(result, db.changes().get(want), cmp, key);
  Collections.sort(result, cmp);
  if (limit < result.size()) {
    result = result.subList(0, limit);
  }
  return new ListResultSet<Change>(result);
}
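The only behavioral difference between the buggy and fixed versions of searchQuery is the query-prefix dispatch: a query containing `bug:` is now routed to the same tracking-id lookup as `tr:`. A minimal sketch of that dispatch in isolation is shown below; `TrackingQueryDispatch` and `lookupTrackingId` are hypothetical stand-ins for the real `changesReferencingTr`, which needs a live ReviewDb.

```java
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class TrackingQueryDispatch {
    // Hypothetical stand-in for changesReferencingTr(db, trackingId);
    // it just echoes the id so the dispatch can be exercised on its own.
    static Set<String> lookupTrackingId(String trackingId) {
        return Collections.singleton("change-for-" + trackingId);
    }

    // Mirrors the fixed branch: both "bug:<id>" and "tr:<id>" reach the
    // tracking-id lookup; anything else yields an empty result.
    static Set<String> dispatch(String query) {
        if (query.contains("bug:") || query.contains("tr:")) {
            String[] parsed = query.split(":");
            if (parsed.length > 1) {
                return new HashSet<>(lookupTrackingId(parsed[1]));
            }
        }
        return Collections.emptySet();
    }

    public static void main(String[] args) {
        System.out.println(dispatch("bug:4217")); // now matched, same as tr:
        System.out.println(dispatch("tr:4217"));
    }
}
```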
diff --git a/Servers/JavaServer/src/dbs/search/qb/QueryBuilder.java b/Servers/JavaServer/src/dbs/search/qb/QueryBuilder.java index 0fb8dea7..8f2d749e 100644 --- a/Servers/JavaServer/src/dbs/search/qb/QueryBuilder.java +++ b/Servers/JavaServer/src/dbs/search/qb/QueryBuilder.java @@ -1,1456 +1,1461 @@ package dbs.search.qb; import java.util.StringTokenizer; import java.util.ArrayList; import java.util.Vector; import java.util.List; import java.util.Iterator; import java.util.Set; import dbs.api.DBSApiDQLogic; import dbs.search.parser.Constraint; import dbs.search.graph.GraphUtil; import dbs.sql.DBSSql; import dbs.util.Validate; import edu.uci.ics.jung.graph.Vertex; import edu.uci.ics.jung.graph.Edge; public class QueryBuilder { int MAX_ITERATION = 999; KeyMap km; //RelationMap rm = new RelationMap(); private boolean upper; private ArrayList bindValues; private ArrayList bindIntValues; GraphUtil u = null; private String db = ""; private String countQuery = ""; public QueryBuilder(String db) { this.db = db; bindValues = new ArrayList(); bindIntValues = new ArrayList(); km = new KeyMap(); //u = GraphUtil.getInstance("/home/sekhri/DBS/Servers/JavaServer/etc/DBSSchemaGraph.xml"); u = GraphUtil.getInstance("WEB-INF/DBSSchemaGraph.xml"); upper = true; } private String owner() throws Exception { return DBSSql.owner(); } public String getCountQuery() { return countQuery; } public ArrayList getBindValues() { return bindValues; } public ArrayList getBindIntValues() { return bindIntValues; } public String genQuery(ArrayList kws, ArrayList cs, ArrayList okws) throws Exception{ return genQuery(kws, cs, okws, "", "", "", true); } private void checkMax(int iter) throws Exception { if(iter > MAX_ITERATION) throw new Exception("Unexpected query. Could not process this query"); } private void fixConstForLike(ArrayList cs) throws Exception { int iter = 0; for (int i =0 ; i!= cs.size(); ++i) { ++iter; checkMax(iter); Object obj = cs.get(i); if(i%2 == 0) { Constraint co = (Constraint)obj; String key = (String)co.getKey(); String op = (String)co.getOp(); String val = (String)co.getValue(); if(isNotLike(op)) co.setOp("not like"); if((val.indexOf('%') != -1 ) || (val.indexOf('*') != -1) ) { if(Util.isSame(op, "in")) throw new Exception("Operator in CANNOT be used with values that have * or % in them"); //System.out.println("Fixing conts"); if(Util.isSame(op, "!=") || isNotLike(op)) co.setOp("not like"); else co.setOp("like"); } } } } private boolean isNotLike(String token) { if(token == null) return false; token = token.toLowerCase(); if(token.startsWith("not") && (token.endsWith("like"))) return true; return false; } //public String genQuery(ArrayList kws, ArrayList cs, String begin, String end) throws Exception{ public String genQuery(ArrayList kws, ArrayList cs, ArrayList okws, String orderingkw, String begin, String end, boolean upper) throws Exception{ this.upper = upper; //Store all the keywors both from select and where in allKws fixConstForLike(cs); String personJoinQuery = ""; String parentJoinQuery = ""; String childJoinQuery = ""; String pathParentWhereQuery = ""; String groupByQuery = ""; String sumGroupByQuery = ""; String sumQuery = ""; String selectStr = "SELECT "; boolean invalidFile = false; boolean modByAdded = false; boolean createByAdded = false; boolean fileParentAdded = false; boolean fileChildAdded = false; boolean datasetParentAdded = false; boolean procDsParentAdded = false; boolean iLumi = isInList(kws, "ilumi"); boolean countPresent = false; boolean sumPresent = false; int iter = 0; 
ArrayList allKws = new ArrayList(); if(isInList(kws, "file") || isInList(kws, "file.status")) { invalidFile = true; allKws = addUniqueInList(allKws, "FileStatus"); } for (int i =0 ; i!= kws.size(); ++i) { ++iter; checkMax(iter); String aKw = (String)kws.get(i); if(aKw.toLowerCase().startsWith("count") || aKw.toLowerCase().endsWith("count")) countPresent = true; if(aKw.toLowerCase().startsWith("sum")) sumPresent = true; } if(sumPresent || countPresent) sumQuery += selectStr; String query = "SELECT DISTINCT \n\t"; + // If requested CLOB data, such as QueryableParameterSet.Content + // we should not either converted it to string data type + if (isInList(kws, "config.content") ) { + query = "SELECT \n\t"; + } for (int i =0 ; i!= kws.size(); ++i) { ++iter; checkMax(iter); String aKw = (String)kws.get(i); if (i!=0) query += "\n\t,"; //If path supplied in select then always use block path. If supplied in where then user procDS ID if(Util.isSame(aKw, "ilumi")) { query += getIntLumiSelectQuery(); //System.out.println("line 2.1.1"); } else if(aKw.toLowerCase().startsWith("sum")) { checkMax(iter); aKw = aKw.toLowerCase(); String keyword = aKw.substring(aKw.indexOf("(") + 1, aKw.indexOf(")")); keyword = keyword.trim(); String asKeyword = keyword.replace('.', '_'); String entity = (new StringTokenizer(keyword, ".")).nextToken(); //System.out.println("entity " + entity); String realName = u.getMappedRealName(entity); allKws = addUniqueInList(allKws, realName); //if(!sumQuery.startsWith("SELECT")) sumQuery += " SELECT "; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += "SUM(" + asKeyword + ") AS SUM_" + asKeyword + " "; //query += "SUM(" + km.getMappedValue(keyword, true) + ") AS SUM_" + keyword.replace('.', '_') ; String tmpKw = km.getMappedValue(keyword, true); query += tmpKw + " AS " + asKeyword ; if(iLumi) groupByQuery += tmpKw + ","; String tmp = makeQueryFromDefaults(u.getMappedVertex(entity)); tmp = tmp.substring(0, tmp.length() - 1); // To get rid of last space query += "\n\t," + tmp + "_SUM "; } else if(aKw.toLowerCase().startsWith("count")) { checkMax(iter); aKw = aKw.toLowerCase(); String entity = aKw.substring(aKw.indexOf("(") + 1, aKw.indexOf(")")); entity = entity.trim(); //System.out.println("entity = " + entity); String realName = u.getMappedRealName(entity); allKws = addUniqueInList(allKws, realName); String defaultStr = u.getDefaultFromVertex(u.getVertex(realName)); if(defaultStr.indexOf(",") != -1) throw new Exception("Cannot use count(" + entity + ")"); //query += "COUNT(DISTINCT " + realName + "." + defaultStr + ") AS COUNT"; query += realName + "." 
+ defaultStr + " AS COUNT_SUB_" + realName; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; //if(sumQuery.length() != 0) sumQuery += ",\n\t"; //else if(!sumQuery.startsWith("SELECT")) sumQuery += "SELECT "; sumQuery += "COUNT(DISTINCT COUNT_SUB_" + realName + ") AS COUNT_" + realName; /*if(sumPresent) { sumQuery += ",\n\t COUNT AS COUNT"; sumGroupByQuery += " COUNT ,"; }*/ } else if(Util.isSame(aKw, "dataset")) { checkMax(iter); allKws = addUniqueInList(allKws, "Block"); query += "Block.Path AS PATH"; if(iLumi) groupByQuery += "Block.Path,"; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += " PATH AS PATH"; sumGroupByQuery += " PATH ,"; } } else { //System.out.println("line 2.2"); if(iLumi && (i < 2) ) { allKws = addUniqueInList(allKws, "Runs"); allKws = addUniqueInList(allKws, "LumiSection"); checkMax(iter); } StringTokenizer st = new StringTokenizer(aKw, "."); int count = st.countTokens(); String token = st.nextToken(); Vertex vFirst = u.getMappedVertex(token); String real = u.getRealFromVertex(vFirst); allKws = addUniqueInList(allKws, real); //System.out.println("line 4"); //if(Util.isSame(real, "LumiSection")) allKws = addUniqueInList(allKws, "Runs"); if(count == 1) { //Get default from vertex //System.out.println("line 5"); checkMax(iter); String tmp = makeQueryFromDefaults(vFirst); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(vFirst); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect; sumGroupByQuery += toSelect + ","; } } } else { //System.out.println("line 6"); checkMax(iter); boolean addQuery = true; String token2 = st.nextToken(); String tmpTableName = token + "_" + token2; /*if(Util.isSame(token2, "algo")) { allKws = addUniqueInList(allKws, "AppFamily"); allKws = addUniqueInList(allKws, "AppVersion"); allKws = addUniqueInList(allKws, "AppExecutable"); allKws = addUniqueInList(allKws, "QueryableParameterSet"); query += makeQueryFromDefaults(u.getVertex("AppFamily")); query += makeQueryFromDefaults(u.getVertex("AppVersion")); query += makeQueryFromDefaults(u.getVertex("AppExecutable")); query += makeQueryFromDefaults(u.getVertex("QueryableParameterSet")); adQuery = false; }*/ if(Util.isSame(token2, "release") || Util.isSame(token2, "tier")) { checkMax(iter); String realName = u.getMappedRealName(token2);//AppVersion allKws = addUniqueInList(allKws, realName); String tmp = makeQueryFromDefaults(u.getVertex(realName)); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(u.getVertex(realName)); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token, "release") || Util.isSame(token, "tier")) { checkMax(iter); String realName = u.getMappedRealName(token);//AppVersion allKws = addUniqueInList(allKws, realName); String tmp = makeQueryFromDefaults(u.getVertex(realName)); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(u.getVertex(realName)); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, 
"count")) { checkMax(iter); String realName = u.getMappedRealName(token); String defaultStr = u.getDefaultFromVertex(u.getVertex(realName)); if(defaultStr.indexOf(",") != -1) throw new Exception("Cannot use count(" + token + ")"); query += realName + "." + defaultStr + " AS COUNT_SUB_" + realName; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; //if(sumQuery.length() != 0) sumQuery += ",\n\t"; //else sumQuery += "SELECT "; sumQuery += "COUNT(DISTINCT COUNT_SUB_" + realName + ") AS COUNT_" + realName; /*query += "COUNT(DISTINCT " + realName + "." + defaultStr + ") AS COUNT"; if(sumPresent) { sumQuery += ",\n\t COUNT AS COUNT "; sumGroupByQuery += " COUNT ,"; }*/ addQuery = false; } if(Util.isSame(token2, "modby") || Util.isSame(token2, "createby")) { checkMax(iter); boolean dontJoin = false; String personField = "CreatedBy"; if(Util.isSame(token2, "modby")) { if(modByAdded) dontJoin = true; modByAdded = true; personField = "LastModifiedBy"; } else { if(createByAdded) dontJoin = true; createByAdded = true; } //String tmpTableName = token + "_" + token2; if(!dontJoin) { personJoinQuery += "\tJOIN " + owner() + "Person " + tmpTableName + "\n" + "\t\tON " + real + "." + personField + " = " + tmpTableName + ".ID\n"; } String fqName = tmpTableName + ".DistinguishedName"; query += fqName + makeAs(tmpTableName + "_DN"); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += tmpTableName + "_DN AS " + tmpTableName + "_DN "; sumGroupByQuery += tmpTableName + "_DN ,"; } addQuery = false; } //if(Util.isSame(token2, "evnum") && Util.isSame(token, "file")) { // throw new Exception("You can find file based on file.evnum (find file where file.evenum = blah) but cannot find file.evnum"); //} if(Util.isSame(token2, "evnum") && Util.isSame(token, "lumi")) { throw new Exception("You can find lumi based on lumi.evnum (find lumi where lumi.evenum = blah) but cannot find lumi.evnum"); } if(Util.isSame(token2, "parent") && Util.isSame(token, "file")) { checkMax(iter); boolean dontJoin = false; if(fileParentAdded) dontJoin = true; fileParentAdded = true; if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "Files", "FileParentage"); String fqName = tmpTableName + ".LogicalFileName"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "child") && Util.isSame(token, "file")) { checkMax(iter); boolean dontJoin = false; if(fileChildAdded) dontJoin = true; fileChildAdded = true; //System.out.println("childJoinQuery " + childJoinQuery+ " dontJoin " + dontJoin); if(!dontJoin) childJoinQuery += handleChild(tmpTableName, "Files", "FileParentage"); String fqName = tmpTableName + ".LogicalFileName"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "parent") && Util.isSame(token, "procds")) { checkMax(iter); boolean dontJoin = false; if(procDsParentAdded) dontJoin = true; procDsParentAdded = true; 
if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "ProcessedDataset", "ProcDSParent"); String fqName = tmpTableName + ".Name"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "parent") && Util.isSame(token, "dataset")) { //System.out.println("line 8"); checkMax(iter); allKws = addUniqueInList(allKws, "Block"); boolean dontJoin = false; if(datasetParentAdded) dontJoin = true; datasetParentAdded = true; if(!dontJoin) pathParentWhereQuery += handlePathParent(); String fqName = "Block.Path AS Dataset_Parent"; query += fqName; if(iLumi) groupByQuery += "Block.Path ,"; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += " Dataset_Parent AS Dataset_Parent "; sumGroupByQuery += " Dataset_Parent ,"; } addQuery = false; } if(Util.isSame(token, "dataset")) { checkMax(iter); allKws = addUniqueInList(allKws, "ProcessedDataset"); } Vertex vCombined = u.getMappedVertex(aKw); //System.out.println("\n\n---Changing vCombined " + aKw); if(vCombined == null) { //System.out.println("IT is NULLLLLLLLLLLLL"); checkMax(iter); if(addQuery) { String mapVal = km.getMappedValue(aKw, true); //if(mapVal.equals(aKw)) throw new Exception("The keyword " + aKw + " not yet implemented in Query Builder" ); query += mapVal + makeAs(mapVal); if(iLumi) groupByQuery += mapVal + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(mapVal)); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } } } else { //System.out.println("in ELSE ---> u.getRealFromVertex " + u.getRealFromVertex(vCombined)); allKws = addUniqueInList(allKws, u.getRealFromVertex(vCombined)); checkMax(iter); if(addQuery) { checkMax(iter); String tmp = makeQueryFromDefaults(vCombined); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(vCombined); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } } } } } } checkMax(iter); if(iLumi && (cs.size() > 0) ) { allKws = addUniqueInList(allKws, "Runs"); allKws = addUniqueInList(allKws, "LumiSection"); } for (int i =0 ; i!= cs.size(); ++i) { ++iter; checkMax(iter); Object obj = cs.get(i); if(i%2 == 0) { Constraint o = (Constraint)obj; String key = (String)o.getKey(); if(Util.isSame(key, "dataset")) { } else if(Util.isSame(key, "release")) { if(isInList(kws, "procds") || isInList(kws, "dataset")) allKws = addUniqueInList(allKws, "ProcAlgo"); else addUniqueInList(allKws, "FileAlgo"); } else if(Util.isSame(key, "tier")) { if(isInList(kws, "procds") || isInList(kws, "dataset")) allKws = addUniqueInList(allKws, "ProcDSTier"); else addUniqueInList(allKws, "FileTier"); } else if(Util.isSame(key, "dq")) { allKws = addUniqueInList(allKws, km.getMappedValue(key, false)); } else if(Util.isSame(key, "pset")) { allKws = addUniqueInList(allKws, km.getMappedValue(key, false)); } else { if(Util.isSame(key, "file.status")) invalidFile = false; StringTokenizer st = new StringTokenizer(key, "."); int count = st.countTokens(); 
allKws = addUniqueInList(allKws, u.getMappedRealName(st.nextToken())); if(count != 1) { Vertex vCombined = u.getMappedVertex(key); if(vCombined != null) allKws = addUniqueInList(allKws, u.getRealFromVertex(vCombined)); } } /*else { //allKws = addUniqueInList(allKws, "ProcessedDataset"); allKws = addUniqueInList(allKws, "Block"); }*/ } } //Get the route which determines the join table if(allKws.size() > 0) allKws = makeCompleteListOfVertexs(allKws); //If File is not there then add Block //Otherwise for (int i =0 ; i!= cs.size(); ++i) { ++iter; checkMax(iter); Object obj = cs.get(i); if(i%2 == 0) { Constraint o = (Constraint)obj; String key = (String)o.getKey(); if(Util.isSame(key, "dataset")) { if(!isIn(allKws, "Files")) allKws = addUniqueInList(allKws, "Block"); }else if(key.startsWith("dataset")) allKws = addUniqueInList(allKws, "ProcessedDataset"); } } if(allKws.size() > 0) { allKws = makeCompleteListOfVertexs(allKws); allKws = sortVertexs(allKws); } int len = allKws.size(); /*for(int i = 0 ; i != len ; ++i ) { System.out.println("kw " + (String)allKws.get(i)); }*/ if(isInList(kws, "ilumi")) { if(len == 0) query += getIntLumiFromQuery(); else { query += genJoins(allKws); query += getIntLumiJoinQuery(); } } else query += genJoins(allKws); query += personJoinQuery; query += parentJoinQuery; query += childJoinQuery; personJoinQuery = ""; parentJoinQuery = ""; childJoinQuery = ""; String queryWhere = ""; if (cs.size() > 0) queryWhere += "\nWHERE\n"; for (int i =0 ; i!= cs.size(); ++i) { ++iter; checkMax(iter); Object obj = cs.get(i); if(i%2 == 0) { Constraint co = (Constraint)obj; String key = (String)co.getKey(); String op = (String)co.getOp(); String val = (String)co.getValue(); if(Util.isSame(key, "dataset")) { if(pathParentWhereQuery.length() > 0) { queryWhere += pathParentWhereQuery + ""; bindValues.add(val); }else { // If path is given in where clause it should op should always be = //if(!Util.isSame(op, "=")) throw new Exception("When Path is provided operater should be = . Invalid operater given " + op); //queryWhere += "\tProcessedDataset.ID " + handlePath(val); if(isIn(allKws, "Files")) queryWhere += "\tFiles.Block "; else queryWhere += "\tBlock.ID "; queryWhere += handlePath(val, op); } } else if(Util.isSame(key, "dq")) { if(!Util.isSame(op, "=")) throw new Exception("When dq is provided operator should be = . Invalid operator given " + op); queryWhere += "\tRuns.ID" + handleDQ(val, cs); } else if(Util.isSame(key, "pset")) { if(!Util.isSame(op, "=")) throw new Exception("When pset is provided operator should be = . Invalid operator given " + op); queryWhere += "\tQueryableParameterSet.Hash" + handlePset(val); } else if(Util.isSame(key, "site")) { //if(!Util.isSame(op, "=") && !Util.isSame(op, "in")) throw new Exception("When site is provided operator should be = . 
Invalid operator given " + op); queryWhere += "\t(StorageElement.SEName" + handleSite(val, op) + ")"; //queryWhere += "\tStorageElement.SEName" + handleSite(val, op); } else if(Util.isSame(key, "release")) { boolean useAnd = false; if(isInList(kws, "procds") || isInList(kws, "dataset")) { queryWhere += "\tProcAlgo.Algorithm " + handleRelease(op, val); useAnd = true; } else { if(useAnd) queryWhere += "\tAND\n"; queryWhere += "\tFileAlgo.Algorithm " + handleRelease(op, val); } } else if(Util.isSame(key, "tier")) { boolean useAnd = false; if(isInList(kws, "procds") || isInList(kws, "dataset")) { queryWhere += "\tProcDSTier.DataTier " + handleTier(op, val); useAnd = true; } else { if(useAnd) queryWhere += "\tAND\n"; queryWhere += "\tFileTier.DataTier " + handleTier(op, val); } } else if(Util.isSame(key, "file.release")) { queryWhere += "\tFileAlgo.Algorithm" + handleRelease(op, val); } else if(Util.isSame(key, "file.tier")) { queryWhere += "\tFileTier.DataTier" + handleTier(op, val); } else if(Util.isSame(key, "lumi.evnum")) { if(!Util.isSame(op, "=")) throw new Exception("When evnum is provided operator should be = . Invalid operator given " + op); queryWhere += handleEvNum(val); } else if(Util.isSame(key, "procds.release") || Util.isSame(key, "dataset.release")) { queryWhere += "\tProcAlgo.Algorithm " + handleRelease(op, val); } else if(Util.isSame(key, "procds.tier") || Util.isSame(key, "dataset.tier") ) { queryWhere += "\tProcDSTier.DataTier" + handleTier(op, val); } else if(key.endsWith("createdate") || key.endsWith("moddate")) { queryWhere += "\t" + km.getMappedValue(key, true) + handleDate(op, val); } else { //if(key.indexOf(".") == -1) throw new Exception("In specifying constraints qualify keys with dot operater. Invalid key " + key); StringTokenizer st = new StringTokenizer(key, "."); int count = st.countTokens(); boolean doGeneric = false; if(count == 2) { String token = st.nextToken(); String token2 = st.nextToken(); String tmpTableName = token + "_" + token2; if(Util.isSame(token2, "modby") || Util.isSame(token2, "createby")) { boolean dontJoin = false; String personField = "CreatedBy"; if(Util.isSame(token2, "modby")) { if(modByAdded) dontJoin = true; personField = "LastModifiedBy"; modByAdded = true; } else { if(createByAdded) dontJoin = true; createByAdded = true; } //String tmpTableName = token + "_" + token2; if(!dontJoin) personJoinQuery += "\tJOIN " + owner() + "Person " + tmpTableName + "\n" + "\t\tON " + u.getMappedRealName(token) + "." 
+ personField + " = " + tmpTableName + ".ID\n"; queryWhere += tmpTableName + ".DistinguishedName "; } else if(Util.isSame(token2, "parent") && Util.isSame(token, "file")) { boolean dontJoin = false; if(fileParentAdded) dontJoin = true; fileParentAdded = true; if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "Files", "FileParentage"); queryWhere += tmpTableName + ".LogicalFileName "; } else if(Util.isSame(token2, "parent") && Util.isSame(token, "procds")) { boolean dontJoin = false; if(procDsParentAdded) dontJoin = true; procDsParentAdded = true; //String tmpTableName = token + "_" + token2; if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "ProcessedDataset", "ProcDSParent"); queryWhere += tmpTableName + ".Name "; } else if(Util.isSame(token2, "child") && Util.isSame(token, "file")) { boolean dontJoin = false; if(fileChildAdded) dontJoin = true; fileChildAdded = true; if(!dontJoin) childJoinQuery += handleChild(tmpTableName, "Files", "FileParentage"); queryWhere += tmpTableName + ".LogicalFileName "; } else doGeneric = true; }else doGeneric = true; if(doGeneric) { //Vertex vFirst = u.getMappedVertex(token); Vertex vCombined = u.getMappedVertex(key); if(vCombined == null) { if(Util.isSame(op, "like") || Util.isSame(op, "not like")) queryWhere += "\t " + makeUpper(km.getMappedValue(key, true)) ; else queryWhere += "\t" + km.getMappedValue(key, true) + " " ; } else { if(Util.isSame(op, "like") || Util.isSame(op, "not like")) queryWhere += "\t " + makeUpper(u.getRealFromVertex(vCombined) + "." + u.getDefaultFromVertex(vCombined)) ; else queryWhere += "\t" + u.getRealFromVertex(vCombined) + "." + u.getDefaultFromVertex(vCombined) + " "; //FIXME default can be list } } queryWhere += handleOp(op, val, bindValues); } } else { //System.out.println("REL " + (String)obj); queryWhere += "\n" + ((String)obj).toUpperCase() + "\n"; } } //System.out.println("\n\nFINAL query is \n\n" + query); String circularConst = ""; boolean useAnd = false; if((queryWhere.length() == 0) && isIn(allKws, "FileRunLumi")) circularConst = "\nWHERE "; if(isIn(allKws, "Files") && isIn(allKws, "FileRunLumi")) { if(queryWhere.length() != 0 || useAnd) circularConst += "\n\tAND "; circularConst += "FileRunLumi.Fileid = Files.ID"; useAnd = true; } if(isIn(allKws, "Runs") && isIn(allKws, "FileRunLumi")) { if(queryWhere.length() != 0 || useAnd) circularConst += "\n\tAND "; circularConst += "\n\tFileRunLumi.Run = Runs.ID"; useAnd = true; } if(isIn(allKws, "LumiSection") && isIn(allKws, "FileRunLumi")) { if(queryWhere.length() != 0 || useAnd) circularConst += "\n\tAND "; circularConst += "\n\tFileRunLumi.Lumi = LumiSection.ID"; } String invalidFileQuery = "FileStatus.Status <> ?"; String invalidConst = ""; if((queryWhere.length() == 0) && (circularConst.length() == 0) && (invalidFile)) { invalidConst = "\nWHERE " + invalidFileQuery; bindValues.add("INVALID"); } if(((queryWhere.length() != 0) || (circularConst.length() != 0)) && (invalidFile)) { invalidConst = "\nAND " + invalidFileQuery; bindValues.add("INVALID"); } query += personJoinQuery + parentJoinQuery + childJoinQuery + queryWhere + circularConst + invalidConst; if(groupByQuery.length() > 0) { groupByQuery = groupByQuery.substring(0, groupByQuery.length() - 1);// to get rid of extra comma query += "\n GROUP BY " + groupByQuery; } boolean orderOnce = false; for(Object o: okws){ ++iter; checkMax(iter); String orderBy = (String)o; if(!orderOnce) { query += " ORDER BY "; } if(orderOnce) query += ","; String orderToken = ""; if(Util.isSame(orderBy, "dataset")) 
orderToken = "Block.Path"; else { Vertex vCombined = u.getMappedVertex(orderBy); if(vCombined == null) orderToken = km.getMappedValue(orderBy, true); else orderToken = u.getRealFromVertex(vCombined) + "." + u.getDefaultFromVertex(vCombined); } query += orderToken; orderOnce = true; } if(okws.size() > 0) { if (Util.isSame(orderingkw, "asc")) query += " ASC"; else if (Util.isSame(orderingkw, "desc")) query += " DESC"; } if(sumQuery.length() != 0) { query = sumQuery + " FROM (" + query + ") sumtable "; if(sumGroupByQuery.length() > 0) { sumGroupByQuery = sumGroupByQuery.substring(0, sumGroupByQuery.length() - 1);// to get rid of extra comma query += "\n GROUP BY " + sumGroupByQuery; } } //countQuery = "SELECT COUNT(*) " + query.substring(query.indexOf("FROM")); countQuery = "SELECT COUNT(*) AS CNT FROM (" + query + ") x"; if(!begin.equals("") && !end.equals("")) { int bInt = Integer.parseInt(begin); int eInt = Integer.parseInt(end); bindIntValues.add(new Integer(bInt)); if(db.equals("mysql")) { bindIntValues.add(new Integer(eInt - bInt)); query += "\n\tLIMIT ?, ?"; } if(db.equals("oracle")) { bindIntValues.add(new Integer(eInt)); //query = "SELECT * FROM (SELECT x.*, rownum as rnum FROM (\n" + query + "\n) x) where rnum between ? and ?"; query = genOraclePageQuery(query); } } return query; } private String makeSumSelect(String tmp) { String asStr = "AS"; int asIndex = tmp.indexOf(asStr); if(asIndex != -1) { return tmp.substring(asIndex + asStr.length(), tmp.length()).trim(); } return ""; } private String makeUpper(String in) { if(upper) return " upper(" + in + ") "; return " " + in + " "; } private String makeAs(String in) { int len = in.length(); int endIndex = len; if (len > 30) endIndex = 30; return " AS " + in.replace('.', '_').substring(0, endIndex) + " "; } private String genOraclePageQuery(String query) throws Exception{ System.out.println(query); String tokenAS = "AS"; String tokenFrom = "FROM"; //String tokenDistinct = "DISTINCT"; String tokenDistinct = "SELECT"; int indexOfFrom = query.indexOf(tokenFrom); int indexOfDistinct = query.indexOf(tokenDistinct); if(indexOfFrom == -1 || indexOfDistinct == -1) return query; //System.out.println(indexOfFrom); //System.out.println(indexOfDistinct); String tmpStr = query.substring(indexOfDistinct + tokenDistinct.length(), indexOfFrom); //System.out.println("tmp str " + tmpStr); StringTokenizer st = new StringTokenizer(tmpStr, ","); int numberOfKeywords = st.countTokens(); String toReturn = "SELECT "; int iter = 0 ; for(int i = 0; i != numberOfKeywords; ++i) { ++iter; checkMax(iter); String tmpToken = st.nextToken(); int indexOfAs = tmpToken.indexOf(tokenAS); if(indexOfAs == -1) return query; String finalKeyword = tmpToken.substring(indexOfAs + tokenAS.length(), tmpToken.length()).trim(); //System.out.println("Keyword " + finalKeyword); if(i != 0) toReturn += ", "; toReturn += finalKeyword; } toReturn += " FROM (SELECT x.*, rownum as rnum FROM (\n" + query + "\n) x) where rnum between ? and ?"; return toReturn; } private String makeQueryFromDefaults(Vertex v) throws Exception { String realVal = u.getRealFromVertex(v); StringTokenizer st = new StringTokenizer(u.getDefaultFromVertex(v), ","); int countDefTokens = st.countTokens(); String query = ""; int iter = 0 ; for (int j = 0; j != countDefTokens; ++j) { ++iter; checkMax(iter); if(j != 0) query += ","; String token = st.nextToken(); query += realVal + "." + token + makeAs(realVal + "." 
+ token); } return query; } private String makeGroupQueryFromDefaults(Vertex v) throws Exception { String realVal = u.getRealFromVertex(v); StringTokenizer st = new StringTokenizer(u.getDefaultFromVertex(v), ","); int countDefTokens = st.countTokens(); String query = ""; int iter = 0 ; for (int j = 0; j != countDefTokens; ++j) { ++iter; checkMax(iter); String token = st.nextToken(); query += realVal + "." + token + ","; } return query; } private String genJoins(ArrayList lKeywords) throws Exception { //ArrayList uniquePassed = new ArrayList(); int iter = 0 ; String prev = ""; String query = "\nFROM\n\t" + owner() + (String)lKeywords.get(0) + "\n"; int len = lKeywords.size(); for(int i = 1 ; i != len ; ++i ) { ++iter; checkMax(iter); for(int j = (i-1) ; j != -1 ; --j ) { ++iter; checkMax(iter); String v1 = (String)lKeywords.get(i); String v2 = (String)lKeywords.get(j); //if(! (isIn(uniquePassed, v1 + "," + v2 )) && !(isIn(uniquePassed, v2 + "," + v1))) { if(u.doesEdgeExist(v1, v2)) { //System.out.println("Relation bwteen " + v1 + " and " + v2 + " is " + u.getRealtionFromVertex(v1, v2)); String tmp = u.getRealtionFromVertex(v1, v2); query += "\t"; if(Util.isSame(v1, "FileChildage")) v1 = "FileParentage"; if(Util.isSame(v1, "FileParentage") || Util.isSame(v1, "ProcDSParent")) query += "LEFT OUTER "; query += "JOIN " + owner() + v1 + "\n"; query += "\t\tON " + tmp + "\n"; //uniquePassed.add(v1 + "," + v2); break; } //} } } return query; } private boolean isIn(ArrayList aList, String key) throws Exception { int iter = 0 ; for (int i = 0 ; i != aList.size(); ++i) { if( ((String)(aList.get(i) )).equals(key)) return true; ++iter; checkMax(iter); } return false; } /*private String genJoins(String[] routes) { String prev = ""; String query = "\nFROM\n\t"; for(String s: routes) { if(!prev.equals("")) { //System.out.println(prev + "," + s); String tmp = rm.getMappedValue(prev + "," + s); //System.out.println(tmp); query += "\tJOIN " + s + "\n"; query += "\t\tON " + tmp + "\n"; } else query += s + "\n"; prev = s; } return query; }*/ private String handleParent(String tmpTableName, String table1, String table2) throws Exception { return ( "\tLEFT OUTER JOIN " + owner() + table1 + " " + tmpTableName + "\n" + "\t\tON " + tmpTableName + ".ID = " + table2 + ".ItsParent\n" ); } private String handleChild(String tmpTableName, String table1, String table2) throws Exception { return ( "\tLEFT OUTER JOIN " + owner() + table1 + " " + tmpTableName + "\n" + "\t\tON " + tmpTableName + ".ID = " + table2 + ".ThisFile\n" ); } private String handlePathParent() throws Exception { String sql = "Block.ID in \n" + "\t(" + DBSSql.listPathParent() + ")\n"; return sql; } private String handleLike(String val, List<String> bindValues) { bindValues.add(val.replace('*','%')); return "LIKE " + makeUpper("?"); } private String handleBetween(String val, List<String> bindValues) throws Exception { String token[] = val.split("and"); if(token.length != 2) throw new Exception("Invalid syntax is used for between keyword \n " + val + "\n The valid syntax exmaple is where abc between 34 and 13"); bindValues.add(token[0]); bindValues.add(token[1]); return "BETWEEN ? 
AND ?"; } private String handleNotLike(String val, List<String> bindValues) { bindValues.add(val.replace('*','%')); return "NOT LIKE " + makeUpper("?"); } private String handleIn(String val, List<String> bindValues) throws Exception { String query = "IN ("; StringTokenizer st = new StringTokenizer(val, ","); int count = st.countTokens(); int iter = 0 ; for(int k = 0 ; k != count ; ++k) { ++iter; checkMax(iter); if(k != 0) query += ","; //query += "'" + st.nextToken() + "'"; query += "?"; bindValues.add(st.nextToken()); } query += ")"; return query; } private String handleOp(String op, String val, List<String> bindValues) throws Exception { String query = ""; if(Util.isSame(op, "in")) query += handleIn(val, bindValues); else if(Util.isSame(op, "like")) query += handleLike(val, bindValues); else if(Util.isSame(op, "not like")) query += handleNotLike(val, bindValues); else if(Util.isSame(op, "between")) query += handleBetween(val, bindValues); else { query += op + " ?\n"; bindValues.add(val); } return query; } private String handleEvNum(String val) { String query = "\tLumiSection.StartEventNumber <= ?\n" + "\t AND \n" + "\tLumiSection.EndEventNumber >= ?\n"; bindValues.add(val); bindValues.add(val); return query; } /*private String handlePath(String path) throws Exception { Validate.checkPath(path); String[] data = path.split("/"); if(data.length != 4) { throw new Exception("Invalid path " + path); } ArrayList route = new ArrayList(); route.add("PrimaryDataset"); route.add("ProcessedDataset"); String query = " IN ( \n" + "SELECT \n" + "\tProcessedDataset.ID " + genJoins(route) + "WHERE \n" + //"\tPrimaryDataset.Name = '" + data[1] + "'\n" + "\tPrimaryDataset.Name = ?\n" + "\tAND\n" + //"\tProcessedDataset.Name = '" + data[2] + "'" + "\tProcessedDataset.Name = ?" + ")"; bindValues.add(data[1]); bindValues.add(data[2]); return query; }*/ private String handleDate(String op, String val) throws Exception { if(Util.isSame(op, "in")) throw new Exception("Operator IN not supported with date. Please use =, < or >"); if(Util.isSame(op, "like") || Util.isSame(op, "not like")) throw new Exception("Operator LIKE is not supported with date. Please use =, < or >"); String query = ""; String epoch1 = String.valueOf(DateUtil.dateStr2Epoch(val) / 1000); if(Util.isSame(op, "=")) { String epoch2 = String.valueOf(DateUtil.getNextDate(val).getTime() / 1000); query += " BETWEEN ? 
AND ?\n"; bindValues.add(epoch1); bindValues.add(epoch2); } else { query += " " + op + " ?\n"; bindValues.add(epoch1); } return query; } private String handlePath(String path, String op) throws Exception { String query = " IN ( \n" + "SELECT \n" + "\tBlock.ID FROM " + owner() + "Block" + "\tWHERE \n" ; //"\tBlock.Path " + op + " '" + path + "'\n" + if(Util.isSame(op, "like") || Util.isSame(op, "not like")) query += "\t" + makeUpper("Block.Path"); else query += "\tBlock.Path ";// + op + " ?\n" + //")"; /*if(Util.isSame(op, "in")) query += handleIn(path); else if(Util.isSame(op, "like")) query += handleLike(path); else { query += op + " ?\n"; bindValues.add(path); }*/ query += handleOp(op, path, bindValues) + ")"; return query; } private String handleDQ(String dqVal, List<?> cs) throws Exception { boolean found = false; int iter = 0; ArrayList<String> tmpBindValues = new ArrayList<String>(); StringBuffer dsQueryForDQ = new StringBuffer("SELECT BLOCK.PATH FROM \n"); dsQueryForDQ.append(owner()); dsQueryForDQ.append("BLOCK WHERE \n"); Object lastObj = new String(""); for (int i =0 ; i!= cs.size(); ++i) { ++iter; checkMax(iter); Object obj = cs.get(i); if(i%2 == 0) { Constraint co = (Constraint)obj; String key = (String)co.getKey(); String op = (String)co.getOp(); String val = (String)co.getValue(); if(Util.isSame(key, "dataset")) { if(found) { dsQueryForDQ.append("\n"); dsQueryForDQ.append(((String)lastObj).toUpperCase()); dsQueryForDQ.append("\n"); } if(Util.isSame(op, "like") || Util.isSame(op, "not like")) dsQueryForDQ.append("\t" + makeUpper("Block.Path")); else dsQueryForDQ.append("\tBlock.Path "); dsQueryForDQ.append(handleOp(op, val, tmpBindValues)); found = true; } } lastObj = obj; } System.out.println("QUERY is " + dsQueryForDQ.toString()); //for (String s: tmpBindValues) System.out.println("BValue " + s); if(!found) throw new Exception("dataset is required when using dq queries. Please provide a dataset name in the query. 
Example query would be : find run where dq=blahblah and dataset = /prim/proc/tier"); //System.out.println("VAL is " + dqVal); //Pass in dsQueryForDQ.toString() and tmpBindValues to DBSSql.listRunsForRunLumiDQ //ArrayList sqlObj = DBSSql.listRunsForRunLumiDQ(null, dqVal); ArrayList sqlObj = (new DBSApiDQLogic(null)).listRunsForRunLumiDQ(null, dsQueryForDQ.toString(), tmpBindValues, dqVal); String dqQuery = ""; if(sqlObj.size() == 2) { dqQuery = (String)sqlObj.get(0); Vector bindVals = (Vector)sqlObj.get(1); iter = 0 ; for(Object s: bindVals) { ++iter; checkMax(iter); bindValues.add((String)s); } } //call DQ function //List<String> bindValuesFromDQ = ; //Get from DQ function //for(String s: bindValues) bindValues.add(s); String query = " IN ( \n" + dqQuery + ")"; return query; } private String handlePset(String val) throws Exception { System.out.println("VAL is " + val); CfgClient cc = new CfgClient(); List<String> hashs = cc.getPsetHash(val); String query = " IN ( \n"; int count = 0; int iter = 0 ; for (String aHash: hashs) { ++iter; checkMax(iter); //System.out.println("Hash is " + aHash); if(count != 0) query += ","; ++count; query += "?"; bindValues.add(aHash); } if(count == 0) { query += "?"; bindValues.add("HASH_NOT_RETURNED_FROM_INDEX_SERVICE"); } query += "\n)"; return query; } private String handleSite(String val, String op) throws Exception { System.out.println("VAL is " + val); String extraQuery = ""; if(Util.isSame(op, "like")) extraQuery = "\t" + makeUpper("StorageElement.SEName"); if(Util.isSame(op, "not like")) throw new Exception("NOT LIKE is not supported with site"); else extraQuery = "\tStorageElement.SEName "; String query = " IN ( \n"; if(Util.isSame(op, "!=")) query = " NOT IN ( \n"; SiteClient cc = new SiteClient(); int iter = 0 ; Vector tmpList = new Vector(); if(op.equals("in")) { extraQuery += "IN (" ; StringTokenizer st = new StringTokenizer(val, ","); int numTokens = st.countTokens(); for(int k = 0 ; k != numTokens ; ++k) { ++iter; checkMax(iter); String nextToken = st.nextToken().trim(); if(k != 0) extraQuery += ","; extraQuery += "?"; tmpList.add(nextToken); List<String> sites = cc.getSE(nextToken); int count = 0; for (String aSite: sites) { //System.out.println("Hash is " + aSite); if(k != 0 || count != 0) query += ","; ++count; query += "?"; bindValues.add(aSite); } } extraQuery += ")\n"; } else { val = val.replace('*', '%'); if(Util.isSame(op, "like")) extraQuery += " LIKE " + makeUpper("?") + "\n"; else extraQuery += " = ? \n"; tmpList.add(val); //System.out.println("-line 1 op is " + op ); List<String> sites = cc.getSE(val); if(sites.size() == 1) { bindValues.add((String)sites.get(0)); if(Util.isSame(op, "like")) query = " LIKE ? "; if(Util.isSame(op, "in")) query = " IN (?) "; if(Util.isSame(op, "=")) query = " = ? "; if(Util.isSame(op, "!=")) { query = " != ? 
"; return query; } query += "\n OR \n" + extraQuery; for(Object o: tmpList) bindValues.add((String)o); return query; } int count = 0; for (String aSite: sites) { ++iter; checkMax(iter); //System.out.println("Hash is " + aSite); if(count != 0) query += ","; ++count; query += "?"; bindValues.add(aSite); } /*if(count == 0) { query += "?"; bindValues.add("SITE_NOT_RETURNED_FROM_SITE_DB"); }*/ } query += "\n)"; if(Util.isSame(op, "!=")) return query; query += "\n OR \n" + extraQuery ; for(Object o: tmpList) bindValues.add((String)o); return query; } private String handleRelease(String op, String version) throws Exception { Validate.checkWord("AppVersion", version); ArrayList route = new ArrayList(); route.add("AlgorithmConfig"); route.add("AppVersion"); String query = " IN ( \n" + "SELECT \n" + "\tAlgorithmConfig.ID " + genJoins(route) + "WHERE \n" + //"\tAppVersion.Version = '" + version + "'\n" + "\tAppVersion.Version " + handleOp(op, version, bindValues) + "\n" + ")"; return query; } private String handleTier(String op, String tier) throws Exception { Validate.checkWord("DataTier", tier); ArrayList route = new ArrayList(); String query = " IN ( \n" + "SELECT \n" + "\tDataTier.ID FROM " + owner() + "DataTier " + "WHERE \n" + "\tDataTier.Name " + handleOp(op, tier, bindValues) + "\n" + ")"; return query; } private ArrayList addUniqueInList(ArrayList keyWords, String aKw) { for(Object kw: keyWords) { if(((String)kw).equals(aKw))return keyWords; } keyWords.add(aKw); return keyWords; } private boolean isInList(ArrayList keyWords, String aKw) { //System.out.println("line 3.1"); for(Object kw: keyWords) { if(((String)kw).equals(aKw))return true; } return false; } private String getIntLumiSelectQuery() { return ("\n\tSUM(ldblsum.INSTANT_LUMI * 93 * (1 - ldblsum.DEADTIME_NORMALIZATION) * ldblsum.NORMALIZATION) AS INTEGRATED_LUMINOSITY, " + "\n\tSUM(ldblsum.INSTANT_LUMI_ERR * ldblsum.INSTANT_LUMI_ERR) AS INTEGRATED_ERROR"); } private String getIntLumiFromQuery() { return ("\n\tFROM CMS_LUMI_PROD_OFFLINE.LUMI_SUMMARIES ldblsum" + "\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_SECTIONS ldbls" + "\n\t\tON ldblsum.SECTION_ID = ldbls.SECTION_ID" + "\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_VERSION_TAG_MAPS ldblvtm" + "\n\t\tON ldblvtm.LUMI_SUMMARY_ID = ldblsum.LUMI_SUMMARY_ID" + "\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_TAGS ldblt" + "\n\t\tON ldblt.LUMI_TAG_ID = ldblvtm.LUMI_TAG_ID"); } private String getIntLumiJoinQuery() { return ("\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_SECTIONS ldbls" + "\n\t\tON Runs.RunNumber = ldbls.RUN_NUMBER" + "\n\t\tAND LumiSection.LumiSectionNumber = ldbls.LUMI_SECTION_NUMBER" + "\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_SUMMARIES ldblsum" + "\n\t\tON ldblsum.SECTION_ID = ldbls.SECTION_ID" + "\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_VERSION_TAG_MAPS ldblvtm" + "\n\t\tON ldblvtm.LUMI_SUMMARY_ID = ldblsum.LUMI_SUMMARY_ID" + "\n\tJOIN CMS_LUMI_PROD_OFFLINE.LUMI_TAGS ldblt" + "\n\t\tON ldblt.LUMI_TAG_ID = ldblvtm.LUMI_TAG_ID"); } /*private ArrayList makeCompleteListOfVertexsOld(ArrayList lKeywords) throws Exception { int len = lKeywords.size(); if(len <= 1) return lKeywords; for(int i = 0 ; i != len ; ++i ) { boolean isEdge = false; for(int j = 0 ; j != len ; ++j ) { if(i != j) { //System.out.println("Checking " + lKeywords.get(i) + " with " + lKeywords.get(j) ); if(u.doesEdgeExist((String)lKeywords.get(i), (String)lKeywords.get(j))) { isEdge = true; break; } } } if(!isEdge) { //System.out.println("Shoertest edge in " + (String)lKeywords.get(i) + " --- " + (String)lKeywords.get((i+1)%len)); List<Edge> lEdges = 
u.getShortestPath((String)lKeywords.get(i), (String)lKeywords.get((i+1)%len)); for (Edge e: lEdges) { //System.out.println("PATH " + u.getFirstNameFromEdge(e) + " --- " + u.getSecondNameFromEdge(e)); lKeywords = addUniqueInList(lKeywords, u.getFirstNameFromEdge(e)); lKeywords = addUniqueInList(lKeywords, u.getSecondNameFromEdge(e)); } //System.out.println("No edge callin again ---------> \n"); lKeywords = makeCompleteListOfVertexs (lKeywords); return lKeywords; } } return lKeywords; }*/ private ArrayList makeCompleteListOfVertexs(ArrayList lKeywords) throws Exception { ArrayList myRoute = new ArrayList(); myRoute.add(lKeywords.get(0)); lKeywords.remove(0); int len = lKeywords.size(); int prevLen = 0; int iter = 0; while(len != 0) { ++iter; checkMax(iter); boolean breakFree = false; for(int i = 0 ; i != len ; ++i ) { ++iter; checkMax(iter); int lenRount = myRoute.size(); for(int j = 0 ; j != lenRount ; ++j ) { ++iter; checkMax(iter); String keyInMyRoute = (String)myRoute.get(j); String keyInArray = (String)lKeywords.get(i); if(keyInArray.equals(keyInMyRoute)) { lKeywords.remove(i); breakFree = true; break; } else if(u.doesEdgeExist(keyInMyRoute, keyInArray)) { myRoute = addUniqueInList(myRoute, keyInArray); lKeywords.remove(i); breakFree = true; break; } } if(breakFree) break; } if(prevLen == len) { //System.out.println("Shortest edge in " + (String)lKeywords.get(0) + " --- " + (String)myRoute.get(0)); List<Edge> lEdges = u.getShortestPath((String)lKeywords.get(0), (String)myRoute.get(0)); for (Edge e: lEdges) { //System.out.println("PATH " + u.getFirstNameFromEdge(e) + " --- " + u.getSecondNameFromEdge(e)); myRoute = addUniqueInList(myRoute, u.getFirstNameFromEdge(e)); myRoute = addUniqueInList(myRoute, u.getSecondNameFromEdge(e)); ++iter; checkMax(iter); } if(lEdges.size() > 0) lKeywords.remove(0); else { myRoute = addUniqueInList(myRoute, (String)lKeywords.get(0)); lKeywords.remove(0); ////System.out.println("Path length is 0"); } } prevLen = len; len = lKeywords.size(); } return myRoute; } public ArrayList sortVertexs(ArrayList lKeywords) throws Exception { //System.out.println("INSIDE sortVertexs"); int len = lKeywords.size(); String leaf = ""; int iter = 0; for(int i = 0 ; i != len ; ++i ) { ++iter; checkMax(iter); String aVertex = (String)lKeywords.get(i); if(isLeaf(aVertex, lKeywords)) { leaf = aVertex; break; } } //System.out.println("leaf " + leaf); if(leaf.equals("")) leaf = (String)lKeywords.get(0); //System.out.println("leaf again " + leaf); ArrayList toReturn = new ArrayList(); toReturn.add(leaf); int reps = -1; while( toReturn.size() != len) { ++reps; ++iter; checkMax(iter); for(int j = 0 ; j != len ; ++j ) { ++iter; checkMax(iter); String aVertex = (String)lKeywords.get(j); if(!aVertex.equals(leaf)) { if(!isIn(toReturn, aVertex)) { if(isLeaf(aVertex, lKeywords)) { //System.out.println(aVertex + " is a leaf toreturn size " + toReturn.size() + " len -1 " + (len - 1)); //if(toReturn.size() ==1) System.out.println("toReturn.0 " + (String)toReturn.get(0)); if(toReturn.size() == (len - 1)) toReturn = addUniqueInList(toReturn, aVertex); else if(reps > len) { toReturn = addUniqueInList(toReturn, aVertex); //System.out.println("adding " + aVertex); } } else { for (int k = (toReturn.size() - 1) ; k != -1 ; --k) { ++iter; checkMax(iter); //System.out.println("Cheking edge between " + (String)toReturn.get(k) + " and " + aVertex); if(u.doesEdgeExist((String)toReturn.get(k), aVertex)) { toReturn = addUniqueInList(toReturn, aVertex); break; } else { if(reps > len) toReturn = 
addUniqueInList(toReturn, aVertex); //System.out.println("no edge between " + (String)toReturn.get(k) + " and " + aVertex); } } } } } } } return toReturn; } private boolean isLeaf(String aVertex, ArrayList lKeyword) throws Exception { int count = 0; Set s = u.getVertex(aVertex).getNeighbors(); int iter = 0; for (Iterator eIt = s.iterator(); eIt.hasNext(); ) { ++iter; checkMax(iter); String neighbor = u.getRealFromVertex((Vertex) eIt.next()); //System.out.println("neighbour " + neighbor); if(isIn(lKeyword, neighbor)) ++count; } if(count == 1) return true; return false; } public static void main(String args[]) throws Exception{ QueryBuilder qb = new QueryBuilder("oracle"); ArrayList tmp = new ArrayList(); /*GraphUtil u = GraphUtil.getInstance("/home/sekhri/DBS/Servers/JavaServer/etc/DBSSchemaGraph.xml"); List<Edge> lEdges = u.getShortestPath("ProcessedDataset", "LumiSection"); for (Edge e: lEdges) { System.out.println("PATH " + u.getFirstNameFromEdge(e) + " --- " + u.getSecondNameFromEdge(e)); }*/ //tmp.add("PrimaryDataset"); tmp.add("file"); System.out.println(qb.genQuery(tmp, new ArrayList(),new ArrayList(),"", "4", "10", true)); //tmp.add("Runs"); //tmp.add("FileRunLumi"); //tmp.add("ProcessedDataset"); //tmp.add("FileType"); //tmp.add("ProcDSRuns"); /*tmp = qb.sortVertexs(tmp); //tmp = qb.makeCompleteListOfVertexs(tmp); for (int i =0 ; i!=tmp.size() ;++i ) { System.out.println("ID " + tmp.get(i)); }*/ } }
true
true
public String genQuery(ArrayList kws, ArrayList cs, ArrayList okws, String orderingkw, String begin, String end, boolean upper) throws Exception{ this.upper = upper; //Store all the keywors both from select and where in allKws fixConstForLike(cs); String personJoinQuery = ""; String parentJoinQuery = ""; String childJoinQuery = ""; String pathParentWhereQuery = ""; String groupByQuery = ""; String sumGroupByQuery = ""; String sumQuery = ""; String selectStr = "SELECT "; boolean invalidFile = false; boolean modByAdded = false; boolean createByAdded = false; boolean fileParentAdded = false; boolean fileChildAdded = false; boolean datasetParentAdded = false; boolean procDsParentAdded = false; boolean iLumi = isInList(kws, "ilumi"); boolean countPresent = false; boolean sumPresent = false; int iter = 0; ArrayList allKws = new ArrayList(); if(isInList(kws, "file") || isInList(kws, "file.status")) { invalidFile = true; allKws = addUniqueInList(allKws, "FileStatus"); } for (int i =0 ; i!= kws.size(); ++i) { ++iter; checkMax(iter); String aKw = (String)kws.get(i); if(aKw.toLowerCase().startsWith("count") || aKw.toLowerCase().endsWith("count")) countPresent = true; if(aKw.toLowerCase().startsWith("sum")) sumPresent = true; } if(sumPresent || countPresent) sumQuery += selectStr; String query = "SELECT DISTINCT \n\t"; for (int i =0 ; i!= kws.size(); ++i) { ++iter; checkMax(iter); String aKw = (String)kws.get(i); if (i!=0) query += "\n\t,"; //If path supplied in select then always use block path. If supplied in where then user procDS ID if(Util.isSame(aKw, "ilumi")) { query += getIntLumiSelectQuery(); //System.out.println("line 2.1.1"); } else if(aKw.toLowerCase().startsWith("sum")) { checkMax(iter); aKw = aKw.toLowerCase(); String keyword = aKw.substring(aKw.indexOf("(") + 1, aKw.indexOf(")")); keyword = keyword.trim(); String asKeyword = keyword.replace('.', '_'); String entity = (new StringTokenizer(keyword, ".")).nextToken(); //System.out.println("entity " + entity); String realName = u.getMappedRealName(entity); allKws = addUniqueInList(allKws, realName); //if(!sumQuery.startsWith("SELECT")) sumQuery += " SELECT "; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += "SUM(" + asKeyword + ") AS SUM_" + asKeyword + " "; //query += "SUM(" + km.getMappedValue(keyword, true) + ") AS SUM_" + keyword.replace('.', '_') ; String tmpKw = km.getMappedValue(keyword, true); query += tmpKw + " AS " + asKeyword ; if(iLumi) groupByQuery += tmpKw + ","; String tmp = makeQueryFromDefaults(u.getMappedVertex(entity)); tmp = tmp.substring(0, tmp.length() - 1); // To get rid of last space query += "\n\t," + tmp + "_SUM "; } else if(aKw.toLowerCase().startsWith("count")) { checkMax(iter); aKw = aKw.toLowerCase(); String entity = aKw.substring(aKw.indexOf("(") + 1, aKw.indexOf(")")); entity = entity.trim(); //System.out.println("entity = " + entity); String realName = u.getMappedRealName(entity); allKws = addUniqueInList(allKws, realName); String defaultStr = u.getDefaultFromVertex(u.getVertex(realName)); if(defaultStr.indexOf(",") != -1) throw new Exception("Cannot use count(" + entity + ")"); //query += "COUNT(DISTINCT " + realName + "." + defaultStr + ") AS COUNT"; query += realName + "." 
+ defaultStr + " AS COUNT_SUB_" + realName; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; //if(sumQuery.length() != 0) sumQuery += ",\n\t"; //else if(!sumQuery.startsWith("SELECT")) sumQuery += "SELECT "; sumQuery += "COUNT(DISTINCT COUNT_SUB_" + realName + ") AS COUNT_" + realName; /*if(sumPresent) { sumQuery += ",\n\t COUNT AS COUNT"; sumGroupByQuery += " COUNT ,"; }*/ } else if(Util.isSame(aKw, "dataset")) { checkMax(iter); allKws = addUniqueInList(allKws, "Block"); query += "Block.Path AS PATH"; if(iLumi) groupByQuery += "Block.Path,"; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += " PATH AS PATH"; sumGroupByQuery += " PATH ,"; } } else { //System.out.println("line 2.2"); if(iLumi && (i < 2) ) { allKws = addUniqueInList(allKws, "Runs"); allKws = addUniqueInList(allKws, "LumiSection"); checkMax(iter); } StringTokenizer st = new StringTokenizer(aKw, "."); int count = st.countTokens(); String token = st.nextToken(); Vertex vFirst = u.getMappedVertex(token); String real = u.getRealFromVertex(vFirst); allKws = addUniqueInList(allKws, real); //System.out.println("line 4"); //if(Util.isSame(real, "LumiSection")) allKws = addUniqueInList(allKws, "Runs"); if(count == 1) { //Get default from vertex //System.out.println("line 5"); checkMax(iter); String tmp = makeQueryFromDefaults(vFirst); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(vFirst); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect; sumGroupByQuery += toSelect + ","; } } } else { //System.out.println("line 6"); checkMax(iter); boolean addQuery = true; String token2 = st.nextToken(); String tmpTableName = token + "_" + token2; /*if(Util.isSame(token2, "algo")) { allKws = addUniqueInList(allKws, "AppFamily"); allKws = addUniqueInList(allKws, "AppVersion"); allKws = addUniqueInList(allKws, "AppExecutable"); allKws = addUniqueInList(allKws, "QueryableParameterSet"); query += makeQueryFromDefaults(u.getVertex("AppFamily")); query += makeQueryFromDefaults(u.getVertex("AppVersion")); query += makeQueryFromDefaults(u.getVertex("AppExecutable")); query += makeQueryFromDefaults(u.getVertex("QueryableParameterSet")); adQuery = false; }*/ if(Util.isSame(token2, "release") || Util.isSame(token2, "tier")) { checkMax(iter); String realName = u.getMappedRealName(token2);//AppVersion allKws = addUniqueInList(allKws, realName); String tmp = makeQueryFromDefaults(u.getVertex(realName)); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(u.getVertex(realName)); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token, "release") || Util.isSame(token, "tier")) { checkMax(iter); String realName = u.getMappedRealName(token);//AppVersion allKws = addUniqueInList(allKws, realName); String tmp = makeQueryFromDefaults(u.getVertex(realName)); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(u.getVertex(realName)); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, 
"count")) { checkMax(iter); String realName = u.getMappedRealName(token); String defaultStr = u.getDefaultFromVertex(u.getVertex(realName)); if(defaultStr.indexOf(",") != -1) throw new Exception("Cannot use count(" + token + ")"); query += realName + "." + defaultStr + " AS COUNT_SUB_" + realName; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; //if(sumQuery.length() != 0) sumQuery += ",\n\t"; //else sumQuery += "SELECT "; sumQuery += "COUNT(DISTINCT COUNT_SUB_" + realName + ") AS COUNT_" + realName; /*query += "COUNT(DISTINCT " + realName + "." + defaultStr + ") AS COUNT"; if(sumPresent) { sumQuery += ",\n\t COUNT AS COUNT "; sumGroupByQuery += " COUNT ,"; }*/ addQuery = false; } if(Util.isSame(token2, "modby") || Util.isSame(token2, "createby")) { checkMax(iter); boolean dontJoin = false; String personField = "CreatedBy"; if(Util.isSame(token2, "modby")) { if(modByAdded) dontJoin = true; modByAdded = true; personField = "LastModifiedBy"; } else { if(createByAdded) dontJoin = true; createByAdded = true; } //String tmpTableName = token + "_" + token2; if(!dontJoin) { personJoinQuery += "\tJOIN " + owner() + "Person " + tmpTableName + "\n" + "\t\tON " + real + "." + personField + " = " + tmpTableName + ".ID\n"; } String fqName = tmpTableName + ".DistinguishedName"; query += fqName + makeAs(tmpTableName + "_DN"); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += tmpTableName + "_DN AS " + tmpTableName + "_DN "; sumGroupByQuery += tmpTableName + "_DN ,"; } addQuery = false; } //if(Util.isSame(token2, "evnum") && Util.isSame(token, "file")) { // throw new Exception("You can find file based on file.evnum (find file where file.evenum = blah) but cannot find file.evnum"); //} if(Util.isSame(token2, "evnum") && Util.isSame(token, "lumi")) { throw new Exception("You can find lumi based on lumi.evnum (find lumi where lumi.evenum = blah) but cannot find lumi.evnum"); } if(Util.isSame(token2, "parent") && Util.isSame(token, "file")) { checkMax(iter); boolean dontJoin = false; if(fileParentAdded) dontJoin = true; fileParentAdded = true; if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "Files", "FileParentage"); String fqName = tmpTableName + ".LogicalFileName"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "child") && Util.isSame(token, "file")) { checkMax(iter); boolean dontJoin = false; if(fileChildAdded) dontJoin = true; fileChildAdded = true; //System.out.println("childJoinQuery " + childJoinQuery+ " dontJoin " + dontJoin); if(!dontJoin) childJoinQuery += handleChild(tmpTableName, "Files", "FileParentage"); String fqName = tmpTableName + ".LogicalFileName"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "parent") && Util.isSame(token, "procds")) { checkMax(iter); boolean dontJoin = false; if(procDsParentAdded) dontJoin = true; procDsParentAdded = true; 
if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "ProcessedDataset", "ProcDSParent"); String fqName = tmpTableName + ".Name"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "parent") && Util.isSame(token, "dataset")) { //System.out.println("line 8"); checkMax(iter); allKws = addUniqueInList(allKws, "Block"); boolean dontJoin = false; if(datasetParentAdded) dontJoin = true; datasetParentAdded = true; if(!dontJoin) pathParentWhereQuery += handlePathParent(); String fqName = "Block.Path AS Dataset_Parent"; query += fqName; if(iLumi) groupByQuery += "Block.Path ,"; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += " Dataset_Parent AS Dataset_Parent "; sumGroupByQuery += " Dataset_Parent ,"; } addQuery = false; } if(Util.isSame(token, "dataset")) { checkMax(iter); allKws = addUniqueInList(allKws, "ProcessedDataset"); } Vertex vCombined = u.getMappedVertex(aKw); //System.out.println("\n\n---Changing vCombined " + aKw); if(vCombined == null) { //System.out.println("IT is NULLLLLLLLLLLLL"); checkMax(iter); if(addQuery) { String mapVal = km.getMappedValue(aKw, true); //if(mapVal.equals(aKw)) throw new Exception("The keyword " + aKw + " not yet implemented in Query Builder" ); query += mapVal + makeAs(mapVal); if(iLumi) groupByQuery += mapVal + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(mapVal)); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } } } else { //System.out.println("in ELSE ---> u.getRealFromVertex " + u.getRealFromVertex(vCombined)); allKws = addUniqueInList(allKws, u.getRealFromVertex(vCombined)); checkMax(iter); if(addQuery) { checkMax(iter); String tmp = makeQueryFromDefaults(vCombined); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(vCombined); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } } } } }
public String genQuery(ArrayList kws, ArrayList cs, ArrayList okws, String orderingkw, String begin, String end, boolean upper) throws Exception{ this.upper = upper; //Store all the keywors both from select and where in allKws fixConstForLike(cs); String personJoinQuery = ""; String parentJoinQuery = ""; String childJoinQuery = ""; String pathParentWhereQuery = ""; String groupByQuery = ""; String sumGroupByQuery = ""; String sumQuery = ""; String selectStr = "SELECT "; boolean invalidFile = false; boolean modByAdded = false; boolean createByAdded = false; boolean fileParentAdded = false; boolean fileChildAdded = false; boolean datasetParentAdded = false; boolean procDsParentAdded = false; boolean iLumi = isInList(kws, "ilumi"); boolean countPresent = false; boolean sumPresent = false; int iter = 0; ArrayList allKws = new ArrayList(); if(isInList(kws, "file") || isInList(kws, "file.status")) { invalidFile = true; allKws = addUniqueInList(allKws, "FileStatus"); } for (int i =0 ; i!= kws.size(); ++i) { ++iter; checkMax(iter); String aKw = (String)kws.get(i); if(aKw.toLowerCase().startsWith("count") || aKw.toLowerCase().endsWith("count")) countPresent = true; if(aKw.toLowerCase().startsWith("sum")) sumPresent = true; } if(sumPresent || countPresent) sumQuery += selectStr; String query = "SELECT DISTINCT \n\t"; // If requested CLOB data, such as QueryableParameterSet.Content // we should not either converted it to string data type if (isInList(kws, "config.content") ) { query = "SELECT \n\t"; } for (int i =0 ; i!= kws.size(); ++i) { ++iter; checkMax(iter); String aKw = (String)kws.get(i); if (i!=0) query += "\n\t,"; //If path supplied in select then always use block path. If supplied in where then user procDS ID if(Util.isSame(aKw, "ilumi")) { query += getIntLumiSelectQuery(); //System.out.println("line 2.1.1"); } else if(aKw.toLowerCase().startsWith("sum")) { checkMax(iter); aKw = aKw.toLowerCase(); String keyword = aKw.substring(aKw.indexOf("(") + 1, aKw.indexOf(")")); keyword = keyword.trim(); String asKeyword = keyword.replace('.', '_'); String entity = (new StringTokenizer(keyword, ".")).nextToken(); //System.out.println("entity " + entity); String realName = u.getMappedRealName(entity); allKws = addUniqueInList(allKws, realName); //if(!sumQuery.startsWith("SELECT")) sumQuery += " SELECT "; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += "SUM(" + asKeyword + ") AS SUM_" + asKeyword + " "; //query += "SUM(" + km.getMappedValue(keyword, true) + ") AS SUM_" + keyword.replace('.', '_') ; String tmpKw = km.getMappedValue(keyword, true); query += tmpKw + " AS " + asKeyword ; if(iLumi) groupByQuery += tmpKw + ","; String tmp = makeQueryFromDefaults(u.getMappedVertex(entity)); tmp = tmp.substring(0, tmp.length() - 1); // To get rid of last space query += "\n\t," + tmp + "_SUM "; } else if(aKw.toLowerCase().startsWith("count")) { checkMax(iter); aKw = aKw.toLowerCase(); String entity = aKw.substring(aKw.indexOf("(") + 1, aKw.indexOf(")")); entity = entity.trim(); //System.out.println("entity = " + entity); String realName = u.getMappedRealName(entity); allKws = addUniqueInList(allKws, realName); String defaultStr = u.getDefaultFromVertex(u.getVertex(realName)); if(defaultStr.indexOf(",") != -1) throw new Exception("Cannot use count(" + entity + ")"); //query += "COUNT(DISTINCT " + realName + "." + defaultStr + ") AS COUNT"; query += realName + "." 
+ defaultStr + " AS COUNT_SUB_" + realName; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; //if(sumQuery.length() != 0) sumQuery += ",\n\t"; //else if(!sumQuery.startsWith("SELECT")) sumQuery += "SELECT "; sumQuery += "COUNT(DISTINCT COUNT_SUB_" + realName + ") AS COUNT_" + realName; /*if(sumPresent) { sumQuery += ",\n\t COUNT AS COUNT"; sumGroupByQuery += " COUNT ,"; }*/ } else if(Util.isSame(aKw, "dataset")) { checkMax(iter); allKws = addUniqueInList(allKws, "Block"); query += "Block.Path AS PATH"; if(iLumi) groupByQuery += "Block.Path,"; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += " PATH AS PATH"; sumGroupByQuery += " PATH ,"; } } else { //System.out.println("line 2.2"); if(iLumi && (i < 2) ) { allKws = addUniqueInList(allKws, "Runs"); allKws = addUniqueInList(allKws, "LumiSection"); checkMax(iter); } StringTokenizer st = new StringTokenizer(aKw, "."); int count = st.countTokens(); String token = st.nextToken(); Vertex vFirst = u.getMappedVertex(token); String real = u.getRealFromVertex(vFirst); allKws = addUniqueInList(allKws, real); //System.out.println("line 4"); //if(Util.isSame(real, "LumiSection")) allKws = addUniqueInList(allKws, "Runs"); if(count == 1) { //Get default from vertex //System.out.println("line 5"); checkMax(iter); String tmp = makeQueryFromDefaults(vFirst); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(vFirst); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect; sumGroupByQuery += toSelect + ","; } } } else { //System.out.println("line 6"); checkMax(iter); boolean addQuery = true; String token2 = st.nextToken(); String tmpTableName = token + "_" + token2; /*if(Util.isSame(token2, "algo")) { allKws = addUniqueInList(allKws, "AppFamily"); allKws = addUniqueInList(allKws, "AppVersion"); allKws = addUniqueInList(allKws, "AppExecutable"); allKws = addUniqueInList(allKws, "QueryableParameterSet"); query += makeQueryFromDefaults(u.getVertex("AppFamily")); query += makeQueryFromDefaults(u.getVertex("AppVersion")); query += makeQueryFromDefaults(u.getVertex("AppExecutable")); query += makeQueryFromDefaults(u.getVertex("QueryableParameterSet")); adQuery = false; }*/ if(Util.isSame(token2, "release") || Util.isSame(token2, "tier")) { checkMax(iter); String realName = u.getMappedRealName(token2);//AppVersion allKws = addUniqueInList(allKws, realName); String tmp = makeQueryFromDefaults(u.getVertex(realName)); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(u.getVertex(realName)); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token, "release") || Util.isSame(token, "tier")) { checkMax(iter); String realName = u.getMappedRealName(token);//AppVersion allKws = addUniqueInList(allKws, realName); String tmp = makeQueryFromDefaults(u.getVertex(realName)); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(u.getVertex(realName)); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, 
"count")) { checkMax(iter); String realName = u.getMappedRealName(token); String defaultStr = u.getDefaultFromVertex(u.getVertex(realName)); if(defaultStr.indexOf(",") != -1) throw new Exception("Cannot use count(" + token + ")"); query += realName + "." + defaultStr + " AS COUNT_SUB_" + realName; if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; //if(sumQuery.length() != 0) sumQuery += ",\n\t"; //else sumQuery += "SELECT "; sumQuery += "COUNT(DISTINCT COUNT_SUB_" + realName + ") AS COUNT_" + realName; /*query += "COUNT(DISTINCT " + realName + "." + defaultStr + ") AS COUNT"; if(sumPresent) { sumQuery += ",\n\t COUNT AS COUNT "; sumGroupByQuery += " COUNT ,"; }*/ addQuery = false; } if(Util.isSame(token2, "modby") || Util.isSame(token2, "createby")) { checkMax(iter); boolean dontJoin = false; String personField = "CreatedBy"; if(Util.isSame(token2, "modby")) { if(modByAdded) dontJoin = true; modByAdded = true; personField = "LastModifiedBy"; } else { if(createByAdded) dontJoin = true; createByAdded = true; } //String tmpTableName = token + "_" + token2; if(!dontJoin) { personJoinQuery += "\tJOIN " + owner() + "Person " + tmpTableName + "\n" + "\t\tON " + real + "." + personField + " = " + tmpTableName + ".ID\n"; } String fqName = tmpTableName + ".DistinguishedName"; query += fqName + makeAs(tmpTableName + "_DN"); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += tmpTableName + "_DN AS " + tmpTableName + "_DN "; sumGroupByQuery += tmpTableName + "_DN ,"; } addQuery = false; } //if(Util.isSame(token2, "evnum") && Util.isSame(token, "file")) { // throw new Exception("You can find file based on file.evnum (find file where file.evenum = blah) but cannot find file.evnum"); //} if(Util.isSame(token2, "evnum") && Util.isSame(token, "lumi")) { throw new Exception("You can find lumi based on lumi.evnum (find lumi where lumi.evenum = blah) but cannot find lumi.evnum"); } if(Util.isSame(token2, "parent") && Util.isSame(token, "file")) { checkMax(iter); boolean dontJoin = false; if(fileParentAdded) dontJoin = true; fileParentAdded = true; if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "Files", "FileParentage"); String fqName = tmpTableName + ".LogicalFileName"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "child") && Util.isSame(token, "file")) { checkMax(iter); boolean dontJoin = false; if(fileChildAdded) dontJoin = true; fileChildAdded = true; //System.out.println("childJoinQuery " + childJoinQuery+ " dontJoin " + dontJoin); if(!dontJoin) childJoinQuery += handleChild(tmpTableName, "Files", "FileParentage"); String fqName = tmpTableName + ".LogicalFileName"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "parent") && Util.isSame(token, "procds")) { checkMax(iter); boolean dontJoin = false; if(procDsParentAdded) dontJoin = true; procDsParentAdded = true; 
if(!dontJoin) parentJoinQuery += handleParent(tmpTableName, "ProcessedDataset", "ProcDSParent"); String fqName = tmpTableName + ".Name"; query += fqName + makeAs(fqName); if(iLumi) groupByQuery += fqName + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(fqName)) + " "; if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } addQuery = false; } if(Util.isSame(token2, "parent") && Util.isSame(token, "dataset")) { //System.out.println("line 8"); checkMax(iter); allKws = addUniqueInList(allKws, "Block"); boolean dontJoin = false; if(datasetParentAdded) dontJoin = true; datasetParentAdded = true; if(!dontJoin) pathParentWhereQuery += handlePathParent(); String fqName = "Block.Path AS Dataset_Parent"; query += fqName; if(iLumi) groupByQuery += "Block.Path ,"; if(sumPresent || countPresent) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += " Dataset_Parent AS Dataset_Parent "; sumGroupByQuery += " Dataset_Parent ,"; } addQuery = false; } if(Util.isSame(token, "dataset")) { checkMax(iter); allKws = addUniqueInList(allKws, "ProcessedDataset"); } Vertex vCombined = u.getMappedVertex(aKw); //System.out.println("\n\n---Changing vCombined " + aKw); if(vCombined == null) { //System.out.println("IT is NULLLLLLLLLLLLL"); checkMax(iter); if(addQuery) { String mapVal = km.getMappedValue(aKw, true); //if(mapVal.equals(aKw)) throw new Exception("The keyword " + aKw + " not yet implemented in Query Builder" ); query += mapVal + makeAs(mapVal); if(iLumi) groupByQuery += mapVal + ","; if(sumPresent || countPresent) { String toSelect = makeSumSelect(makeAs(mapVal)); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } } } else { //System.out.println("in ELSE ---> u.getRealFromVertex " + u.getRealFromVertex(vCombined)); allKws = addUniqueInList(allKws, u.getRealFromVertex(vCombined)); checkMax(iter); if(addQuery) { checkMax(iter); String tmp = makeQueryFromDefaults(vCombined); query += tmp; if(iLumi) groupByQuery += makeGroupQueryFromDefaults(vCombined); if(sumPresent || countPresent) { String toSelect = makeSumSelect(tmp); if(toSelect.length() != 0) { if(!sumQuery.equals(selectStr)) sumQuery += ",\n\t"; sumQuery += toSelect + " AS " + toSelect + " "; sumGroupByQuery += toSelect + ","; } } } } } }
diff --git a/GAE/src/com/gallatinsystems/survey/dao/SurveyDAO.java b/GAE/src/com/gallatinsystems/survey/dao/SurveyDAO.java index 08b201479..1c4334c46 100644 --- a/GAE/src/com/gallatinsystems/survey/dao/SurveyDAO.java +++ b/GAE/src/com/gallatinsystems/survey/dao/SurveyDAO.java @@ -1,282 +1,282 @@ /* * Copyright (C) 2010-2013 Stichting Akvo (Akvo Foundation) * * This file is part of Akvo FLOW. * * Akvo FLOW is free software: you can redistribute it and modify it under the terms of * the GNU Affero General Public License (AGPL) as published by the Free Software Foundation, * either version 3 of the License or any later version. * * Akvo FLOW is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Public License included below for more details. * * The full license text can also be seen at <http://www.gnu.org/licenses/agpl.html>. */ package com.gallatinsystems.survey.dao; import java.util.List; import java.util.Map; import java.util.logging.Level; import java.util.logging.Logger; import javax.jdo.PersistenceManager; import javax.xml.bind.JAXBException; import org.waterforpeople.mapping.dao.QuestionAnswerStoreDao; import org.waterforpeople.mapping.domain.SurveyQuestion; import com.gallatinsystems.framework.dao.BaseDAO; import com.gallatinsystems.framework.exceptions.IllegalDeletionException; import com.gallatinsystems.framework.servlet.PersistenceFilter; import com.gallatinsystems.survey.domain.QuestionGroup; import com.gallatinsystems.survey.domain.Survey; import com.gallatinsystems.survey.domain.SurveyContainer; import com.gallatinsystems.survey.domain.SurveyGroup; import com.gallatinsystems.survey.xml.SurveyXMLAdapter; import com.google.appengine.api.datastore.Key; /** * Dao for manipulating survey objects * * */ public class SurveyDAO extends BaseDAO<Survey> { private static final Logger log = Logger.getLogger(SurveyDAO.class .getName()); private QuestionGroupDao questionGroupDao; public SurveyDAO() { super(Survey.class); questionGroupDao = new QuestionGroupDao(); } public SurveyGroup save(SurveyGroup surveyGroup) { return super.save(surveyGroup); } public Survey save(Survey survey, Key surveyGroupKey) { survey = super.save(survey); return survey; } public Survey getById(Long key) { return super.getByKey(key); } public Survey getByKey(Key key) { return super.getByKey(key); } /** * loads a full survey object (whole object graph, including questions). * This method can only be called reliably from a background task or backend * * @param id * @return */ public Survey loadFullSurvey(Long surveyId) { Survey survey = getById(surveyId); survey.setQuestionGroupMap(questionGroupDao .listQuestionGroupsBySurvey(survey.getKey().getId())); return survey; } /** * saves a surveyContainer containing the xml representation of the survey * document. 
* * @param surveyId * @param surveyDocument * @return */ public Long save(Long surveyId, String surveyDocument) { SurveyContainer sc = new SurveyContainer(); sc.setSurveyId(surveyId); com.google.appengine.api.datastore.Text surveyText = new com.google.appengine.api.datastore.Text( surveyDocument); sc.setSurveyDocument(surveyText); sc = super.save(sc); return sc.getKey().getId(); } /** * returns a Survey xml pojo obtained after unmarshalling the * SurveyContainer * * @param id * @return */ public com.gallatinsystems.survey.domain.xml.Survey get(Long id) { SurveyContainer surveyContainer = getByKey(id, SurveyContainer.class); SurveyXMLAdapter sxa = new SurveyXMLAdapter(); com.gallatinsystems.survey.domain.xml.Survey survey = null; try { survey = sxa.unmarshall(surveyContainer.getSurveyDocument() .toString()); } catch (JAXBException e) { log.log(Level.SEVERE, "Could not unmarshal xml", e); } return survey; } /** * gets a document from the surveyContainer * * @param id * @return */ public String getSurveyDocument(Long id) { SurveyContainer surveyContainer = getByKey(id, SurveyContainer.class); return surveyContainer.getSurveyDocument().getValue(); } /** * lists all survey container objects * * @return */ public List<SurveyContainer> listSurveyContainers() { return list(SurveyContainer.class, "all"); } /** * lists all questions of a given type (across all surveys) */ public List<SurveyQuestion> listQuestionByType(String questionType) { return listByProperty("type", questionType, "String", SurveyQuestion.class); } /** * lists all survey groups * * @param cursorString * @return */ public List<SurveyGroup> listSurveyGroup(String cursorString) { return list(SurveyGroup.class, cursorString); } /** * lists all surveys in a given surveyGroup * * @param surveyGroupId * @return */ public List<Survey> listSurveysByGroup(Long surveyGroupId) { return listByProperty("surveyGroupId", surveyGroupId, "Long"); } /** * gets a survey by the surveyGroupId and survey code * * @param code * @param surveyGroupId * @return */ @SuppressWarnings("unchecked") public Survey getByParentIdAndCode(String code, Long surveyGroupId) { PersistenceManager pm = PersistenceFilter.getManager(); javax.jdo.Query query = pm.newQuery(Survey.class); query.setFilter(" code == codeParam && surveyGroupId == idParam"); query.declareParameters("String codeParam, Long idParam"); List<Survey> results = (List<Survey>) query .execute(code, surveyGroupId); if (results != null && results.size() > 0) { return results.get(0); } else { return null; } } /** * gets a single survey by code and path. path is defined as * "surveyGroupName" * * @param code * @param path * @return */ @SuppressWarnings("unchecked") public Survey getByPath(String code, String path) { PersistenceManager pm = PersistenceFilter.getManager(); javax.jdo.Query query = pm.newQuery(Survey.class); query.setFilter(" path == pathParam && name == codeParam"); query.declareParameters("String pathParam, String codeParam"); List<Survey> results = (List<Survey>) query.execute(path, code); if (results != null && results.size() > 0) { return results.get(0); } else { return null; } } /** * increments the survey version by 1 * * @param surveyId */ public void incrementVersion(Long surveyId) { Survey s = getByKey(surveyId); if (s != null) { Double v = s.getVersion(); if (v == null) { v = new Double(2); } else { v++; } s.setVersion(v); save(s); } } /** * deletes a survey and spawns delete questionGroup tasks to delete all * children asynchronously. 
* * @param item * @throws IllegalDeletionException * - if the system contains responses for this survey */ public void delete(Survey item) throws IllegalDeletionException { // Check to see if there are any surveys for this first item = getByKey(item.getKey()); QuestionAnswerStoreDao qasDao = new QuestionAnswerStoreDao(); if (qasDao.listBySurvey(new Long(item.getKey().getId())).size() == 0) { QuestionGroupDao qgDao = new QuestionGroupDao(); for (Map.Entry<Integer, QuestionGroup> qgItem : qgDao .listQuestionGroupsBySurvey(item.getKey().getId()) .entrySet()) { SurveyTaskUtil.spawnDeleteTask("deleteQuestionGroup", qgItem .getValue().getKey().getId()); } super.delete(item); } else { throw new IllegalDeletionException( "Cannot delete surveyId: " + item.getKey().getId() + " surveyCode:" + item.getCode() - + " because there is a QuestionAnswerStore value for this survey. Please delete all survey response first"); + + " because there are already survey responses for this survey. Please delete all survey responses first"); } } /** * lists all survey ids * * @return */ @SuppressWarnings("unchecked") public List<Key> listSurveyIds() { PersistenceManager pm = PersistenceFilter.getManager(); javax.jdo.Query query = pm.newQuery("select key from " + Survey.class.getName()); List<Key> results = (List<Key>) query.execute(); return results; } }
true
true
public void delete(Survey item) throws IllegalDeletionException {
    // Check to see if there are any surveys for this first
    item = getByKey(item.getKey());
    QuestionAnswerStoreDao qasDao = new QuestionAnswerStoreDao();
    if (qasDao.listBySurvey(new Long(item.getKey().getId())).size() == 0) {
        QuestionGroupDao qgDao = new QuestionGroupDao();
        for (Map.Entry<Integer, QuestionGroup> qgItem : qgDao
                .listQuestionGroupsBySurvey(item.getKey().getId())
                .entrySet()) {
            SurveyTaskUtil.spawnDeleteTask("deleteQuestionGroup", qgItem
                    .getValue().getKey().getId());
        }
        super.delete(item);
    } else {
        throw new IllegalDeletionException(
                "Cannot delete surveyId: "
                        + item.getKey().getId()
                        + " surveyCode:"
                        + item.getCode()
                        + " because there is a QuestionAnswerStore value for this survey. Please delete all survey response first");
    }
}
public void delete(Survey item) throws IllegalDeletionException {
    // Check to see if there are any surveys for this first
    item = getByKey(item.getKey());
    QuestionAnswerStoreDao qasDao = new QuestionAnswerStoreDao();
    if (qasDao.listBySurvey(new Long(item.getKey().getId())).size() == 0) {
        QuestionGroupDao qgDao = new QuestionGroupDao();
        for (Map.Entry<Integer, QuestionGroup> qgItem : qgDao
                .listQuestionGroupsBySurvey(item.getKey().getId())
                .entrySet()) {
            SurveyTaskUtil.spawnDeleteTask("deleteQuestionGroup", qgItem
                    .getValue().getKey().getId());
        }
        super.delete(item);
    } else {
        throw new IllegalDeletionException(
                "Cannot delete surveyId: "
                        + item.getKey().getId()
                        + " surveyCode:"
                        + item.getCode()
                        + " because there are already survey responses for this survey. Please delete all survey responses first");
    }
}
diff --git a/assets/src/main/java/services/AlbumAgregator.java b/assets/src/main/java/services/AlbumAgregator.java index cbe09b9..07dbb94 100644 --- a/assets/src/main/java/services/AlbumAgregator.java +++ b/assets/src/main/java/services/AlbumAgregator.java @@ -1,71 +1,79 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package services; import java.util.ArrayList; import java.util.List; import org.osoa.sca.annotations.Init; import org.osoa.sca.annotations.Reference; import com.google.gdata.data.Link; public class AlbumAgregator implements Album { private List<String> pictures = new ArrayList<String>(); @Reference(required=false) protected Album album; @Reference(required=false) protected org.apache.tuscany.sca.binding.atom.collection.Collection albumFeed; @Reference(required=false) protected org.apache.tuscany.sca.binding.gdata.collection.Collection albumPicassa; @Init public void init() { if(album != null) { for(String picture : album.getPictures()) { pictures.add(picture); } } if (albumFeed != null) { - for(org.apache.abdera.model.Entry feedPicture : albumFeed.getFeed().getEntries()) { - String feedImageLink = feedPicture.getEnclosureLinkResolvedHref().toString(); - pictures.add(feedImageLink); - } + try { + for(org.apache.abdera.model.Entry feedPicture : albumFeed.getFeed().getEntries()) { + String feedImageLink = feedPicture.getEnclosureLinkResolvedHref().toString(); + pictures.add(feedImageLink); + } + }catch (Exception e) { + //log exception, warn user that album xxx was not processed (not found) + } } if( albumPicassa != null) { - for(com.google.gdata.data.Entry picassaPicture : albumPicassa.getFeed().getEntries()) { - String feedImageLink = picassaPicture.getLink(Link.Rel.MEDIA_EDIT, null).getHref(); - pictures.add(feedImageLink); - } + try { + for(com.google.gdata.data.Entry picassaPicture : albumPicassa.getFeed().getEntries()) { + String feedImageLink = picassaPicture.getLink(Link.Rel.MEDIA_EDIT, null).getHref(); + pictures.add(feedImageLink); + } + }catch (Exception e) { + //log exception, warn user that album xxx was not processed (not found) + } } } public String[] getPictures() { String[] pictureArray = new String[pictures.size()]; pictures.toArray(pictureArray); return pictureArray; } }
false
true
public void init() {
    if(album != null) {
        for(String picture : album.getPictures()) {
            pictures.add(picture);
        }
    }
    if (albumFeed != null) {
        for(org.apache.abdera.model.Entry feedPicture : albumFeed.getFeed().getEntries()) {
            String feedImageLink = feedPicture.getEnclosureLinkResolvedHref().toString();
            pictures.add(feedImageLink);
        }
    }
    if( albumPicassa != null) {
        for(com.google.gdata.data.Entry picassaPicture : albumPicassa.getFeed().getEntries()) {
            String feedImageLink = picassaPicture.getLink(Link.Rel.MEDIA_EDIT, null).getHref();
            pictures.add(feedImageLink);
        }
    }
}
public void init() {
    if(album != null) {
        for(String picture : album.getPictures()) {
            pictures.add(picture);
        }
    }
    if (albumFeed != null) {
        try {
            for(org.apache.abdera.model.Entry feedPicture : albumFeed.getFeed().getEntries()) {
                String feedImageLink = feedPicture.getEnclosureLinkResolvedHref().toString();
                pictures.add(feedImageLink);
            }
        }catch (Exception e) {
            //log exception, warn user that album xxx was not processed (not found)
        }
    }
    if( albumPicassa != null) {
        try {
            for(com.google.gdata.data.Entry picassaPicture : albumPicassa.getFeed().getEntries()) {
                String feedImageLink = picassaPicture.getLink(Link.Rel.MEDIA_EDIT, null).getHref();
                pictures.add(feedImageLink);
            }
        }catch (Exception e) {
            //log exception, warn user that album xxx was not processed (not found)
        }
    }
}
diff --git a/Carbonado/src/main/java/com/amazon/carbonado/spi/MasterStorableGenerator.java b/Carbonado/src/main/java/com/amazon/carbonado/spi/MasterStorableGenerator.java index b3bc830..ead6c3c 100644 --- a/Carbonado/src/main/java/com/amazon/carbonado/spi/MasterStorableGenerator.java +++ b/Carbonado/src/main/java/com/amazon/carbonado/spi/MasterStorableGenerator.java @@ -1,869 +1,870 @@ /* * Copyright 2006 Amazon Technologies, Inc. or its affiliates. * Amazon, Amazon.com and Carbonado are trademarks or registered trademarks * of Amazon Technologies, Inc. or its affiliates. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.amazon.carbonado.spi; import java.lang.reflect.Method; import java.util.EnumSet; import java.util.HashSet; import java.util.Map; import org.cojen.classfile.ClassFile; import org.cojen.classfile.CodeBuilder; import org.cojen.classfile.Label; import org.cojen.classfile.LocalVariable; import org.cojen.classfile.MethodInfo; import org.cojen.classfile.Modifiers; import org.cojen.classfile.Opcode; import org.cojen.classfile.TypeDesc; import org.cojen.util.ClassInjector; import org.cojen.util.KeyFactory; import org.cojen.util.SoftValuedHashMap; import com.amazon.carbonado.ConstraintException; import com.amazon.carbonado.OptimisticLockException; import com.amazon.carbonado.PersistException; import com.amazon.carbonado.Repository; import com.amazon.carbonado.Storable; import com.amazon.carbonado.SupportException; import com.amazon.carbonado.Transaction; import com.amazon.carbonado.info.StorableInfo; import com.amazon.carbonado.info.StorableIntrospector; import com.amazon.carbonado.info.StorableProperty; import static com.amazon.carbonado.spi.CommonMethodNames.*; /** * Generates and caches abstract implementations of {@link Storable} types * suitable for use by master repositories. The generated classes extend those * generated by {@link StorableGenerator}. Subclasses need not worry about * transactions since this class takes care of that. * * @author Brian S O'Neill */ public final class MasterStorableGenerator<S extends Storable> { // Note: All generated fields/methods have a "$" character in them to // prevent name collisions with any inherited fields/methods. User storable // properties are defined as fields which exactly match the property // name. We don't want collisions with those either. Legal bean properties // cannot have "$" in them, so there's nothing to worry about. 
/** Name of protected abstract method in generated storable */ public static final String DO_TRY_LOAD_MASTER_METHOD_NAME = StorableGenerator.DO_TRY_LOAD_METHOD_NAME, DO_TRY_INSERT_MASTER_METHOD_NAME = "doTryInsert$master", DO_TRY_UPDATE_MASTER_METHOD_NAME = "doTryUpdate$master", DO_TRY_DELETE_MASTER_METHOD_NAME = "doTryDelete$master"; private static final String APPEND_UNINIT_PROPERTY = "appendUninitializedPropertyName$"; private static final String INSERT_OP = "Insert"; private static final String UPDATE_OP = "Update"; private static final String DELETE_OP = "Delete"; // Cache of generated abstract classes. private static Map<Object, Class<? extends Storable>> cCache = new SoftValuedHashMap(); /** * Returns an abstract implementation of the given Storable type, which * is fully thread-safe. The Storable type itself may be an interface or * a class. If it is a class, then it must not be final, and it must have a * public, no-arg constructor. The constructor for the returned abstract * class looks like this: * * <pre> * public &lt;init&gt;(MasterSupport); * </pre> * * Subclasses must implement the following abstract protected methods, * whose exact names are defined by constants in this class: * * <pre> * // Load the object by examining the primary key. * protected abstract boolean doTryLoad() throws FetchException; * * // Insert the object into the storage layer. * protected abstract boolean doTryInsert_master() throws PersistException; * * // Update the object in the storage. * protected abstract boolean doTryUpdate_master() throws PersistException; * * // Delete the object from the storage layer by the primary key. * protected abstract boolean doTryDelete_master() throws PersistException; * </pre> * * Subclasses can access the MasterSupport instance via the protected field * named by {@link StorableGenerator#SUPPORT_FIELD_NAME SUPPORT_FIELD_NAME}. * * @throws com.amazon.carbonado.MalformedTypeException if Storable type is not well-formed * @throws IllegalArgumentException if type is null * @see MasterSupport */ public static <S extends Storable> Class<? extends S> getAbstractClass(Class<S> type, EnumSet<MasterFeature> features) throws SupportException, IllegalArgumentException { StorableInfo<S> info = StorableIntrospector.examine(type); anySequences: if (features.contains(MasterFeature.INSERT_SEQUENCES)) { for (StorableProperty<S> property : info.getAllProperties().values()) { if (property.getSequenceName() != null) { break anySequences; } } features.remove(MasterFeature.INSERT_SEQUENCES); } if (info.getVersionProperty() == null) { features.remove(MasterFeature.VERSIONING); } if (features.contains(MasterFeature.VERSIONING)) { // Implied feature. features.add(MasterFeature.UPDATE_FULL); } if (alwaysHasTxn(INSERT_OP, features)) { // Implied feature. features.add(MasterFeature.INSERT_TXN); } if (alwaysHasTxn(UPDATE_OP, features)) { // Implied feature. features.add(MasterFeature.UPDATE_TXN); } if (alwaysHasTxn(DELETE_OP, features)) { // Implied feature. features.add(MasterFeature.DELETE_TXN); } if (requiresTxnForUpdate(INSERT_OP, features)) { // Implied feature. features.add(MasterFeature.INSERT_TXN_FOR_UPDATE); } if (requiresTxnForUpdate(UPDATE_OP, features)) { // Implied feature. features.add(MasterFeature.UPDATE_TXN_FOR_UPDATE); } if (requiresTxnForUpdate(DELETE_OP, features)) { // Implied feature. features.add(MasterFeature.DELETE_TXN_FOR_UPDATE); } Object key = KeyFactory.createKey(new Object[] {type, features}); synchronized (cCache) { Class<? extends S> abstractClass = (Class<? 
extends S>) cCache.get(key); if (abstractClass != null) { return abstractClass; } abstractClass = new MasterStorableGenerator<S>(type, features).generateAndInjectClass(); cCache.put(key, abstractClass); return abstractClass; } } private final EnumSet<MasterFeature> mFeatures; private final StorableInfo<S> mInfo; private final Map<String, ? extends StorableProperty<S>> mAllProperties; private final ClassInjector mClassInjector; private final ClassFile mClassFile; private MasterStorableGenerator(Class<S> storableType, EnumSet<MasterFeature> features) { mFeatures = features; mInfo = StorableIntrospector.examine(storableType); mAllProperties = mInfo.getAllProperties(); final Class<? extends S> abstractClass = StorableGenerator.getAbstractClass(storableType); mClassInjector = ClassInjector.create (storableType.getName(), abstractClass.getClassLoader()); mClassFile = new ClassFile(mClassInjector.getClassName(), abstractClass); mClassFile.setModifiers(mClassFile.getModifiers().toAbstract(true)); mClassFile.markSynthetic(); mClassFile.setSourceFile(MasterStorableGenerator.class.getName()); mClassFile.setTarget("1.5"); } private Class<? extends S> generateAndInjectClass() throws SupportException { generateClass(); Class abstractClass = mClassInjector.defineClass(mClassFile); return (Class<? extends S>) abstractClass; } private void generateClass() throws SupportException { // Declare some types. final TypeDesc storableType = TypeDesc.forClass(Storable.class); final TypeDesc triggerSupportType = TypeDesc.forClass(TriggerSupport.class); final TypeDesc masterSupportType = TypeDesc.forClass(MasterSupport.class); final TypeDesc transactionType = TypeDesc.forClass(Transaction.class); final TypeDesc optimisticLockType = TypeDesc.forClass(OptimisticLockException.class); final TypeDesc persistExceptionType = TypeDesc.forClass(PersistException.class); // Add constructor that accepts a MasterSupport. { TypeDesc[] params = {masterSupportType}; MethodInfo mi = mClassFile.addConstructor(Modifiers.PUBLIC, params); CodeBuilder b = new CodeBuilder(mi); b.loadThis(); b.loadLocal(b.getParameter(0)); b.invokeSuperConstructor(new TypeDesc[] {triggerSupportType}); b.returnVoid(); } // Declare protected abstract methods. { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_INSERT_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_DELETE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); } // Add required protected doTryInsert method. { // If sequence support requested, implement special insert hook to // call sequences for properties which are UNINITIALIZED. User may // provide explicit values for properties with sequences. if (mFeatures.contains(MasterFeature.INSERT_SEQUENCES)) { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED, StorableGenerator.CHECK_PK_FOR_INSERT_METHOD_NAME, null, null); CodeBuilder b = new CodeBuilder(mi); int ordinal = 0; for (StorableProperty<S> property : mAllProperties.values()) { if (property.getSequenceName() != null) { // Check the state of this property, to see if it is // uninitialized. Uninitialized state has value zero. 
String stateFieldName = StorableGenerator.PROPERTY_STATE_FIELD_NAME + (ordinal >> 4); b.loadThis(); b.loadField(stateFieldName, TypeDesc.INT); int shift = (ordinal & 0xf) * 2; b.loadConstant(StorableGenerator.PROPERTY_STATE_MASK << shift); b.math(Opcode.IAND); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); // Load this in preparation for storing value to property. b.loadThis(); // Call MasterSupport.getSequenceValueProducer(String). TypeDesc seqValueProdType = TypeDesc.forClass(SequenceValueProducer.class); b.loadThis(); b.loadField(StorableGenerator.SUPPORT_FIELD_NAME, triggerSupportType); b.checkCast(masterSupportType); b.loadConstant(property.getSequenceName()); b.invokeInterface (masterSupportType, "getSequenceValueProducer", seqValueProdType, new TypeDesc[] {TypeDesc.STRING}); // Find appropriate method to call for getting next sequence value. TypeDesc propertyType = TypeDesc.forClass(property.getType()); TypeDesc propertyObjType = propertyType.toObjectType(); Method method; try { if (propertyObjType == TypeDesc.LONG.toObjectType()) { method = SequenceValueProducer.class .getMethod("nextLongValue", (Class[]) null); } else if (propertyObjType == TypeDesc.INT.toObjectType()) { method = SequenceValueProducer.class .getMethod("nextIntValue", (Class[]) null); } else if (propertyObjType == TypeDesc.STRING) { method = SequenceValueProducer.class .getMethod("nextDecimalValue", (Class[]) null); } else { throw new SupportException ("Unable to support sequence of type \"" + property.getType().getName() + "\" for property: " + property.getName()); } } catch (NoSuchMethodException e) { Error err = new NoSuchMethodError(); err.initCause(e); throw err; } b.invoke(method); b.convert(TypeDesc.forClass(method.getReturnType()), propertyType); // Store property b.storeField(property.getName(), propertyType); // Set state to dirty. b.loadThis(); b.loadThis(); b.loadField(stateFieldName, TypeDesc.INT); b.loadConstant(StorableGenerator.PROPERTY_STATE_DIRTY << shift); b.math(Opcode.IOR); b.storeField(stateFieldName, TypeDesc.INT); isInitialized.setLocation(); } ordinal++; } // We've tried our best to fill in missing values, now run the // original check method. b.loadThis(); b.invokeSuper(mClassFile.getSuperClassName(), StorableGenerator.CHECK_PK_FOR_INSERT_METHOD_NAME, null, null); b.returnVoid(); } MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_INSERT_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); LocalVariable txnVar = b.createLocalVariable(null, transactionType); Label tryStart = addEnterTransaction(b, INSERT_OP, txnVar); if (mFeatures.contains(MasterFeature.VERSIONING)) { // Only set if uninitialized. b.loadThis(); b.invokeVirtual(StorableGenerator.IS_VERSION_INITIALIZED_METHOD_NAME, TypeDesc.BOOLEAN, null); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); addAdjustVersionProperty(b, null, 1); isInitialized.setLocation(); } if (mFeatures.contains(MasterFeature.INSERT_CHECK_REQUIRED)) { // Ensure that required properties have been set. b.loadThis(); b.invokeVirtual(StorableGenerator.IS_REQUIRED_DATA_INITIALIZED_METHOD_NAME, TypeDesc.BOOLEAN, null); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); // Throw a ConstraintException. 
TypeDesc exType = TypeDesc.forClass(ConstraintException.class); b.newObject(exType); b.dup(); // Append all the uninitialized property names to the exception message. LocalVariable countVar = b.createLocalVariable(null, TypeDesc.INT); b.loadConstant(0); b.storeLocal(countVar); TypeDesc sbType = TypeDesc.forClass(StringBuilder.class); b.newObject(sbType); b.dup(); b.loadConstant("Not all required properties have been set: "); TypeDesc[] stringParam = {TypeDesc.STRING}; b.invokeConstructor(sbType, stringParam); LocalVariable sbVar = b.createLocalVariable(null, sbType); b.storeLocal(sbVar); int ordinal = -1; HashSet<Integer> stateAppendMethods = new HashSet<Integer>(); // Parameters are: StringBuilder, count, mask, property name TypeDesc[] appendParams = {sbType, TypeDesc.INT, TypeDesc.INT, TypeDesc.STRING}; for (StorableProperty<S> property : mAllProperties.values()) { ordinal++; if (property.isJoin() || property.isPrimaryKeyMember() || property.isNullable()) { continue; } int stateField = ordinal >> 4; String stateAppendMethodName = APPEND_UNINIT_PROPERTY + stateField; if (!stateAppendMethods.contains(stateField)) { stateAppendMethods.add(stateField); MethodInfo mi2 = mClassFile.addMethod (Modifiers.PRIVATE, stateAppendMethodName, TypeDesc.INT, appendParams); CodeBuilder b2 = new CodeBuilder(mi2); // Load the StringBuilder parameter. b2.loadLocal(b2.getParameter(0)); String stateFieldName = StorableGenerator.PROPERTY_STATE_FIELD_NAME + (ordinal >> 4); b2.loadThis(); b2.loadField(stateFieldName, TypeDesc.INT); // Load the mask parameter. b2.loadLocal(b2.getParameter(2)); b2.math(Opcode.IAND); Label propIsInitialized = b2.createLabel(); b2.ifZeroComparisonBranch(propIsInitialized, "!="); // Load the count parameter. b2.loadLocal(b2.getParameter(1)); Label noComma = b2.createLabel(); b2.ifZeroComparisonBranch(noComma, "=="); b2.loadConstant(", "); b2.invokeVirtual(sbType, "append", sbType, stringParam); noComma.setLocation(); // Load the property name parameter. b2.loadLocal(b2.getParameter(3)); b2.invokeVirtual(sbType, "append", sbType, stringParam); // Increment the count parameter. b2.integerIncrement(b2.getParameter(1), 1); propIsInitialized.setLocation(); // Return the possibly updated count. b2.loadLocal(b2.getParameter(1)); b2.returnValue(TypeDesc.INT); } b.loadThis(); // Parameters are: StringBuilder, count, mask, property name b.loadLocal(sbVar); b.loadLocal(countVar); b.loadConstant(StorableGenerator.PROPERTY_STATE_MASK << ((ordinal & 0xf) * 2)); b.loadConstant(property.getName()); b.invokePrivate(stateAppendMethodName, TypeDesc.INT, appendParams); b.storeLocal(countVar); } b.loadLocal(sbVar); b.invokeVirtual(sbType, "toString", TypeDesc.STRING, null); b.invokeConstructor(exType, new TypeDesc[] {TypeDesc.STRING}); b.throwObject(); isInitialized.setLocation(); } b.loadThis(); b.invokeVirtual(DO_TRY_INSERT_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); if (tryStart == null) { b.returnValue(TypeDesc.BOOLEAN); } else { Label failed = b.createLabel(); b.ifZeroComparisonBranch(failed, "=="); addCommitAndExitTransaction(b, INSERT_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); failed.setLocation(); addExitTransaction(b, INSERT_OP, txnVar); b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, INSERT_OP, txnVar, tryStart); } } // Add required protected doTryUpdate method. 
addDoTryUpdate: { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_UPDATE_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); if ((!mFeatures.contains(MasterFeature.VERSIONING)) && - (!mFeatures.contains(MasterFeature.UPDATE_FULL))) + (!mFeatures.contains(MasterFeature.UPDATE_FULL)) && + (!mFeatures.contains(MasterFeature.UPDATE_TXN))) { // Nothing special needs to be done, so just delegate and return. b.loadThis(); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.returnValue(TypeDesc.BOOLEAN); break addDoTryUpdate; } LocalVariable txnVar = b.createLocalVariable(null, transactionType); LocalVariable savedVar = null; Label tryStart = addEnterTransaction(b, UPDATE_OP, txnVar); Label failed = b.createLabel(); if (mFeatures.contains(MasterFeature.UPDATE_FULL)) { // Storable saved = copy(); b.loadThis(); b.invokeVirtual(COPY_METHOD_NAME, storableType, null); b.checkCast(mClassFile.getType()); savedVar = b.createLocalVariable(null, mClassFile.getType()); b.storeLocal(savedVar); // if (!saved.tryLoad()) { // goto failed; // } b.loadLocal(savedVar); b.invokeInterface(storableType, TRY_LOAD_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); // if (version support enabled) { // if (this.getVersionNumber() != saved.getVersionNumber()) { // throw new OptimisticLockException // (this.getVersionNumber(), saved.getVersionNumber(), this); // } // } if (mFeatures.contains(MasterFeature.VERSIONING)) { TypeDesc versionType = TypeDesc.forClass(mInfo.getVersionProperty().getType()); b.loadThis(); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.loadLocal(savedVar); b.invoke(mInfo.getVersionProperty().getReadMethod()); Label sameVersion = b.createLabel(); CodeBuilderUtil.addValuesEqualCall(b, versionType, true, sameVersion, true); b.newObject(optimisticLockType); b.dup(); b.loadThis(); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.convert(versionType, TypeDesc.OBJECT); b.loadLocal(savedVar); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.convert(versionType, TypeDesc.OBJECT); b.loadThis(); b.invokeConstructor (optimisticLockType, new TypeDesc[] {TypeDesc.OBJECT, TypeDesc.OBJECT, storableType}); b.throwObject(); sameVersion.setLocation(); } // this.copyDirtyProperties(saved); // if (version support enabled) { // saved.setVersionNumber(saved.getVersionNumber() + 1); // } b.loadThis(); b.loadLocal(savedVar); b.invokeVirtual(COPY_DIRTY_PROPERTIES, null, new TypeDesc[] {storableType}); if (mFeatures.contains(MasterFeature.VERSIONING)) { addAdjustVersionProperty(b, savedVar, -1); } // if (!saved.doTryUpdateMaster()) { // goto failed; // } b.loadLocal(savedVar); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); // saved.copyUnequalProperties(this); b.loadLocal(savedVar); b.loadThis(); b.invokeInterface (storableType, COPY_UNEQUAL_PROPERTIES, null, new TypeDesc[] {storableType}); } else { // if (!this.doTryUpdateMaster()) { // goto failed; // } b.loadThis(); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); } // txn.commit(); // txn.exit(); // return true; addCommitAndExitTransaction(b, UPDATE_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); // failed: // txn.exit(); failed.setLocation(); addExitTransaction(b, UPDATE_OP, txnVar); // return false; b.loadConstant(false); 
b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, UPDATE_OP, txnVar, tryStart); } // Add required protected doTryDelete method. { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_DELETE_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); LocalVariable txnVar = b.createLocalVariable(null, transactionType); Label tryStart = addEnterTransaction(b, DELETE_OP, txnVar); b.loadThis(); b.invokeVirtual(DO_TRY_DELETE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); if (tryStart == null) { b.returnValue(TypeDesc.BOOLEAN); } else { Label failed = b.createLabel(); b.ifZeroComparisonBranch(failed, "=="); addCommitAndExitTransaction(b, DELETE_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); failed.setLocation(); addExitTransaction(b, DELETE_OP, txnVar); b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, DELETE_OP, txnVar, tryStart); } } } /** * Generates code to enter a transaction, if required. * * @param opType type of operation, Insert, Update, or Delete * @param txnVar required variable of type Transaction for storing transaction * @return optional try start label for transaction */ private Label addEnterTransaction(CodeBuilder b, String opType, LocalVariable txnVar) { if (!alwaysHasTxn(opType)) { return null; } // txn = masterSupport.getRootRepository().enterTransaction(); TypeDesc repositoryType = TypeDesc.forClass(Repository.class); TypeDesc transactionType = TypeDesc.forClass(Transaction.class); TypeDesc triggerSupportType = TypeDesc.forClass(TriggerSupport.class); TypeDesc masterSupportType = TypeDesc.forClass(MasterSupport.class); b.loadThis(); b.loadField(StorableGenerator.SUPPORT_FIELD_NAME, triggerSupportType); b.invokeInterface(masterSupportType, "getRootRepository", repositoryType, null); b.invokeInterface(repositoryType, ENTER_TRANSACTION_METHOD_NAME, transactionType, null); b.storeLocal(txnVar); if (requiresTxnForUpdate(opType)) { // txn.setForUpdate(true); b.loadLocal(txnVar); b.loadConstant(true); b.invokeInterface(transactionType, SET_FOR_UPDATE_METHOD_NAME, null, new TypeDesc[] {TypeDesc.BOOLEAN}); } return b.createLabel().setLocation(); } private boolean alwaysHasTxn(String opType) { return alwaysHasTxn(opType, mFeatures); } private static boolean alwaysHasTxn(String opType, EnumSet<MasterFeature> features) { if (opType == UPDATE_OP) { return features.contains(MasterFeature.UPDATE_TXN) || features.contains(MasterFeature.UPDATE_TXN_FOR_UPDATE) || features.contains(MasterFeature.VERSIONING) || features.contains(MasterFeature.UPDATE_FULL); } else if (opType == INSERT_OP) { return features.contains(MasterFeature.INSERT_TXN) || features.contains(MasterFeature.INSERT_TXN_FOR_UPDATE); } else if (opType == DELETE_OP) { return features.contains(MasterFeature.DELETE_TXN) || features.contains(MasterFeature.DELETE_TXN_FOR_UPDATE); } return false; } private boolean requiresTxnForUpdate(String opType) { return requiresTxnForUpdate(opType, mFeatures); } private static boolean requiresTxnForUpdate(String opType, EnumSet<MasterFeature> features) { if (opType == UPDATE_OP) { return features.contains(MasterFeature.UPDATE_TXN_FOR_UPDATE) || features.contains(MasterFeature.VERSIONING) || features.contains(MasterFeature.UPDATE_FULL); } else if (opType == INSERT_OP) { return features.contains(MasterFeature.INSERT_TXN_FOR_UPDATE); } else if (opType == DELETE_OP) { return features.contains(MasterFeature.DELETE_TXN_FOR_UPDATE); } return false; } private 
void addCommitAndExitTransaction(CodeBuilder b, String opType, LocalVariable txnVar) { if (!alwaysHasTxn(opType)) { return; } TypeDesc transactionType = TypeDesc.forClass(Transaction.class); // txn.commit(); // txn.exit(); b.loadLocal(txnVar); b.invokeInterface(transactionType, COMMIT_METHOD_NAME, null, null); b.loadLocal(txnVar); b.invokeInterface(transactionType, EXIT_METHOD_NAME, null, null); } /** * * @param opType type of operation, Insert, Update, or Delete */ private void addExitTransaction(CodeBuilder b, String opType, LocalVariable txnVar) { if (!alwaysHasTxn(opType)) { return; } TypeDesc transactionType = TypeDesc.forClass(Transaction.class); // txn.exit(); b.loadLocal(txnVar); b.invokeInterface(transactionType, EXIT_METHOD_NAME, null, null); } /** * * @param opType type of operation, Insert, Update, or Delete */ private void addExitTransaction(CodeBuilder b, String opType, LocalVariable txnVar, Label tryStart) { if (tryStart == null) { addExitTransaction(b, opType, txnVar); return; } // } catch (... e) { // txn.exit(); // throw e; // } Label tryEnd = b.createLabel().setLocation(); b.exceptionHandler(tryStart, tryEnd, null); addExitTransaction(b, opType, txnVar); b.throwObject(); } /* * Generates code to adjust the version property. If value parameter is negative, then * version is incremented as follows: * * storable.setVersionNumber(storable.getVersionNumber() + 1); * * Otherwise, the version is set: * * storable.setVersionNumber(value); * * @param storableVar references storable instance, or null if this * @param value if negative, increment version, else, set version to this value */ private void addAdjustVersionProperty(CodeBuilder b, LocalVariable storableVar, int value) throws SupportException { StorableProperty<?> versionProperty = mInfo.getVersionProperty(); TypeDesc versionType = TypeDesc.forClass(versionProperty.getType()); TypeDesc versionPrimitiveType = versionType.toPrimitiveType(); supportCheck: { if (versionPrimitiveType != null) { switch (versionPrimitiveType.getTypeCode()) { case TypeDesc.INT_CODE: case TypeDesc.LONG_CODE: break supportCheck; } } throw new SupportException ("Unsupported version type: " + versionType.getFullName()); } if (storableVar == null) { b.loadThis(); } else { b.loadLocal(storableVar); } if (value >= 0) { if (versionPrimitiveType == TypeDesc.LONG) { b.loadConstant((long) value); } else { b.loadConstant(value); } } else { b.dup(); b.invoke(versionProperty.getReadMethod()); Label setVersion = b.createLabel(); if (!versionType.isPrimitive()) { b.dup(); Label versionNotNull = b.createLabel(); b.ifNullBranch(versionNotNull, false); b.pop(); if (versionPrimitiveType == TypeDesc.LONG) { b.loadConstant(1L); } else { b.loadConstant(1); } b.branch(setVersion); versionNotNull.setLocation(); b.convert(versionType, versionPrimitiveType); } if (versionPrimitiveType == TypeDesc.LONG) { b.loadConstant(1L); b.math(Opcode.LADD); } else { b.loadConstant(1); b.math(Opcode.IADD); } setVersion.setLocation(); } b.convert(versionPrimitiveType, versionType); b.invoke(versionProperty.getWriteMethod()); } }
true
true
private void generateClass() throws SupportException { // Declare some types. final TypeDesc storableType = TypeDesc.forClass(Storable.class); final TypeDesc triggerSupportType = TypeDesc.forClass(TriggerSupport.class); final TypeDesc masterSupportType = TypeDesc.forClass(MasterSupport.class); final TypeDesc transactionType = TypeDesc.forClass(Transaction.class); final TypeDesc optimisticLockType = TypeDesc.forClass(OptimisticLockException.class); final TypeDesc persistExceptionType = TypeDesc.forClass(PersistException.class); // Add constructor that accepts a MasterSupport. { TypeDesc[] params = {masterSupportType}; MethodInfo mi = mClassFile.addConstructor(Modifiers.PUBLIC, params); CodeBuilder b = new CodeBuilder(mi); b.loadThis(); b.loadLocal(b.getParameter(0)); b.invokeSuperConstructor(new TypeDesc[] {triggerSupportType}); b.returnVoid(); } // Declare protected abstract methods. { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_INSERT_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_DELETE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); } // Add required protected doTryInsert method. { // If sequence support requested, implement special insert hook to // call sequences for properties which are UNINITIALIZED. User may // provide explicit values for properties with sequences. if (mFeatures.contains(MasterFeature.INSERT_SEQUENCES)) { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED, StorableGenerator.CHECK_PK_FOR_INSERT_METHOD_NAME, null, null); CodeBuilder b = new CodeBuilder(mi); int ordinal = 0; for (StorableProperty<S> property : mAllProperties.values()) { if (property.getSequenceName() != null) { // Check the state of this property, to see if it is // uninitialized. Uninitialized state has value zero. String stateFieldName = StorableGenerator.PROPERTY_STATE_FIELD_NAME + (ordinal >> 4); b.loadThis(); b.loadField(stateFieldName, TypeDesc.INT); int shift = (ordinal & 0xf) * 2; b.loadConstant(StorableGenerator.PROPERTY_STATE_MASK << shift); b.math(Opcode.IAND); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); // Load this in preparation for storing value to property. b.loadThis(); // Call MasterSupport.getSequenceValueProducer(String). TypeDesc seqValueProdType = TypeDesc.forClass(SequenceValueProducer.class); b.loadThis(); b.loadField(StorableGenerator.SUPPORT_FIELD_NAME, triggerSupportType); b.checkCast(masterSupportType); b.loadConstant(property.getSequenceName()); b.invokeInterface (masterSupportType, "getSequenceValueProducer", seqValueProdType, new TypeDesc[] {TypeDesc.STRING}); // Find appropriate method to call for getting next sequence value. 
TypeDesc propertyType = TypeDesc.forClass(property.getType()); TypeDesc propertyObjType = propertyType.toObjectType(); Method method; try { if (propertyObjType == TypeDesc.LONG.toObjectType()) { method = SequenceValueProducer.class .getMethod("nextLongValue", (Class[]) null); } else if (propertyObjType == TypeDesc.INT.toObjectType()) { method = SequenceValueProducer.class .getMethod("nextIntValue", (Class[]) null); } else if (propertyObjType == TypeDesc.STRING) { method = SequenceValueProducer.class .getMethod("nextDecimalValue", (Class[]) null); } else { throw new SupportException ("Unable to support sequence of type \"" + property.getType().getName() + "\" for property: " + property.getName()); } } catch (NoSuchMethodException e) { Error err = new NoSuchMethodError(); err.initCause(e); throw err; } b.invoke(method); b.convert(TypeDesc.forClass(method.getReturnType()), propertyType); // Store property b.storeField(property.getName(), propertyType); // Set state to dirty. b.loadThis(); b.loadThis(); b.loadField(stateFieldName, TypeDesc.INT); b.loadConstant(StorableGenerator.PROPERTY_STATE_DIRTY << shift); b.math(Opcode.IOR); b.storeField(stateFieldName, TypeDesc.INT); isInitialized.setLocation(); } ordinal++; } // We've tried our best to fill in missing values, now run the // original check method. b.loadThis(); b.invokeSuper(mClassFile.getSuperClassName(), StorableGenerator.CHECK_PK_FOR_INSERT_METHOD_NAME, null, null); b.returnVoid(); } MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_INSERT_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); LocalVariable txnVar = b.createLocalVariable(null, transactionType); Label tryStart = addEnterTransaction(b, INSERT_OP, txnVar); if (mFeatures.contains(MasterFeature.VERSIONING)) { // Only set if uninitialized. b.loadThis(); b.invokeVirtual(StorableGenerator.IS_VERSION_INITIALIZED_METHOD_NAME, TypeDesc.BOOLEAN, null); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); addAdjustVersionProperty(b, null, 1); isInitialized.setLocation(); } if (mFeatures.contains(MasterFeature.INSERT_CHECK_REQUIRED)) { // Ensure that required properties have been set. b.loadThis(); b.invokeVirtual(StorableGenerator.IS_REQUIRED_DATA_INITIALIZED_METHOD_NAME, TypeDesc.BOOLEAN, null); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); // Throw a ConstraintException. TypeDesc exType = TypeDesc.forClass(ConstraintException.class); b.newObject(exType); b.dup(); // Append all the uninitialized property names to the exception message. 
LocalVariable countVar = b.createLocalVariable(null, TypeDesc.INT); b.loadConstant(0); b.storeLocal(countVar); TypeDesc sbType = TypeDesc.forClass(StringBuilder.class); b.newObject(sbType); b.dup(); b.loadConstant("Not all required properties have been set: "); TypeDesc[] stringParam = {TypeDesc.STRING}; b.invokeConstructor(sbType, stringParam); LocalVariable sbVar = b.createLocalVariable(null, sbType); b.storeLocal(sbVar); int ordinal = -1; HashSet<Integer> stateAppendMethods = new HashSet<Integer>(); // Parameters are: StringBuilder, count, mask, property name TypeDesc[] appendParams = {sbType, TypeDesc.INT, TypeDesc.INT, TypeDesc.STRING}; for (StorableProperty<S> property : mAllProperties.values()) { ordinal++; if (property.isJoin() || property.isPrimaryKeyMember() || property.isNullable()) { continue; } int stateField = ordinal >> 4; String stateAppendMethodName = APPEND_UNINIT_PROPERTY + stateField; if (!stateAppendMethods.contains(stateField)) { stateAppendMethods.add(stateField); MethodInfo mi2 = mClassFile.addMethod (Modifiers.PRIVATE, stateAppendMethodName, TypeDesc.INT, appendParams); CodeBuilder b2 = new CodeBuilder(mi2); // Load the StringBuilder parameter. b2.loadLocal(b2.getParameter(0)); String stateFieldName = StorableGenerator.PROPERTY_STATE_FIELD_NAME + (ordinal >> 4); b2.loadThis(); b2.loadField(stateFieldName, TypeDesc.INT); // Load the mask parameter. b2.loadLocal(b2.getParameter(2)); b2.math(Opcode.IAND); Label propIsInitialized = b2.createLabel(); b2.ifZeroComparisonBranch(propIsInitialized, "!="); // Load the count parameter. b2.loadLocal(b2.getParameter(1)); Label noComma = b2.createLabel(); b2.ifZeroComparisonBranch(noComma, "=="); b2.loadConstant(", "); b2.invokeVirtual(sbType, "append", sbType, stringParam); noComma.setLocation(); // Load the property name parameter. b2.loadLocal(b2.getParameter(3)); b2.invokeVirtual(sbType, "append", sbType, stringParam); // Increment the count parameter. b2.integerIncrement(b2.getParameter(1), 1); propIsInitialized.setLocation(); // Return the possibly updated count. b2.loadLocal(b2.getParameter(1)); b2.returnValue(TypeDesc.INT); } b.loadThis(); // Parameters are: StringBuilder, count, mask, property name b.loadLocal(sbVar); b.loadLocal(countVar); b.loadConstant(StorableGenerator.PROPERTY_STATE_MASK << ((ordinal & 0xf) * 2)); b.loadConstant(property.getName()); b.invokePrivate(stateAppendMethodName, TypeDesc.INT, appendParams); b.storeLocal(countVar); } b.loadLocal(sbVar); b.invokeVirtual(sbType, "toString", TypeDesc.STRING, null); b.invokeConstructor(exType, new TypeDesc[] {TypeDesc.STRING}); b.throwObject(); isInitialized.setLocation(); } b.loadThis(); b.invokeVirtual(DO_TRY_INSERT_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); if (tryStart == null) { b.returnValue(TypeDesc.BOOLEAN); } else { Label failed = b.createLabel(); b.ifZeroComparisonBranch(failed, "=="); addCommitAndExitTransaction(b, INSERT_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); failed.setLocation(); addExitTransaction(b, INSERT_OP, txnVar); b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, INSERT_OP, txnVar, tryStart); } } // Add required protected doTryUpdate method. 
addDoTryUpdate: { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_UPDATE_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); if ((!mFeatures.contains(MasterFeature.VERSIONING)) && (!mFeatures.contains(MasterFeature.UPDATE_FULL))) { // Nothing special needs to be done, so just delegate and return. b.loadThis(); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.returnValue(TypeDesc.BOOLEAN); break addDoTryUpdate; } LocalVariable txnVar = b.createLocalVariable(null, transactionType); LocalVariable savedVar = null; Label tryStart = addEnterTransaction(b, UPDATE_OP, txnVar); Label failed = b.createLabel(); if (mFeatures.contains(MasterFeature.UPDATE_FULL)) { // Storable saved = copy(); b.loadThis(); b.invokeVirtual(COPY_METHOD_NAME, storableType, null); b.checkCast(mClassFile.getType()); savedVar = b.createLocalVariable(null, mClassFile.getType()); b.storeLocal(savedVar); // if (!saved.tryLoad()) { // goto failed; // } b.loadLocal(savedVar); b.invokeInterface(storableType, TRY_LOAD_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); // if (version support enabled) { // if (this.getVersionNumber() != saved.getVersionNumber()) { // throw new OptimisticLockException // (this.getVersionNumber(), saved.getVersionNumber(), this); // } // } if (mFeatures.contains(MasterFeature.VERSIONING)) { TypeDesc versionType = TypeDesc.forClass(mInfo.getVersionProperty().getType()); b.loadThis(); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.loadLocal(savedVar); b.invoke(mInfo.getVersionProperty().getReadMethod()); Label sameVersion = b.createLabel(); CodeBuilderUtil.addValuesEqualCall(b, versionType, true, sameVersion, true); b.newObject(optimisticLockType); b.dup(); b.loadThis(); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.convert(versionType, TypeDesc.OBJECT); b.loadLocal(savedVar); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.convert(versionType, TypeDesc.OBJECT); b.loadThis(); b.invokeConstructor (optimisticLockType, new TypeDesc[] {TypeDesc.OBJECT, TypeDesc.OBJECT, storableType}); b.throwObject(); sameVersion.setLocation(); } // this.copyDirtyProperties(saved); // if (version support enabled) { // saved.setVersionNumber(saved.getVersionNumber() + 1); // } b.loadThis(); b.loadLocal(savedVar); b.invokeVirtual(COPY_DIRTY_PROPERTIES, null, new TypeDesc[] {storableType}); if (mFeatures.contains(MasterFeature.VERSIONING)) { addAdjustVersionProperty(b, savedVar, -1); } // if (!saved.doTryUpdateMaster()) { // goto failed; // } b.loadLocal(savedVar); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); // saved.copyUnequalProperties(this); b.loadLocal(savedVar); b.loadThis(); b.invokeInterface (storableType, COPY_UNEQUAL_PROPERTIES, null, new TypeDesc[] {storableType}); } else { // if (!this.doTryUpdateMaster()) { // goto failed; // } b.loadThis(); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); } // txn.commit(); // txn.exit(); // return true; addCommitAndExitTransaction(b, UPDATE_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); // failed: // txn.exit(); failed.setLocation(); addExitTransaction(b, UPDATE_OP, txnVar); // return false; b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, UPDATE_OP, txnVar, tryStart); } // Add required protected 
doTryDelete method. { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_DELETE_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); LocalVariable txnVar = b.createLocalVariable(null, transactionType); Label tryStart = addEnterTransaction(b, DELETE_OP, txnVar); b.loadThis(); b.invokeVirtual(DO_TRY_DELETE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); if (tryStart == null) { b.returnValue(TypeDesc.BOOLEAN); } else { Label failed = b.createLabel(); b.ifZeroComparisonBranch(failed, "=="); addCommitAndExitTransaction(b, DELETE_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); failed.setLocation(); addExitTransaction(b, DELETE_OP, txnVar); b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, DELETE_OP, txnVar, tryStart); } } }
private void generateClass() throws SupportException { // Declare some types. final TypeDesc storableType = TypeDesc.forClass(Storable.class); final TypeDesc triggerSupportType = TypeDesc.forClass(TriggerSupport.class); final TypeDesc masterSupportType = TypeDesc.forClass(MasterSupport.class); final TypeDesc transactionType = TypeDesc.forClass(Transaction.class); final TypeDesc optimisticLockType = TypeDesc.forClass(OptimisticLockException.class); final TypeDesc persistExceptionType = TypeDesc.forClass(PersistException.class); // Add constructor that accepts a MasterSupport. { TypeDesc[] params = {masterSupportType}; MethodInfo mi = mClassFile.addConstructor(Modifiers.PUBLIC, params); CodeBuilder b = new CodeBuilder(mi); b.loadThis(); b.loadLocal(b.getParameter(0)); b.invokeSuperConstructor(new TypeDesc[] {triggerSupportType}); b.returnVoid(); } // Declare protected abstract methods. { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_INSERT_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); mi = mClassFile.addMethod (Modifiers.PROTECTED.toAbstract(true), DO_TRY_DELETE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); } // Add required protected doTryInsert method. { // If sequence support requested, implement special insert hook to // call sequences for properties which are UNINITIALIZED. User may // provide explicit values for properties with sequences. if (mFeatures.contains(MasterFeature.INSERT_SEQUENCES)) { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED, StorableGenerator.CHECK_PK_FOR_INSERT_METHOD_NAME, null, null); CodeBuilder b = new CodeBuilder(mi); int ordinal = 0; for (StorableProperty<S> property : mAllProperties.values()) { if (property.getSequenceName() != null) { // Check the state of this property, to see if it is // uninitialized. Uninitialized state has value zero. String stateFieldName = StorableGenerator.PROPERTY_STATE_FIELD_NAME + (ordinal >> 4); b.loadThis(); b.loadField(stateFieldName, TypeDesc.INT); int shift = (ordinal & 0xf) * 2; b.loadConstant(StorableGenerator.PROPERTY_STATE_MASK << shift); b.math(Opcode.IAND); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); // Load this in preparation for storing value to property. b.loadThis(); // Call MasterSupport.getSequenceValueProducer(String). TypeDesc seqValueProdType = TypeDesc.forClass(SequenceValueProducer.class); b.loadThis(); b.loadField(StorableGenerator.SUPPORT_FIELD_NAME, triggerSupportType); b.checkCast(masterSupportType); b.loadConstant(property.getSequenceName()); b.invokeInterface (masterSupportType, "getSequenceValueProducer", seqValueProdType, new TypeDesc[] {TypeDesc.STRING}); // Find appropriate method to call for getting next sequence value. 
TypeDesc propertyType = TypeDesc.forClass(property.getType()); TypeDesc propertyObjType = propertyType.toObjectType(); Method method; try { if (propertyObjType == TypeDesc.LONG.toObjectType()) { method = SequenceValueProducer.class .getMethod("nextLongValue", (Class[]) null); } else if (propertyObjType == TypeDesc.INT.toObjectType()) { method = SequenceValueProducer.class .getMethod("nextIntValue", (Class[]) null); } else if (propertyObjType == TypeDesc.STRING) { method = SequenceValueProducer.class .getMethod("nextDecimalValue", (Class[]) null); } else { throw new SupportException ("Unable to support sequence of type \"" + property.getType().getName() + "\" for property: " + property.getName()); } } catch (NoSuchMethodException e) { Error err = new NoSuchMethodError(); err.initCause(e); throw err; } b.invoke(method); b.convert(TypeDesc.forClass(method.getReturnType()), propertyType); // Store property b.storeField(property.getName(), propertyType); // Set state to dirty. b.loadThis(); b.loadThis(); b.loadField(stateFieldName, TypeDesc.INT); b.loadConstant(StorableGenerator.PROPERTY_STATE_DIRTY << shift); b.math(Opcode.IOR); b.storeField(stateFieldName, TypeDesc.INT); isInitialized.setLocation(); } ordinal++; } // We've tried our best to fill in missing values, now run the // original check method. b.loadThis(); b.invokeSuper(mClassFile.getSuperClassName(), StorableGenerator.CHECK_PK_FOR_INSERT_METHOD_NAME, null, null); b.returnVoid(); } MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_INSERT_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); LocalVariable txnVar = b.createLocalVariable(null, transactionType); Label tryStart = addEnterTransaction(b, INSERT_OP, txnVar); if (mFeatures.contains(MasterFeature.VERSIONING)) { // Only set if uninitialized. b.loadThis(); b.invokeVirtual(StorableGenerator.IS_VERSION_INITIALIZED_METHOD_NAME, TypeDesc.BOOLEAN, null); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); addAdjustVersionProperty(b, null, 1); isInitialized.setLocation(); } if (mFeatures.contains(MasterFeature.INSERT_CHECK_REQUIRED)) { // Ensure that required properties have been set. b.loadThis(); b.invokeVirtual(StorableGenerator.IS_REQUIRED_DATA_INITIALIZED_METHOD_NAME, TypeDesc.BOOLEAN, null); Label isInitialized = b.createLabel(); b.ifZeroComparisonBranch(isInitialized, "!="); // Throw a ConstraintException. TypeDesc exType = TypeDesc.forClass(ConstraintException.class); b.newObject(exType); b.dup(); // Append all the uninitialized property names to the exception message. 
LocalVariable countVar = b.createLocalVariable(null, TypeDesc.INT); b.loadConstant(0); b.storeLocal(countVar); TypeDesc sbType = TypeDesc.forClass(StringBuilder.class); b.newObject(sbType); b.dup(); b.loadConstant("Not all required properties have been set: "); TypeDesc[] stringParam = {TypeDesc.STRING}; b.invokeConstructor(sbType, stringParam); LocalVariable sbVar = b.createLocalVariable(null, sbType); b.storeLocal(sbVar); int ordinal = -1; HashSet<Integer> stateAppendMethods = new HashSet<Integer>(); // Parameters are: StringBuilder, count, mask, property name TypeDesc[] appendParams = {sbType, TypeDesc.INT, TypeDesc.INT, TypeDesc.STRING}; for (StorableProperty<S> property : mAllProperties.values()) { ordinal++; if (property.isJoin() || property.isPrimaryKeyMember() || property.isNullable()) { continue; } int stateField = ordinal >> 4; String stateAppendMethodName = APPEND_UNINIT_PROPERTY + stateField; if (!stateAppendMethods.contains(stateField)) { stateAppendMethods.add(stateField); MethodInfo mi2 = mClassFile.addMethod (Modifiers.PRIVATE, stateAppendMethodName, TypeDesc.INT, appendParams); CodeBuilder b2 = new CodeBuilder(mi2); // Load the StringBuilder parameter. b2.loadLocal(b2.getParameter(0)); String stateFieldName = StorableGenerator.PROPERTY_STATE_FIELD_NAME + (ordinal >> 4); b2.loadThis(); b2.loadField(stateFieldName, TypeDesc.INT); // Load the mask parameter. b2.loadLocal(b2.getParameter(2)); b2.math(Opcode.IAND); Label propIsInitialized = b2.createLabel(); b2.ifZeroComparisonBranch(propIsInitialized, "!="); // Load the count parameter. b2.loadLocal(b2.getParameter(1)); Label noComma = b2.createLabel(); b2.ifZeroComparisonBranch(noComma, "=="); b2.loadConstant(", "); b2.invokeVirtual(sbType, "append", sbType, stringParam); noComma.setLocation(); // Load the property name parameter. b2.loadLocal(b2.getParameter(3)); b2.invokeVirtual(sbType, "append", sbType, stringParam); // Increment the count parameter. b2.integerIncrement(b2.getParameter(1), 1); propIsInitialized.setLocation(); // Return the possibly updated count. b2.loadLocal(b2.getParameter(1)); b2.returnValue(TypeDesc.INT); } b.loadThis(); // Parameters are: StringBuilder, count, mask, property name b.loadLocal(sbVar); b.loadLocal(countVar); b.loadConstant(StorableGenerator.PROPERTY_STATE_MASK << ((ordinal & 0xf) * 2)); b.loadConstant(property.getName()); b.invokePrivate(stateAppendMethodName, TypeDesc.INT, appendParams); b.storeLocal(countVar); } b.loadLocal(sbVar); b.invokeVirtual(sbType, "toString", TypeDesc.STRING, null); b.invokeConstructor(exType, new TypeDesc[] {TypeDesc.STRING}); b.throwObject(); isInitialized.setLocation(); } b.loadThis(); b.invokeVirtual(DO_TRY_INSERT_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); if (tryStart == null) { b.returnValue(TypeDesc.BOOLEAN); } else { Label failed = b.createLabel(); b.ifZeroComparisonBranch(failed, "=="); addCommitAndExitTransaction(b, INSERT_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); failed.setLocation(); addExitTransaction(b, INSERT_OP, txnVar); b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, INSERT_OP, txnVar, tryStart); } } // Add required protected doTryUpdate method. 
addDoTryUpdate: { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_UPDATE_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); if ((!mFeatures.contains(MasterFeature.VERSIONING)) && (!mFeatures.contains(MasterFeature.UPDATE_FULL)) && (!mFeatures.contains(MasterFeature.UPDATE_TXN))) { // Nothing special needs to be done, so just delegate and return. b.loadThis(); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.returnValue(TypeDesc.BOOLEAN); break addDoTryUpdate; } LocalVariable txnVar = b.createLocalVariable(null, transactionType); LocalVariable savedVar = null; Label tryStart = addEnterTransaction(b, UPDATE_OP, txnVar); Label failed = b.createLabel(); if (mFeatures.contains(MasterFeature.UPDATE_FULL)) { // Storable saved = copy(); b.loadThis(); b.invokeVirtual(COPY_METHOD_NAME, storableType, null); b.checkCast(mClassFile.getType()); savedVar = b.createLocalVariable(null, mClassFile.getType()); b.storeLocal(savedVar); // if (!saved.tryLoad()) { // goto failed; // } b.loadLocal(savedVar); b.invokeInterface(storableType, TRY_LOAD_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); // if (version support enabled) { // if (this.getVersionNumber() != saved.getVersionNumber()) { // throw new OptimisticLockException // (this.getVersionNumber(), saved.getVersionNumber(), this); // } // } if (mFeatures.contains(MasterFeature.VERSIONING)) { TypeDesc versionType = TypeDesc.forClass(mInfo.getVersionProperty().getType()); b.loadThis(); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.loadLocal(savedVar); b.invoke(mInfo.getVersionProperty().getReadMethod()); Label sameVersion = b.createLabel(); CodeBuilderUtil.addValuesEqualCall(b, versionType, true, sameVersion, true); b.newObject(optimisticLockType); b.dup(); b.loadThis(); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.convert(versionType, TypeDesc.OBJECT); b.loadLocal(savedVar); b.invoke(mInfo.getVersionProperty().getReadMethod()); b.convert(versionType, TypeDesc.OBJECT); b.loadThis(); b.invokeConstructor (optimisticLockType, new TypeDesc[] {TypeDesc.OBJECT, TypeDesc.OBJECT, storableType}); b.throwObject(); sameVersion.setLocation(); } // this.copyDirtyProperties(saved); // if (version support enabled) { // saved.setVersionNumber(saved.getVersionNumber() + 1); // } b.loadThis(); b.loadLocal(savedVar); b.invokeVirtual(COPY_DIRTY_PROPERTIES, null, new TypeDesc[] {storableType}); if (mFeatures.contains(MasterFeature.VERSIONING)) { addAdjustVersionProperty(b, savedVar, -1); } // if (!saved.doTryUpdateMaster()) { // goto failed; // } b.loadLocal(savedVar); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); // saved.copyUnequalProperties(this); b.loadLocal(savedVar); b.loadThis(); b.invokeInterface (storableType, COPY_UNEQUAL_PROPERTIES, null, new TypeDesc[] {storableType}); } else { // if (!this.doTryUpdateMaster()) { // goto failed; // } b.loadThis(); b.invokeVirtual(DO_TRY_UPDATE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); b.ifZeroComparisonBranch(failed, "=="); } // txn.commit(); // txn.exit(); // return true; addCommitAndExitTransaction(b, UPDATE_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); // failed: // txn.exit(); failed.setLocation(); addExitTransaction(b, UPDATE_OP, txnVar); // return false; b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, UPDATE_OP, 
txnVar, tryStart); } // Add required protected doTryDelete method. { MethodInfo mi = mClassFile.addMethod (Modifiers.PROTECTED.toFinal(true), StorableGenerator.DO_TRY_DELETE_METHOD_NAME, TypeDesc.BOOLEAN, null); mi.addException(persistExceptionType); CodeBuilder b = new CodeBuilder(mi); LocalVariable txnVar = b.createLocalVariable(null, transactionType); Label tryStart = addEnterTransaction(b, DELETE_OP, txnVar); b.loadThis(); b.invokeVirtual(DO_TRY_DELETE_MASTER_METHOD_NAME, TypeDesc.BOOLEAN, null); if (tryStart == null) { b.returnValue(TypeDesc.BOOLEAN); } else { Label failed = b.createLabel(); b.ifZeroComparisonBranch(failed, "=="); addCommitAndExitTransaction(b, DELETE_OP, txnVar); b.loadConstant(true); b.returnValue(TypeDesc.BOOLEAN); failed.setLocation(); addExitTransaction(b, DELETE_OP, txnVar); b.loadConstant(false); b.returnValue(TypeDesc.BOOLEAN); addExitTransaction(b, DELETE_OP, txnVar, tryStart); } } }
diff --git a/client/src/main/java/edu/exigen/client/LibraryClient.java b/client/src/main/java/edu/exigen/client/LibraryClient.java index 9dbe3af..827e3da 100644 --- a/client/src/main/java/edu/exigen/client/LibraryClient.java +++ b/client/src/main/java/edu/exigen/client/LibraryClient.java @@ -1,82 +1,82 @@ package edu.exigen.client; import edu.exigen.client.gui.LibraryClientComponent; import edu.exigen.server.provider.BookProvider; import edu.exigen.server.provider.ReaderProvider; import edu.exigen.server.provider.ReservationRecordProvider; import javax.naming.Context; import javax.naming.InitialContext; import javax.naming.NamingException; import javax.swing.*; import java.rmi.RemoteException; /** * @author Tedikova O. * @version 1.0 */ public class LibraryClient { private static final String BOOK_PROVIDER_URL = "rmi://localhost/book_provider"; private static final String READER_PROVIDER_URL = "rmi://localhost/reader_provider"; private static final String RECORD_PROVIDER_URL = "rmi://localhost/record_provider"; private BookProvider bookProvider; private ReaderProvider readerProvider; private ReservationRecordProvider recordProvider; public LibraryClient(BookProvider bookProvider, ReaderProvider readerProvider, ReservationRecordProvider recordProvider) { this.bookProvider = bookProvider; this.readerProvider = readerProvider; this.recordProvider = recordProvider; } public static void main(String[] args) { Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { @Override public void uncaughtException(Thread t, Throwable e) { e.printStackTrace(); if (e.getMessage().contains("java.net.ConnectException")) { JOptionPane.showMessageDialog(null, "Client was disconnected, please, check server.", "Library client", JOptionPane.ERROR_MESSAGE); } else { - JOptionPane.showMessageDialog(null, "Internal client error. " + e.getMessage(), "Library client", JOptionPane.ERROR_MESSAGE); + JOptionPane.showMessageDialog(null, e.getMessage(), "Library client", JOptionPane.ERROR_MESSAGE); } } }); try { Context namingContext = new InitialContext(); BookProvider bookProvider = (BookProvider) namingContext.lookup(BOOK_PROVIDER_URL); ReaderProvider readerProvider = (ReaderProvider) namingContext.lookup(READER_PROVIDER_URL); ReservationRecordProvider recordProvider = (ReservationRecordProvider) namingContext.lookup(RECORD_PROVIDER_URL); LibraryClient libraryClient = new LibraryClient(bookProvider, readerProvider, recordProvider); final LibraryClientComponent clientComponent = new LibraryClientComponent(libraryClient); SwingUtilities.invokeLater(new Runnable() { @Override public void run() { JFrame clientFrame = clientComponent.getLibraryClientFrame(); clientFrame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); clientFrame.setLocationRelativeTo(null); clientFrame.setVisible(true); } }); } catch (NamingException e) { JOptionPane.showMessageDialog(null, "Can't find 'Library Server' at localhost.", "Library client", JOptionPane.INFORMATION_MESSAGE); System.exit(-1); } catch (RemoteException e) { JOptionPane.showMessageDialog(null, "Read server data failed.", "Library client", JOptionPane.INFORMATION_MESSAGE); System.exit(-1); } } public BookProvider getBookProvider() { return bookProvider; } public ReaderProvider getReaderProvider() { return readerProvider; } public ReservationRecordProvider getRecordProvider() { return recordProvider; } }
true
true
public static void main(String[] args) {
        Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
                e.printStackTrace();
                if (e.getMessage().contains("java.net.ConnectException")) {
                    JOptionPane.showMessageDialog(null, "Client was disconnected, please, check server.", "Library client", JOptionPane.ERROR_MESSAGE);
                } else {
                    JOptionPane.showMessageDialog(null, "Internal client error. " + e.getMessage(), "Library client", JOptionPane.ERROR_MESSAGE);
                }
            }
        });
        try {
            Context namingContext = new InitialContext();
            BookProvider bookProvider = (BookProvider) namingContext.lookup(BOOK_PROVIDER_URL);
            ReaderProvider readerProvider = (ReaderProvider) namingContext.lookup(READER_PROVIDER_URL);
            ReservationRecordProvider recordProvider = (ReservationRecordProvider) namingContext.lookup(RECORD_PROVIDER_URL);
            LibraryClient libraryClient = new LibraryClient(bookProvider, readerProvider, recordProvider);
            final LibraryClientComponent clientComponent = new LibraryClientComponent(libraryClient);
            SwingUtilities.invokeLater(new Runnable() {
                @Override
                public void run() {
                    JFrame clientFrame = clientComponent.getLibraryClientFrame();
                    clientFrame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
                    clientFrame.setLocationRelativeTo(null);
                    clientFrame.setVisible(true);
                }
            });
        } catch (NamingException e) {
            JOptionPane.showMessageDialog(null, "Can't find 'Library Server' at localhost.", "Library client", JOptionPane.INFORMATION_MESSAGE);
            System.exit(-1);
        } catch (RemoteException e) {
            JOptionPane.showMessageDialog(null, "Read server data failed.", "Library client", JOptionPane.INFORMATION_MESSAGE);
            System.exit(-1);
        }
    }
public static void main(String[] args) {
        Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
                e.printStackTrace();
                if (e.getMessage().contains("java.net.ConnectException")) {
                    JOptionPane.showMessageDialog(null, "Client was disconnected, please, check server.", "Library client", JOptionPane.ERROR_MESSAGE);
                } else {
                    JOptionPane.showMessageDialog(null, e.getMessage(), "Library client", JOptionPane.ERROR_MESSAGE);
                }
            }
        });
        try {
            Context namingContext = new InitialContext();
            BookProvider bookProvider = (BookProvider) namingContext.lookup(BOOK_PROVIDER_URL);
            ReaderProvider readerProvider = (ReaderProvider) namingContext.lookup(READER_PROVIDER_URL);
            ReservationRecordProvider recordProvider = (ReservationRecordProvider) namingContext.lookup(RECORD_PROVIDER_URL);
            LibraryClient libraryClient = new LibraryClient(bookProvider, readerProvider, recordProvider);
            final LibraryClientComponent clientComponent = new LibraryClientComponent(libraryClient);
            SwingUtilities.invokeLater(new Runnable() {
                @Override
                public void run() {
                    JFrame clientFrame = clientComponent.getLibraryClientFrame();
                    clientFrame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
                    clientFrame.setLocationRelativeTo(null);
                    clientFrame.setVisible(true);
                }
            });
        } catch (NamingException e) {
            JOptionPane.showMessageDialog(null, "Can't find 'Library Server' at localhost.", "Library client", JOptionPane.INFORMATION_MESSAGE);
            System.exit(-1);
        } catch (RemoteException e) {
            JOptionPane.showMessageDialog(null, "Read server data failed.", "Library client", JOptionPane.INFORMATION_MESSAGE);
            System.exit(-1);
        }
    }
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/UserPermissionsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/UserPermissionsCommand.java index f68257a32..8da974299 100644 --- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/UserPermissionsCommand.java +++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/UserPermissionsCommand.java @@ -1,86 +1,86 @@ /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.accumulo.core.util.shell.commands; import java.io.IOException; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.security.SystemPermission; import org.apache.accumulo.core.security.TablePermission; import org.apache.accumulo.core.util.shell.Shell; import org.apache.accumulo.core.util.shell.Shell.Command; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; public class UserPermissionsCommand extends Command { private Option userOpt; private static int runOnce = 0; @Override public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws AccumuloException, AccumuloSecurityException, IOException { final String user = cl.getOptionValue(userOpt.getOpt(), shellState.getConnector().whoami()); String delim = ""; shellState.getReader().printString("System permissions: "); for (SystemPermission p : SystemPermission.values()) { - if (shellState.getConnector().securityOperations().hasSystemPermission(user, p) & p != null) { + if (p != null && shellState.getConnector().securityOperations().hasSystemPermission(user, p)) { shellState.getReader().printString(delim + "System." + p.name()); delim = ", "; } } shellState.getReader().printNewline(); for (String t : shellState.getConnector().tableOperations().list()) { delim = ""; for (TablePermission p : TablePermission.values()) { if (shellState.getConnector().securityOperations().hasTablePermission(user, t, p) && p != null) { if (runOnce == 0) { shellState.getReader().printString("\nTable permissions (" + t + "): "); runOnce++; } shellState.getReader().printString(delim + "Table." + p.name()); delim = ", "; } } runOnce = 0; } shellState.getReader().printNewline(); return 0; } @Override public String description() { return "displays a user's system and table permissions"; } @Override public Options getOptions() { Options o = new Options(); userOpt = new Option(Shell.userOption, "user", true, "user to operate on"); userOpt.setArgName("user"); o.addOption(userOpt); return o; } @Override public int numArgs() { return 0; } }
true
true
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws AccumuloException, AccumuloSecurityException, IOException {
    final String user = cl.getOptionValue(userOpt.getOpt(), shellState.getConnector().whoami());

    String delim = "";
    shellState.getReader().printString("System permissions: ");
    for (SystemPermission p : SystemPermission.values()) {
      if (shellState.getConnector().securityOperations().hasSystemPermission(user, p) & p != null) {
        shellState.getReader().printString(delim + "System." + p.name());
        delim = ", ";
      }
    }
    shellState.getReader().printNewline();

    for (String t : shellState.getConnector().tableOperations().list()) {
      delim = "";
      for (TablePermission p : TablePermission.values()) {
        if (shellState.getConnector().securityOperations().hasTablePermission(user, t, p) && p != null) {
          if (runOnce == 0) {
            shellState.getReader().printString("\nTable permissions (" + t + "): ");
            runOnce++;
          }
          shellState.getReader().printString(delim + "Table." + p.name());
          delim = ", ";
        }
      }
      runOnce = 0;
    }
    shellState.getReader().printNewline();
    return 0;
  }
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws AccumuloException, AccumuloSecurityException, IOException {
    final String user = cl.getOptionValue(userOpt.getOpt(), shellState.getConnector().whoami());

    String delim = "";
    shellState.getReader().printString("System permissions: ");
    for (SystemPermission p : SystemPermission.values()) {
      if (p != null && shellState.getConnector().securityOperations().hasSystemPermission(user, p)) {
        shellState.getReader().printString(delim + "System." + p.name());
        delim = ", ";
      }
    }
    shellState.getReader().printNewline();

    for (String t : shellState.getConnector().tableOperations().list()) {
      delim = "";
      for (TablePermission p : TablePermission.values()) {
        if (shellState.getConnector().securityOperations().hasTablePermission(user, t, p) && p != null) {
          if (runOnce == 0) {
            shellState.getReader().printString("\nTable permissions (" + t + "): ");
            runOnce++;
          }
          shellState.getReader().printString(delim + "Table." + p.name());
          delim = ", ";
        }
      }
      runOnce = 0;
    }
    shellState.getReader().printNewline();
    return 0;
  }
diff --git a/sonar-batch/src/main/java/org/sonar/batch/bootstrap/ProjectLock.java b/sonar-batch/src/main/java/org/sonar/batch/bootstrap/ProjectLock.java index 8a4d2148d6..144dd25381 100644 --- a/sonar-batch/src/main/java/org/sonar/batch/bootstrap/ProjectLock.java +++ b/sonar-batch/src/main/java/org/sonar/batch/bootstrap/ProjectLock.java @@ -1,91 +1,91 @@ /* * Sonar, open source software quality management tool. * Copyright (C) 2008-2012 SonarSource * mailto:contact AT sonarsource DOT com * * Sonar is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * Sonar is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with Sonar; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02 */ package org.sonar.batch.bootstrap; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.sonar.api.CoreProperties; import org.sonar.api.config.Settings; import org.sonar.api.resources.Project; import org.sonar.api.utils.Semaphores; import org.sonar.api.utils.SonarException; import org.sonar.batch.ProjectTree; public class ProjectLock { private static final Logger LOG = LoggerFactory.getLogger(ProjectLock.class); private final Semaphores semaphores; private final ProjectTree projectTree; private final Settings settings; public ProjectLock(Semaphores semaphores, ProjectTree projectTree, Settings settings) { this.semaphores = semaphores; this.projectTree = projectTree; this.settings = settings; } public void start() { if (!isInDryRunMode() && StringUtils.isNotBlank(getProject().getKey())) { Semaphores.Semaphore semaphore = acquire(); if (!semaphore.isLocked()) { LOG.error(getErrorMessage(semaphore)); - throw new SonarException("The project is already been analysing."); + throw new SonarException("The project is already being analysed."); } } } private String getErrorMessage(Semaphores.Semaphore semaphore) { long duration = semaphore.getDurationSinceLocked(); DurationLabel durationLabel = new DurationLabel(); String durationDisplay = durationLabel.label(duration); return "It looks like an analysis of '" + getProject().getName() + "' is already running (started " + durationDisplay + ")."; } public void stop() { if (!isInDryRunMode()) { release(); } } private Semaphores.Semaphore acquire() { LOG.debug("Acquire semaphore on project : {}, with key {}", getProject(), getSemaphoreKey()); return semaphores.acquire(getSemaphoreKey(), 15, 10); } private void release() { LOG.debug("Release semaphore on project : {}, with key {}", getProject(), getSemaphoreKey()); semaphores.release(getSemaphoreKey()); } private String getSemaphoreKey() { return "batch-" + getProject().getKey(); } private Project getProject() { return projectTree.getRootProject(); } private boolean isInDryRunMode() { return settings.getBoolean(CoreProperties.DRY_RUN); } }
true
true
public void start() {
    if (!isInDryRunMode() && StringUtils.isNotBlank(getProject().getKey())) {
      Semaphores.Semaphore semaphore = acquire();
      if (!semaphore.isLocked()) {
        LOG.error(getErrorMessage(semaphore));
        throw new SonarException("The project is already been analysing.");
      }
    }
  }
public void start() {
    if (!isInDryRunMode() && StringUtils.isNotBlank(getProject().getKey())) {
      Semaphores.Semaphore semaphore = acquire();
      if (!semaphore.isLocked()) {
        LOG.error(getErrorMessage(semaphore));
        throw new SonarException("The project is already being analysed.");
      }
    }
  }
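The ProjectLock record above guards a whole analysis run with a named semaphore and aborts if it is already held. A minimal sketch of the same fail-fast pattern, assuming a plain java.util.concurrent.Semaphore in place of Sonar's Semaphores service (the class and message below are illustrative, not from the patch):

import java.util.concurrent.Semaphore;

final class AnalysisGuard {
    // A single permit for this project: only one analysis may run at a time.
    private final Semaphore permit = new Semaphore(1);

    void start(String projectKey) {
        if (!permit.tryAcquire()) {
            // Mirrors ProjectLock.start(): fail fast instead of starting a second analysis.
            throw new IllegalStateException("Project " + projectKey + " is already being analysed.");
        }
    }

    void stop() {
        permit.release();
    }
}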
diff --git a/src/test/java/com/wikia/webdriver/TestCases/ChatTests/ChatTests.java b/src/test/java/com/wikia/webdriver/TestCases/ChatTests/ChatTests.java index 5ace122..9f50175 100644 --- a/src/test/java/com/wikia/webdriver/TestCases/ChatTests/ChatTests.java +++ b/src/test/java/com/wikia/webdriver/TestCases/ChatTests/ChatTests.java @@ -1,567 +1,583 @@ package com.wikia.webdriver.TestCases.ChatTests; import org.testng.annotations.Test; import com.wikia.webdriver.Common.Core.CommonFunctions; import com.wikia.webdriver.Common.Core.Global; import com.wikia.webdriver.Common.Properties.Properties; import com.wikia.webdriver.Common.Templates.TestTemplate_Two_Drivers; import com.wikia.webdriver.PageObjects.PageObject.HomePageObject; import com.wikia.webdriver.PageObjects.PageObject.ChatPageObject.ChatPageObject; import com.wikia.webdriver.PageObjects.PageObject.WikiPage.WikiArticlePageObject; public class ChatTests extends TestTemplate_Two_Drivers{ /* * Test 1: One user opens chat 1. A user opens Special:Chat. He is the only person on-line. 2. The main chat room is opened for him and he can see: message area, userlist and entry field. 3. At the top of message area is wiki's wordmark/name. 4. At the top of userlist he sees his avatar and name. Below that is a list of other users which is empty. 5. There is no chevron next to the wiki wordmark on userlist. 6. In the message area a message with his name appears: "user A has joined the chat." dropped from automation scope - this test case will be executed as a part of all test cases. */ /* * Test 2: Two users open chat 1. There are two users: user A and user B. 2. Both open Special:Chat on the same wiki. 3. The main chat room is opened for them and each can see: message area, userlist and entry field. 4. At the top of message area is wiki's wordmark/name. 5. At the top of userlist each user can see his avatar and name. Below that is a list of other users in the chat room. 6. There is a chevron next to the wiki wordmark on userlist. It is opened by default. 7. A user can click on the chevron to toggle userlist. 8. In the message area both users see a message with his name: "user A has joined the chat." or "user B has joined the chat." */ @Test(groups = {"Chat_001", "Chat"}) public void Chat_001_two_users_open_chat() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); chat1.openChatPage(); chat1.verifyChatPage(); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); chat2.verifyChatPage(); //Test switchToWindow(driver); chat1.verifyUserJoinToChat(Properties.userName2); } /* * Test 3: Changes in drop-down menu #1 1. User clicks on a different user name with left mouse button. Drop-down menu appears. 2. There are three options to choose: User Profile Message Wall, Contributions, Private message. 3. If user is an admin there should be also: Give ChatMod status and Kickban (if clicked user is not a chat moderator or admin). 
*/ @Test(groups = {"Chat_002", "Chat"}) public void Chat_002_changes_in_drop_down_menu_1() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //Test chat1.verifyChatPage(); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.verifyNormalUserDropdown(); } /* * Test 4: Changes in drop-down menu #2 1. There are two users in the chat room: user A and user B. User B private message are blocked by user A. 2. User A clicks with a left mouse button on user B name. Drop-down menu appears. 3. There are three options to choose: User Profile, Contributions, Allow Private Messages. 4. If user A is an admin there should be also Give ChatMod status and Kickban (if clicked user is not a chat moderator or admin). - to next test case */ /* Test 11: Private room dropdown menu Edit 1. There are two users in the chat room: user A and user B. 2. User B opens private chat room with user A. 3. Clicks on user A item under "Private messages" bar displays drop-down menu similar to main chat's drop-down menu except one new element: "Block Private Messages" * */ @Test(groups = {"Chat_003", "Chat"}) public void Chat_003_changes_in_drop_down_menu_2() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //Test chat1.verifyChatPage(); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.selectPrivateMessage(driver); chat1.clickPrivateMessageUser(Properties.userName2, driver); chat1.verifyPrivateUserDropdown(); chat1.blockPrivateMessage(driver); chat1.clickOnBlockedDifferentUser(Properties.userName2, driver); chat1.verifyBlockingUserDropdown(); chat1.allowPrivateMessageFromUser(Properties.userName2, driver); } /* * Test 4: Changes in drop-down menu #2 - KICKBAN verification 1. There are two users in the chat room: user A and user B. User B private message are blocked by user A. 2. User A clicks with a left mouse button on user B name. Drop-down menu appears. 3. There are three options to choose: User Profile, Contributions, Allow Private Messages. 4. If user A is an admin there should be also Give ChatMod status and Kickban (if clicked user is not a chat moderator or admin). 
*/ @Test(groups = {"Chat_004", "Chat"}) public void Chat_004_changes_in_drop_down_menu_staff() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userNameStaff, Properties.passwordStaff, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //Test chat1.verifyChatPage(); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.verifyAdminUserDropdown(); } /* * Test 5: "Private Messages" bar 1. There are two users in the chat room: user A and user B. No "Private Message" bar. 2. User B opens a private room with user A. 3. The small header labeled "Private Message" appears on user B's userlist. */ /* * Test 6: Current chat is highlighted 1. There are two users in the chat room: user A and user B. 2. User B opens a drop-down menu and click on "Private message" with user A. 3. New room is opened and highlighted. 4. Click on main room changes the highlighting. */ /* * Test 7: Current chat title changes 1. There are two users in the chat room: user A and user B. 2. User B opens a drop-down menu and click on "Private message" with user A. 3. New room is opened and the title is changed to "Private chat with user A". 4. Click on main room changes the title to wiki's wordmark/name. */ /*Above test cases are covered by below script */ @Test(groups = {"Chat_005", "Chat"}) public void Chat_005_private_chat_validation() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //Test chat1.verifyChatPage(); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.selectPrivateMessage(driver); chat1.verifyPrivateMessageHeader(); chat1.verifyPrivateMessageIsHighLighted(Properties.userName2); chat1.verifyPrivateChatTitle(Properties.userName2); chat1.clickOnMainChat(driver); chat1.verifyMainChatIsHighLighted(); chat1.clickOnPrivateChat(Properties.userName2, driver); chat1.verifyPrivateMessageIsHighLighted(Properties.userName2); } /* * Test 8: Current chat messages area changes 1. There are two users in the chat room: user A and user B. 2. User A sends a string 'abc' to the main room. It is now displayed on chat messages area. 3. User B opens a drop-down menu and click on "Private message" with user A. 4. New room is opened and chat messages area is empty. 5. Click on main room changes chat messages area so there is 'abc' message displayed now. 
*/ @Test(groups = {"Chat_006", "Chat"}) public void Chat_006_current_chat_messages_area_changes() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //test switchToWindow(driver2); chat2.writeOnChat("Hello this is user "+Properties.userName2); switchToWindow(driver); chat1.verifyMessageOnChat("Hello this is user "+Properties.userName2); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.selectPrivateMessage(driver); chat1.verifyPrivateMessageHeader(); chat1.clickOnMainChat(driver); chat1.verifyMainChatIsHighLighted(); chat1.verifyMessageOnChat("Hello this is user "+Properties.userName2); } /* * Test 9: Private chat window is opened for target user after a message is sent 1. There are two users in the chat room: user A and user B. 2. User B opens a drop-down menu and click on "Private message" with user A. 3. New room is opened for user B. User A doesn't notice anything yet. 4. User B types and sends string 'abc' in the private chat with user A window. 5. Private chat with user B appears in user A's userlist area. */ @Test(groups = {"Chat_007", "Chat", "Smoke"}) public void Chat_007_send_private_message() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //test switchToWindow(driver2); chat2.verifyUserJoinToChat(Properties.userName); chat2.verifyUserIsVisibleOnContactsList(Properties.userName); chat1.verifyUserIsVisibleOnContactsList(Properties.userName2); chat2.writeOnChat("test message"); chat1.verifyMessageOnChat("test message"); chat2.clickOnDifferentUser(Properties.userName, driver2); chat2.selectPrivateMessage(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); switchToWindow(driver); chat1.verifyPrivateMessageHeader(); chat1.verifyPrivateMessageNotification(); chat1.clickOnPrivateChat(Properties.userName2, driver); chat1.verifyMessageOnChat("This is private message from "+Properties.userName2); } /* * Test 10: Notifications 1. There are two users in the chat room: user A and user B. 2. User B opens private chat room with user A. 3. New room is opened for user B. User A doesn't notice anything yet. 4. User B types and sends string 'abc' in the private chat with user A window. 5. 
Private chat with user B appears in user A's userlist area with red dot with number of unread messages (1). 6. User B types and sends another two strings: 'def' and 'ghi'. 7. User A notices that red dot counter is now with number 3 on it. 8. User A clicks on private chat with user B item from his userlist area and red dot is gone. */ @Test(groups = {"Chat_008", "Chat"}) public void Chat_008_notifications() { //first user opens the chat switchToWindow(driver); CommonFunctions.logOut(driver); WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, ""); home.openWikiPage(); CommonFunctions.logInCookie(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); CommonFunctions.logOut(driver2); WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, ""); home2.openWikiPage(); CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //test switchToWindow(driver2); chat2.verifyUserJoinToChat(Properties.userName); chat2.verifyUserIsVisibleOnContactsList(Properties.userName); chat1.verifyUserIsVisibleOnContactsList(Properties.userName2); chat2.writeOnChat("test message"); chat1.verifyMessageOnChat("test message"); chat2.clickOnDifferentUser(Properties.userName, driver2); chat2.selectPrivateMessage(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); switchToWindow(driver); chat1.verifyPrivateMessageHeader(); chat1.verifyPrivateMessageNotification(1); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(2); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(3); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(4); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(5); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(6); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(7); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(8); + switchToWindow(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); + switchToWindow(driver); chat1.verifyPrivateMessageNotification(9); } /* * Test 12: Disconnections 1. There are two users in the chat room: user A and user B. 2. User B opens private chat room with user A. 3. User B sends few messages. 4. User A opens private chat with user B and sends few messages too. 5. User B closes the chat window and logs out. 6. User A still have the private chat window with user B opened but he notices user B has status "Offline" and is grayed out. 7. User A can not type and send anything in the private chat window with user B. 
The entry field in that room grays out. 8. After awhile user B comes back and user A is still there. User B's "Offline" status is gone and both of them can chat again in the private room. */ @Test(groups = {"Chat_009", ""}) public void Chat_009_disconnections() { //first user opens the chat switchToWindow(driver); HomePageObject home = new HomePageObject(driver); CommonFunctions.logOut(driver); home.openHomePage(); CommonFunctions.logIn(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); HomePageObject home2 = new HomePageObject(driver2); CommonFunctions.logOut(driver2); home2.openHomePage(); CommonFunctions.logIn(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //test switchToWindow(driver2); chat2.verifyUserJoinToChat(Properties.userName); chat2.verifyUserIsVisibleOnContactsList(Properties.userName); chat1.verifyUserIsVisibleOnContactsList(Properties.userName2); chat2.writeOnChat("test message"); chat1.verifyMessageOnChat("test message"); chat2.clickOnDifferentUser(Properties.userName, driver2); chat2.selectPrivateMessage(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); switchToWindow(driver); chat1.verifyPrivateMessageHeader(); chat1.verifyPrivateMessageNotification(1); chat2.writeOnChat("This is private message from "+Properties.userName2); chat1.verifyPrivateMessageNotification(2); chat2.writeOnChat("This is private message from "+Properties.userName2); chat1.verifyPrivateMessageNotification(3); chat1.clickOnPrivateChat(Properties.userName2, driver); switchToWindow(driver2); chat2.disconnectFromChat(); switchToWindow(driver); chat1.verifyUserIsGreyedOut(); chat1.verifyWritingAreaIsBlocked(); chat1.clickOnMainChat(driver); chat1.verifyUserLeftFromChatMessage(Properties.userName2); } /* * Test 13: User blocking - closing the window on blocker's site 1. There is private room opened between user A and user B. 2. User A blocks user B and the private chat window is gone for him. 3. User B still sees private chat window with user A but message: "user A has blocked private messages" appears there for him. He can still type but user A will not receive it. 4. Both: user A and user B don't see 'Private message' item on userlist drop-menu. 
*/ @Test(groups = {"Chat_010", ""}) public void Chat_010_user_blocking_1() { //first user opens the chat switchToWindow(driver); HomePageObject home = new HomePageObject(driver); CommonFunctions.logOut(driver); home.openHomePage(); CommonFunctions.logIn(Properties.userName, Properties.password, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); HomePageObject home2 = new HomePageObject(driver2); CommonFunctions.logOut(driver2); home2.openHomePage(); CommonFunctions.logIn(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); //test switchToWindow(driver2); chat2.verifyUserJoinToChat(Properties.userName); chat2.verifyUserIsVisibleOnContactsList(Properties.userName); chat1.verifyUserIsVisibleOnContactsList(Properties.userName2); chat2.writeOnChat("test message"); chat1.verifyMessageOnChat("test message"); chat2.clickOnDifferentUser(Properties.userName, driver2); chat2.selectPrivateMessage(driver2); chat2.writeOnChat("This is private message from "+Properties.userName2); switchToWindow(driver); chat1.verifyPrivateMessageHeader(); chat1.verifyPrivateMessageNotification(1); chat1.clickPrivateMessageUser(Properties.userName2, driver); chat1.blockPrivateMessage(driver); chat1.clickOnBlockedDifferentUser(Properties.userName2, driver); chat1.verifyBlockingUserDropdown(); chat1.clickOnDifferentUser(Properties.userName2, driver); switchToWindow(driver2); chat2.verifyBlockedUserMessage(Properties.userName, Properties.userName2); chat2.clickOnBlockedDifferentUser(Properties.userName, driver2); chat2.verifyBlockedUserDropdown(); switchToWindow(driver); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.allowPrivateMessageFromUser(Properties.userName2, driver); } /* * Test 14: User blocking - a block is site-wide - currently out of scope //TODO There are two users (user A and user B) in the main chat room of wiki A. User B is blocked by user A and he can not send private messages to user A. There is no "Private message" item on drop-down menu at user A. User B opens a Special:Chat page on wiki B where user A is on too. User B can not private message user A there too. There is no "Private message" item on drop-down menu at user A. */ /* * Test 15: User advanced to ChatMod There are two users: user A and user B on the main chat room of wiki A. User A is an admin and he clicks with left mouse button on user B who is not an admin. Then he clicks on "Give ChatMod Status". The message "User A has made user B a chat moderator." appears in the chat message area in main chat room. User B now can find "Give ChatMod Status" and "Kickban" option in drop-down menu at non-admin users. 
*/ @Test(groups = {"Chat_011", ""}) public void Chat_011_chatMod() { //first user opens the chat switchToWindow(driver); HomePageObject home = new HomePageObject(driver); CommonFunctions.logOut(driver); home.openHomePage(); CommonFunctions.logIn(Properties.userNameStaff, Properties.passwordStaff, driver); ChatPageObject chat1 = new ChatPageObject(driver); //second user opens the chat switchToWindow(driver2); HomePageObject home2 = new HomePageObject(driver2); CommonFunctions.logOut(driver2); home2.openHomePage(); CommonFunctions.logIn(Properties.userName2, Properties.password2, driver2); ChatPageObject chat2 = new ChatPageObject(driver2); chat2.openChatPage(); switchToWindow(driver); chat1.openChatPage(); switchToWindow(driver2); chat2.verifyUserJoinToChat(Properties.userNameStaff); chat2.verifyUserIsVisibleOnContactsList(Properties.userNameStaff); chat1.verifyUserIsVisibleOnContactsList(Properties.userName2); //test switchToWindow(driver); chat1.clickOnDifferentUser(Properties.userName2, driver); chat1.selectChatModStatus(driver); chat1.verifyChatModMessage(Properties.userNameStaff, Properties.userName2); chat2.verifyChatModMessage(Properties.userNameStaff, Properties.userName2); CommonFunctions.logOut(driver); CommonFunctions.logIn(Properties.userName, Properties.password, driver); chat1.openChatPage(); switchToWindow(driver2); chat2.verifyUserJoinToChat(Properties.userName); chat2.clickOnDifferentUser(Properties.userName, driver2); chat2.verifyChatModUserDropdown(); //remove admin rights switchToWindow(driver); CommonFunctions.logOut(driver); CommonFunctions.logIn(Properties.userNameStaff, Properties.passwordStaff, driver); CommonFunctions.removeChatModeratorRights(Properties.userName2, driver); } }
false
true
public void Chat_008_notifications() {
    //first user opens the chat
    switchToWindow(driver);
    CommonFunctions.logOut(driver);
    WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, "");
    home.openWikiPage();
    CommonFunctions.logInCookie(Properties.userName, Properties.password, driver);
    ChatPageObject chat1 = new ChatPageObject(driver);
    //second user opens the chat
    switchToWindow(driver2);
    CommonFunctions.logOut(driver2);
    WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, "");
    home2.openWikiPage();
    CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2);
    ChatPageObject chat2 = new ChatPageObject(driver2);
    chat2.openChatPage();
    switchToWindow(driver);
    chat1.openChatPage();
    //test
    switchToWindow(driver2);
    chat2.verifyUserJoinToChat(Properties.userName);
    chat2.verifyUserIsVisibleOnContactsList(Properties.userName);
    chat1.verifyUserIsVisibleOnContactsList(Properties.userName2);
    chat2.writeOnChat("test message");
    chat1.verifyMessageOnChat("test message");
    chat2.clickOnDifferentUser(Properties.userName, driver2);
    chat2.selectPrivateMessage(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageHeader();
    chat1.verifyPrivateMessageNotification(1);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(3);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(4);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(5);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(6);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(7);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(8);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    chat1.verifyPrivateMessageNotification(9);
  }
public void Chat_008_notifications() {
    //first user opens the chat
    switchToWindow(driver);
    CommonFunctions.logOut(driver);
    WikiArticlePageObject home = new WikiArticlePageObject(driver, Global.DOMAIN, "");
    home.openWikiPage();
    CommonFunctions.logInCookie(Properties.userName, Properties.password, driver);
    ChatPageObject chat1 = new ChatPageObject(driver);
    //second user opens the chat
    switchToWindow(driver2);
    CommonFunctions.logOut(driver2);
    WikiArticlePageObject home2 = new WikiArticlePageObject(driver2, Global.DOMAIN, "");
    home2.openWikiPage();
    CommonFunctions.logInCookie(Properties.userName2, Properties.password2, driver2);
    ChatPageObject chat2 = new ChatPageObject(driver2);
    chat2.openChatPage();
    switchToWindow(driver);
    chat1.openChatPage();
    //test
    switchToWindow(driver2);
    chat2.verifyUserJoinToChat(Properties.userName);
    chat2.verifyUserIsVisibleOnContactsList(Properties.userName);
    chat1.verifyUserIsVisibleOnContactsList(Properties.userName2);
    chat2.writeOnChat("test message");
    chat1.verifyMessageOnChat("test message");
    chat2.clickOnDifferentUser(Properties.userName, driver2);
    chat2.selectPrivateMessage(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageHeader();
    chat1.verifyPrivateMessageNotification(1);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(2);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(3);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(4);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(5);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(6);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(7);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(8);
    switchToWindow(driver2);
    chat2.writeOnChat("This is private message from "+Properties.userName2);
    switchToWindow(driver);
    chat1.verifyPrivateMessageNotification(9);
  }
diff --git a/src/main/java/net/paguo/trafshow/backend/snmp/summary/model/RouterSummaryTraffic.java b/src/main/java/net/paguo/trafshow/backend/snmp/summary/model/RouterSummaryTraffic.java index 0e38419..f924733 100644 --- a/src/main/java/net/paguo/trafshow/backend/snmp/summary/model/RouterSummaryTraffic.java +++ b/src/main/java/net/paguo/trafshow/backend/snmp/summary/model/RouterSummaryTraffic.java @@ -1,95 +1,95 @@ package net.paguo.trafshow.backend.snmp.summary.model; import java.util.Date; /** * @author Reyentenko */ public class RouterSummaryTraffic { private String router; private String iface; private Long totalInput = 0l; private Long totalOutput = 0l; private Date date; private TrafficRecord lastProcessed; public RouterSummaryTraffic(){ } public void processRecord(TrafficRecord record) { if (lastProcessed != null) { // Check for router restart if (record.getUptime() >= lastProcessed.getUptime()){ totalInput += findDifference(lastProcessed.getInput(), record.getInput()); totalOutput += findDifference(lastProcessed.getOutput(), record.getOutput()); }else{ totalInput += record.getInput(); totalOutput += record.getOutput(); } }else{ - totalInput += record.getInput(); - totalOutput += record.getOutput(); + totalInput = 0L; + totalOutput = 0L; } this.lastProcessed = record; } private Long findDifference(Long input, Long input1) { return input1 >= input ? input1 - input : ((long) Math.pow(2, 32) - input) + input1; } /** * Test-only method. * @param lastProcessed last processed record */ void setLastProcessed(TrafficRecord lastProcessed) { this.lastProcessed = lastProcessed; } public String getRouter() { return router; } public void setRouter(String router) { this.router = router; } public String getIface() { return iface; } public void setIface(String iface) { this.iface = iface; } public Long getTotalInput() { return totalInput; } public void setTotalInput(Long totalInput) { this.totalInput = totalInput; } public Long getTotalOutput() { return totalOutput; } public void setTotalOutput(Long totalOutput) { this.totalOutput = totalOutput; } public Date getDate() { return date; } public void setDate(Date date) { this.date = date; } }
true
true
public void processRecord(TrafficRecord record) {
        if (lastProcessed != null) {
            // Check for router restart
            if (record.getUptime() >= lastProcessed.getUptime()){
                totalInput += findDifference(lastProcessed.getInput(), record.getInput());
                totalOutput += findDifference(lastProcessed.getOutput(), record.getOutput());
            }else{
                totalInput += record.getInput();
                totalOutput += record.getOutput();
            }
        }else{
            totalInput += record.getInput();
            totalOutput += record.getOutput();
        }
        this.lastProcessed = record;
    }
public void processRecord(TrafficRecord record) {
        if (lastProcessed != null) {
            // Check for router restart
            if (record.getUptime() >= lastProcessed.getUptime()){
                totalInput += findDifference(lastProcessed.getInput(), record.getInput());
                totalOutput += findDifference(lastProcessed.getOutput(), record.getOutput());
            }else{
                totalInput += record.getInput();
                totalOutput += record.getOutput();
            }
        }else{
            totalInput = 0L;
            totalOutput = 0L;
        }
        this.lastProcessed = record;
    }
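The findDifference helper used by the record above compensates for 32-bit SNMP counter wrap-around. A minimal standalone sketch of that arithmetic, assuming the samples are 32-bit octet counters held in Java longs (the method name is illustrative):

static long counterDelta(long previous, long current) {
    // Normal case: the counter only grew since the last sample.
    if (current >= previous) {
        return current - previous;
    }
    // The counter wrapped at 2^32: take the distance to the wrap point
    // plus whatever accumulated after it, exactly as findDifference does.
    return (1L << 32) - previous + current;
}

For example, previous = 4294967290 and current = 10 yields a delta of 16 octets instead of a negative value.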
diff --git a/net.sparktank.morrigan/src/net/sparktank/morrigan/ApplicationActionBarAdvisor.java b/net.sparktank.morrigan/src/net/sparktank/morrigan/ApplicationActionBarAdvisor.java index 33c2b14c..c08cd92f 100644 --- a/net.sparktank.morrigan/src/net/sparktank/morrigan/ApplicationActionBarAdvisor.java +++ b/net.sparktank.morrigan/src/net/sparktank/morrigan/ApplicationActionBarAdvisor.java @@ -1,119 +1,119 @@ package net.sparktank.morrigan; import net.sparktank.morrigan.actions.NewPlaylistAction; import org.eclipse.jface.action.GroupMarker; import org.eclipse.jface.action.IAction; import org.eclipse.jface.action.IContributionItem; import org.eclipse.jface.action.ICoolBarManager; import org.eclipse.jface.action.IMenuManager; import org.eclipse.jface.action.IToolBarManager; import org.eclipse.jface.action.MenuManager; import org.eclipse.jface.action.Separator; import org.eclipse.jface.action.ToolBarContributionItem; import org.eclipse.jface.action.ToolBarManager; import org.eclipse.ui.IWorkbenchActionConstants; import org.eclipse.ui.IWorkbenchWindow; import org.eclipse.ui.actions.ActionFactory; import org.eclipse.ui.actions.ContributionItemFactory; import org.eclipse.ui.actions.RetargetAction; import org.eclipse.ui.actions.ActionFactory.IWorkbenchAction; import org.eclipse.ui.application.ActionBarAdvisor; import org.eclipse.ui.application.IActionBarConfigurer; /** * An action bar advisor is responsible for creating, adding, and disposing of * the actions added to a workbench window. Each window will be populated with * new actions. */ public class ApplicationActionBarAdvisor extends ActionBarAdvisor { // Actions - important to allocate these only in makeActions, and then use them // in the fill methods. This ensures that the actions aren't recreated // when fillActionBars is called with FILL_PROXY. private IWorkbenchAction exitAction; // Window. private IWorkbenchAction resetPerspectiveAction; private MenuManager showViewMenuMgr; private IContributionItem showViewItemShortList; // List actions. private IAction newPlayListAction; IWorkbenchAction saveAction; private RetargetAction addAction; private RetargetAction removeAction; public static final String ADD_ACTIONID = "morrigan.add"; public static final String REMOVE_ACTIONID = "morrigan.remove"; public ApplicationActionBarAdvisor(IActionBarConfigurer configurer) { super(configurer); } @Override protected void makeActions(final IWorkbenchWindow window) { // Creates the actions and registers them. // Registering is needed to ensure that key bindings work. // The corresponding commands keybindings are defined in the plugin.xml file. // Registering also provides automatic disposal of the actions when the window is closed. newPlayListAction = new NewPlaylistAction(window); register(newPlayListAction); exitAction = ActionFactory.QUIT.create(window); register(exitAction); resetPerspectiveAction = ActionFactory.RESET_PERSPECTIVE.create(window); showViewMenuMgr = new MenuManager("Show view", "showView"); showViewItemShortList = ContributionItemFactory.VIEWS_SHORTLIST.create(window); // Editor actions. 
saveAction = ActionFactory.SAVE.create(window); register(saveAction); - addAction = new RetargetAction(ADD_ACTIONID, "&add"); + addAction = new RetargetAction(ADD_ACTIONID, "&add files..."); addAction.setImageDescriptor(Activator.getImageDescriptor("icons/plus.gif")); getActionBarConfigurer().registerGlobalAction(addAction); register(addAction); window.getPartService().addPartListener(addAction); removeAction = new RetargetAction(REMOVE_ACTIONID, "&remove selected..."); removeAction.setImageDescriptor(Activator.getImageDescriptor("icons/minus.gif")); getActionBarConfigurer().registerGlobalAction(removeAction); register(removeAction); window.getPartService().addPartListener(removeAction); } @Override protected void fillMenuBar(IMenuManager menuBar) { MenuManager fileMenu = new MenuManager("&Morrigan", IWorkbenchActionConstants.M_FILE); menuBar.add(fileMenu); fileMenu.add(exitAction); MenuManager playlistMenu = new MenuManager("&Playlist", "playlist"); menuBar.add(playlistMenu); playlistMenu.add(newPlayListAction); playlistMenu.add(new Separator()); playlistMenu.add(saveAction); playlistMenu.add(addAction); playlistMenu.add(removeAction); MenuManager windowMenu = new MenuManager("&Window", IWorkbenchActionConstants.M_WINDOW); menuBar.add(windowMenu); windowMenu.add(resetPerspectiveAction); showViewMenuMgr.add(showViewItemShortList); windowMenu.add(showViewMenuMgr); } @Override protected void fillCoolBar(ICoolBarManager coolBar) { coolBar.add(new GroupMarker("group.list")); IToolBarManager fileToolBar = new ToolBarManager(coolBar.getStyle()); fileToolBar.add(saveAction); fileToolBar.add(addAction); fileToolBar.add(removeAction); coolBar.add(new ToolBarContributionItem(fileToolBar)); } }
true
true
protected void makeActions(final IWorkbenchWindow window) {
    // Creates the actions and registers them.
    // Registering is needed to ensure that key bindings work.
    // The corresponding commands keybindings are defined in the plugin.xml file.
    // Registering also provides automatic disposal of the actions when the window is closed.
    newPlayListAction = new NewPlaylistAction(window);
    register(newPlayListAction);
    exitAction = ActionFactory.QUIT.create(window);
    register(exitAction);
    resetPerspectiveAction = ActionFactory.RESET_PERSPECTIVE.create(window);
    showViewMenuMgr = new MenuManager("Show view", "showView");
    showViewItemShortList = ContributionItemFactory.VIEWS_SHORTLIST.create(window);
    // Editor actions.
    saveAction = ActionFactory.SAVE.create(window);
    register(saveAction);
    addAction = new RetargetAction(ADD_ACTIONID, "&add");
    addAction.setImageDescriptor(Activator.getImageDescriptor("icons/plus.gif"));
    getActionBarConfigurer().registerGlobalAction(addAction);
    register(addAction);
    window.getPartService().addPartListener(addAction);
    removeAction = new RetargetAction(REMOVE_ACTIONID, "&remove selected...");
    removeAction.setImageDescriptor(Activator.getImageDescriptor("icons/minus.gif"));
    getActionBarConfigurer().registerGlobalAction(removeAction);
    register(removeAction);
    window.getPartService().addPartListener(removeAction);
  }
protected void makeActions(final IWorkbenchWindow window) {
    // Creates the actions and registers them.
    // Registering is needed to ensure that key bindings work.
    // The corresponding commands keybindings are defined in the plugin.xml file.
    // Registering also provides automatic disposal of the actions when the window is closed.
    newPlayListAction = new NewPlaylistAction(window);
    register(newPlayListAction);
    exitAction = ActionFactory.QUIT.create(window);
    register(exitAction);
    resetPerspectiveAction = ActionFactory.RESET_PERSPECTIVE.create(window);
    showViewMenuMgr = new MenuManager("Show view", "showView");
    showViewItemShortList = ContributionItemFactory.VIEWS_SHORTLIST.create(window);
    // Editor actions.
    saveAction = ActionFactory.SAVE.create(window);
    register(saveAction);
    addAction = new RetargetAction(ADD_ACTIONID, "&add files...");
    addAction.setImageDescriptor(Activator.getImageDescriptor("icons/plus.gif"));
    getActionBarConfigurer().registerGlobalAction(addAction);
    register(addAction);
    window.getPartService().addPartListener(addAction);
    removeAction = new RetargetAction(REMOVE_ACTIONID, "&remove selected...");
    removeAction.setImageDescriptor(Activator.getImageDescriptor("icons/minus.gif"));
    getActionBarConfigurer().registerGlobalAction(removeAction);
    register(removeAction);
    window.getPartService().addPartListener(removeAction);
  }
diff --git a/src/com/android/settings/DreamSettings.java b/src/com/android/settings/DreamSettings.java index 3b14fa528..43f3ce6e4 100644 --- a/src/com/android/settings/DreamSettings.java +++ b/src/com/android/settings/DreamSettings.java @@ -1,353 +1,355 @@ /* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.settings; import android.app.ActionBar; import android.app.Activity; import android.app.AlertDialog; import android.app.Dialog; import android.content.BroadcastReceiver; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.IntentFilter; import android.os.Bundle; import android.preference.PreferenceActivity; import android.util.Log; import android.view.Gravity; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.MenuItem.OnMenuItemClickListener; import android.view.MotionEvent; import android.view.View; import android.view.View.OnClickListener; import android.view.View.OnTouchListener; import android.view.ViewGroup; import android.widget.ArrayAdapter; import android.widget.CompoundButton; import android.widget.CompoundButton.OnCheckedChangeListener; import android.widget.ImageView; import android.widget.ListView; import android.widget.RadioButton; import android.widget.Switch; import android.widget.TextView; import com.android.settings.DreamBackend.DreamInfo; import java.util.List; public class DreamSettings extends SettingsPreferenceFragment { private static final String TAG = DreamSettings.class.getSimpleName(); static final boolean DEBUG = false; private static final int DIALOG_WHEN_TO_DREAM = 1; private static final String PACKAGE_SCHEME = "package"; private final PackageReceiver mPackageReceiver = new PackageReceiver(); private Context mContext; private DreamBackend mBackend; private DreamInfoAdapter mAdapter; private Switch mSwitch; private MenuItem[] mMenuItemsWhenEnabled; private boolean mRefreshing; @Override public int getHelpResource() { return R.string.help_url_dreams; } @Override public void onAttach(Activity activity) { logd("onAttach(%s)", activity.getClass().getSimpleName()); super.onAttach(activity); mContext = activity; } @Override public void onCreate(Bundle icicle) { logd("onCreate(%s)", icicle); super.onCreate(icicle); Activity activity = getActivity(); mBackend = new DreamBackend(activity); mSwitch = new Switch(activity); mSwitch.setOnCheckedChangeListener(new OnCheckedChangeListener() { @Override public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) { if (!mRefreshing) { mBackend.setEnabled(isChecked); refreshFromBackend(); } } }); if (activity instanceof PreferenceActivity) { PreferenceActivity preferenceActivity = (PreferenceActivity) activity; if (preferenceActivity.onIsHidingHeaders() || !preferenceActivity.onIsMultiPane()) { final int padding = activity.getResources().getDimensionPixelSize( 
R.dimen.action_bar_switch_padding); mSwitch.setPadding(0, 0, padding, 0); activity.getActionBar().setDisplayOptions(ActionBar.DISPLAY_SHOW_CUSTOM, ActionBar.DISPLAY_SHOW_CUSTOM); activity.getActionBar().setCustomView(mSwitch, new ActionBar.LayoutParams( ActionBar.LayoutParams.WRAP_CONTENT, ActionBar.LayoutParams.WRAP_CONTENT, Gravity.CENTER_VERTICAL | Gravity.END)); } } setHasOptionsMenu(true); } @Override public void onActivityCreated(Bundle savedInstanceState) { logd("onActivityCreated(%s)", savedInstanceState); super.onActivityCreated(savedInstanceState); ListView listView = getListView(); TextView emptyView = (TextView) getView().findViewById(android.R.id.empty); emptyView.setText(R.string.screensaver_settings_disabled_prompt); listView.setEmptyView(emptyView); mAdapter = new DreamInfoAdapter(mContext); listView.setAdapter(mAdapter); } @Override public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) { logd("onCreateOptionsMenu()"); boolean isEnabled = mBackend.isEnabled(); // create "start" action MenuItem start = createMenuItem(menu, R.string.screensaver_settings_dream_start, MenuItem.SHOW_AS_ACTION_ALWAYS, isEnabled, new Runnable(){ @Override public void run() { mBackend.startDreaming(); }}); // create "when to dream" overflow menu item MenuItem whenToDream = createMenuItem(menu, R.string.screensaver_settings_when_to_dream, MenuItem.SHOW_AS_ACTION_NEVER, isEnabled, new Runnable() { @Override public void run() { showDialog(DIALOG_WHEN_TO_DREAM); }}); // create "help" overflow menu item (make sure it appears last) super.onCreateOptionsMenu(menu, inflater); mMenuItemsWhenEnabled = new MenuItem[] { start, whenToDream }; } private MenuItem createMenuItem(Menu menu, int titleRes, int actionEnum, boolean isEnabled, final Runnable onClick) { MenuItem item = menu.add(titleRes); item.setShowAsAction(actionEnum); item.setEnabled(isEnabled); item.setOnMenuItemClickListener(new OnMenuItemClickListener() { @Override public boolean onMenuItemClick(MenuItem item) { onClick.run(); return true; } }); return item; } @Override public Dialog onCreateDialog(int dialogId) { logd("onCreateDialog(%s)", dialogId); if (dialogId == DIALOG_WHEN_TO_DREAM) return createWhenToDreamDialog(); return super.onCreateDialog(dialogId); } private Dialog createWhenToDreamDialog() { final CharSequence[] items = { mContext.getString(R.string.screensaver_settings_summary_dock), mContext.getString(R.string.screensaver_settings_summary_sleep), mContext.getString(R.string.screensaver_settings_summary_either_short) }; int initialSelection = mBackend.isActivatedOnDock() && mBackend.isActivatedOnSleep() ? 2 : mBackend.isActivatedOnDock() ? 0 : mBackend.isActivatedOnSleep() ? 
1 : -1; return new AlertDialog.Builder(mContext) .setTitle(R.string.screensaver_settings_when_to_dream) .setSingleChoiceItems(items, initialSelection, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int item) { mBackend.setActivatedOnDock(item == 0 || item == 2); mBackend.setActivatedOnSleep(item == 1 || item == 2); } }) .create(); } @Override public void onPause() { logd("onPause()"); super.onPause(); mContext.unregisterReceiver(mPackageReceiver); } @Override public void onResume() { logd("onResume()"); super.onResume(); refreshFromBackend(); // listen for package changes IntentFilter filter = new IntentFilter(); filter.addAction(Intent.ACTION_PACKAGE_ADDED); filter.addAction(Intent.ACTION_PACKAGE_CHANGED); filter.addAction(Intent.ACTION_PACKAGE_REMOVED); filter.addAction(Intent.ACTION_PACKAGE_REPLACED); filter.addDataScheme(PACKAGE_SCHEME); mContext.registerReceiver(mPackageReceiver , filter); } public static int getSummaryResource(Context context) { DreamBackend backend = new DreamBackend(context); boolean isEnabled = backend.isEnabled(); boolean activatedOnSleep = backend.isActivatedOnSleep(); boolean activatedOnDock = backend.isActivatedOnDock(); boolean activatedOnEither = activatedOnSleep && activatedOnDock; return !isEnabled ? R.string.screensaver_settings_summary_off : activatedOnEither ? R.string.screensaver_settings_summary_either_long : activatedOnSleep ? R.string.screensaver_settings_summary_sleep : activatedOnDock ? R.string.screensaver_settings_summary_dock : 0; } private void refreshFromBackend() { logd("refreshFromBackend()"); mRefreshing = true; boolean dreamsEnabled = mBackend.isEnabled(); if (mSwitch.isChecked() != dreamsEnabled) mSwitch.setChecked(dreamsEnabled); mAdapter.clear(); if (dreamsEnabled) { List<DreamInfo> dreamInfos = mBackend.getDreamInfos(); mAdapter.addAll(dreamInfos); } if (mMenuItemsWhenEnabled != null) for (MenuItem menuItem : mMenuItemsWhenEnabled) menuItem.setEnabled(dreamsEnabled); mRefreshing = false; } private static void logd(String msg, Object... args) { if (DEBUG) Log.d(TAG, args == null || args.length == 0 ? msg : String.format(msg, args)); } private class DreamInfoAdapter extends ArrayAdapter<DreamInfo> { private final LayoutInflater mInflater; public DreamInfoAdapter(Context context) { super(context, 0); mInflater = (LayoutInflater) context.getSystemService(Context.LAYOUT_INFLATER_SERVICE); } @Override public View getView(int position, View convertView, ViewGroup parent) { DreamInfo dreamInfo = getItem(position); logd("getView(%s)", dreamInfo.caption); final View row = convertView != null ? convertView : createDreamInfoRow(parent); row.setTag(dreamInfo); // bind icon ((ImageView) row.findViewById(android.R.id.icon)).setImageDrawable(dreamInfo.icon); // bind caption ((TextView) row.findViewById(android.R.id.title)).setText(dreamInfo.caption); // bind radio button RadioButton radioButton = (RadioButton) row.findViewById(android.R.id.button1); radioButton.setChecked(dreamInfo.isActive); radioButton.setOnTouchListener(new OnTouchListener() { @Override public boolean onTouch(View v, MotionEvent event) { row.onTouchEvent(event); return false; }}); // bind settings button + divider boolean showSettings = dreamInfo.settingsComponentName != null; View settingsDivider = row.findViewById(R.id.divider); settingsDivider.setVisibility(showSettings ? View.VISIBLE : View.INVISIBLE); ImageView settingsButton = (ImageView) row.findViewById(android.R.id.button2); settingsButton.setVisibility(showSettings ? 
View.VISIBLE : View.INVISIBLE); + settingsButton.setAlpha(dreamInfo.isActive ? 1f : 0.7f); + settingsButton.setEnabled(dreamInfo.isActive); settingsButton.setOnClickListener(new OnClickListener(){ @Override public void onClick(View v) { mBackend.launchSettings((DreamInfo) row.getTag()); }}); return row; } private View createDreamInfoRow(ViewGroup parent) { final View row = mInflater.inflate(R.layout.dream_info_row, parent, false); row.setOnClickListener(new OnClickListener(){ @Override public void onClick(View v) { v.setPressed(true); activate((DreamInfo) row.getTag()); }}); return row; } private DreamInfo getCurrentSelection() { for (int i = 0; i < getCount(); i++) { DreamInfo dreamInfo = getItem(i); if (dreamInfo.isActive) return dreamInfo; } return null; } private void activate(DreamInfo dreamInfo) { if (dreamInfo.equals(getCurrentSelection())) return; for (int i = 0; i < getCount(); i++) { getItem(i).isActive = false; } dreamInfo.isActive = true; mBackend.setActiveDream(dreamInfo.componentName); notifyDataSetChanged(); } } private class PackageReceiver extends BroadcastReceiver { @Override public void onReceive(Context context, Intent intent) { logd("PackageReceiver.onReceive"); refreshFromBackend(); } } }
true
true
public View getView(int position, View convertView, ViewGroup parent) {
            DreamInfo dreamInfo = getItem(position);
            logd("getView(%s)", dreamInfo.caption);
            final View row = convertView != null ? convertView : createDreamInfoRow(parent);
            row.setTag(dreamInfo);
            // bind icon
            ((ImageView) row.findViewById(android.R.id.icon)).setImageDrawable(dreamInfo.icon);
            // bind caption
            ((TextView) row.findViewById(android.R.id.title)).setText(dreamInfo.caption);
            // bind radio button
            RadioButton radioButton = (RadioButton) row.findViewById(android.R.id.button1);
            radioButton.setChecked(dreamInfo.isActive);
            radioButton.setOnTouchListener(new OnTouchListener() {
                @Override
                public boolean onTouch(View v, MotionEvent event) {
                    row.onTouchEvent(event);
                    return false;
                }});
            // bind settings button + divider
            boolean showSettings = dreamInfo.settingsComponentName != null;
            View settingsDivider = row.findViewById(R.id.divider);
            settingsDivider.setVisibility(showSettings ? View.VISIBLE : View.INVISIBLE);
            ImageView settingsButton = (ImageView) row.findViewById(android.R.id.button2);
            settingsButton.setVisibility(showSettings ? View.VISIBLE : View.INVISIBLE);
            settingsButton.setOnClickListener(new OnClickListener(){
                @Override
                public void onClick(View v) {
                    mBackend.launchSettings((DreamInfo) row.getTag());
                }});
            return row;
        }
public View getView(int position, View convertView, ViewGroup parent) {
            DreamInfo dreamInfo = getItem(position);
            logd("getView(%s)", dreamInfo.caption);
            final View row = convertView != null ? convertView : createDreamInfoRow(parent);
            row.setTag(dreamInfo);
            // bind icon
            ((ImageView) row.findViewById(android.R.id.icon)).setImageDrawable(dreamInfo.icon);
            // bind caption
            ((TextView) row.findViewById(android.R.id.title)).setText(dreamInfo.caption);
            // bind radio button
            RadioButton radioButton = (RadioButton) row.findViewById(android.R.id.button1);
            radioButton.setChecked(dreamInfo.isActive);
            radioButton.setOnTouchListener(new OnTouchListener() {
                @Override
                public boolean onTouch(View v, MotionEvent event) {
                    row.onTouchEvent(event);
                    return false;
                }});
            // bind settings button + divider
            boolean showSettings = dreamInfo.settingsComponentName != null;
            View settingsDivider = row.findViewById(R.id.divider);
            settingsDivider.setVisibility(showSettings ? View.VISIBLE : View.INVISIBLE);
            ImageView settingsButton = (ImageView) row.findViewById(android.R.id.button2);
            settingsButton.setVisibility(showSettings ? View.VISIBLE : View.INVISIBLE);
            settingsButton.setAlpha(dreamInfo.isActive ? 1f : 0.7f);
            settingsButton.setEnabled(dreamInfo.isActive);
            settingsButton.setOnClickListener(new OnClickListener(){
                @Override
                public void onClick(View v) {
                    mBackend.launchSettings((DreamInfo) row.getTag());
                }});
            return row;
        }
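The DreamSettings fix above makes each row's settings button reflect whether its dream is the active one. A small sketch of that bind step on its own, assuming standard Android View APIs and an illustrative helper name:

// Dim and disable the settings affordance for rows that are not active,
// mirroring the setAlpha/setEnabled pair added in the fixed getView().
private static void bindSettingsButton(ImageView settingsButton, boolean isActive) {
    settingsButton.setAlpha(isActive ? 1f : 0.7f);
    settingsButton.setEnabled(isActive);
}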
diff --git a/matterhorn-execute-operations/src/main/java/org/opencastproject/execute/operation/handler/ExecuteManyWorkflowOperationHandler.java b/matterhorn-execute-operations/src/main/java/org/opencastproject/execute/operation/handler/ExecuteManyWorkflowOperationHandler.java index f83379c..66706e0 100644 --- a/matterhorn-execute-operations/src/main/java/org/opencastproject/execute/operation/handler/ExecuteManyWorkflowOperationHandler.java +++ b/matterhorn-execute-operations/src/main/java/org/opencastproject/execute/operation/handler/ExecuteManyWorkflowOperationHandler.java @@ -1,321 +1,323 @@ /** * Copyright 2009, 2010 The Regents of the University of California * Licensed under the Educational Community License, Version 2.0 * (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.osedu.org/licenses/ECL-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS IS" * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing * permissions and limitations under the License. * */ package org.opencastproject.execute.operation.handler; import org.opencastproject.execute.api.ExecuteException; import org.opencastproject.execute.api.ExecuteService; import org.opencastproject.inspection.api.MediaInspectionException; import org.opencastproject.inspection.api.MediaInspectionService; import org.opencastproject.job.api.Job; import org.opencastproject.job.api.JobContext; import org.opencastproject.mediapackage.MediaPackage; import org.opencastproject.mediapackage.MediaPackageElement; import org.opencastproject.mediapackage.MediaPackageElementFlavor; import org.opencastproject.mediapackage.MediaPackageElementParser; import org.opencastproject.mediapackage.MediaPackageException; import org.opencastproject.util.NotFoundException; import org.opencastproject.workflow.api.AbstractWorkflowOperationHandler; import org.opencastproject.workflow.api.WorkflowInstance; import org.opencastproject.workflow.api.WorkflowOperationException; import org.opencastproject.workflow.api.WorkflowOperationInstance; import org.opencastproject.workflow.api.WorkflowOperationResult; import org.opencastproject.workflow.api.WorkflowOperationResult.Action; import org.opencastproject.workflow.api.WorkflowOperationResultImpl; import org.opencastproject.workspace.api.Workspace; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.net.URI; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map.Entry; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; /** * Runs an operation multiple times with each MediaPackageElement matching the characteristics */ public class ExecuteManyWorkflowOperationHandler extends AbstractWorkflowOperationHandler { /** The logging facility */ private static final Logger logger = LoggerFactory.getLogger(ExecuteManyWorkflowOperationHandler.class); /** Property containing the command to run */ public static final String EXEC_PROPERTY = "exec"; /** Property containing the list of command parameters */ public static final String PARAMS_PROPERTY = "params"; /** Property containing the "flavor" that a mediapackage elements must have in order to be used as input arguments */ public static final String SOURCE_FLAVOR_PROPERTY = "source-flavor"; 
/** Property containing the filename of the elements created by this operation */ public static final String OUTPUT_FILENAME_PROPERTY = "output-filename"; /** Property containing the expected type of the element generated by this operation */ public static final String EXPECTED_TYPE_PROPERTY = "expected-type"; /** Property containing the tags that must exist on a mediapackage element for the element to be used as an input arguments */ public static final String SOURCE_TAGS_PROPERTY = "source-tags"; /** Property containing the flavor that the resulting mediapackage elements will be assigned */ public static final String TARGET_FLAVOR_PROPERTY = "target-flavor"; /** Property containing the tags that the resulting mediapackage elements will be assigned */ public static final String TARGET_TAGS_PROPERTY = "target-tags"; /** The text analyzer */ protected ExecuteService executeService; /** Reference to the media inspection service */ private MediaInspectionService inspectionService = null; /** The workspace service */ protected Workspace workspace; /** The configuration options for this handler */ private static final SortedMap<String, String> CONFIG_OPTIONS; static { CONFIG_OPTIONS = new TreeMap<String, String>(); CONFIG_OPTIONS.put(EXEC_PROPERTY, "The full path the executable to run"); CONFIG_OPTIONS.put(PARAMS_PROPERTY, "Space separated list of command line parameters to pass to the executable')"); CONFIG_OPTIONS.put(OUTPUT_FILENAME_PROPERTY, "The name of the elements created by this operation"); CONFIG_OPTIONS.put(EXPECTED_TYPE_PROPERTY, "The type of the element returned by this operation. Accepted values are: manifest, timeline, track, catalog, attachment, other"); CONFIG_OPTIONS.put(SOURCE_FLAVOR_PROPERTY, "The \"flavor\" that the mediapackage elements must have in order to be used as an input argument"); CONFIG_OPTIONS.put(SOURCE_TAGS_PROPERTY, "The required tags that must exist on the mediapackage element for the element to be used as an input argument"); CONFIG_OPTIONS.put(TARGET_FLAVOR_PROPERTY, "The flavor that the resulting mediapackage elements will be assigned"); CONFIG_OPTIONS.put(TARGET_TAGS_PROPERTY, "The tags that the resulting mediapackage elements will be assigned"); } /** * {@inheritDoc} * * @see org.opencastproject.workflow.api.WorkflowOperationHandler#start(org.opencastproject.workflow.api.WorkflowInstance, JobContext) */ @Override public WorkflowOperationResult start(WorkflowInstance workflowInstance, JobContext context) throws WorkflowOperationException { MediaPackage mediaPackage = workflowInstance.getMediaPackage(); WorkflowOperationInstance operation = workflowInstance.getCurrentOperation(); logger.debug("Running execute workflow operation with ID {}", operation.getId()); // Get operation parameters String exec = StringUtils.trimToNull(operation.getConfiguration(EXEC_PROPERTY)); String params = StringUtils.trimToNull(operation.getConfiguration(PARAMS_PROPERTY)); String sourceFlavor = StringUtils.trimToNull(operation.getConfiguration(SOURCE_FLAVOR_PROPERTY)); String sourceTags = StringUtils.trimToNull(operation.getConfiguration(SOURCE_TAGS_PROPERTY)); String targetFlavorStr = StringUtils.trimToNull(operation.getConfiguration(TARGET_FLAVOR_PROPERTY)); String targetTags = StringUtils.trimToNull(operation.getConfiguration(TARGET_TAGS_PROPERTY)); String outputFilename = StringUtils.trimToNull(operation.getConfiguration(OUTPUT_FILENAME_PROPERTY)); String expectedTypeStr = StringUtils.trimToNull(operation.getConfiguration(EXPECTED_TYPE_PROPERTY)); - 
MediaPackageElementFlavor matchingFlavor = MediaPackageElementFlavor.parseFlavor(sourceFlavor); + MediaPackageElementFlavor matchingFlavor = null; + if (sourceFlavor != null) + matchingFlavor = MediaPackageElementFlavor.parseFlavor(sourceFlavor); // Unmarshall target flavor MediaPackageElementFlavor targetFlavor = null; if (targetFlavorStr != null) targetFlavor = MediaPackageElementFlavor.parseFlavor(targetFlavorStr); // Unmarshall expected mediapackage element type MediaPackageElement.Type expectedType = null; if (expectedTypeStr != null) { for (MediaPackageElement.Type type : MediaPackageElement.Type.values()) if (type.toString().equalsIgnoreCase(expectedTypeStr)) { expectedType = type; break; } if (expectedType == null) throw new WorkflowOperationException("'" + expectedTypeStr + "' is not a valid element type"); } List<String> sourceTagList = asList(sourceTags); // Select the tracks based on source flavors and tags Set<MediaPackageElement> inputSet = new HashSet<MediaPackageElement>(); for (MediaPackageElement element : mediaPackage.getElementsByTags(sourceTagList)) { MediaPackageElementFlavor elementFlavor = element.getFlavor(); if (sourceFlavor == null || (elementFlavor != null && elementFlavor.matches(matchingFlavor))) { inputSet.add(element); } } if (inputSet.size() == 0) { logger.warn("Mediapackage {} has no suitable elements to execute the command {} based on tags {} and flavor {}", new Object[] { mediaPackage, exec, sourceTags, sourceFlavor }); return createResult(mediaPackage, Action.CONTINUE); } MediaPackageElement[] inputElements = inputSet.toArray(new MediaPackageElement[inputSet.size()]); try{ Job[] jobs = new Job[inputElements.length]; MediaPackageElement[] resultElements = new MediaPackageElement[inputElements.length]; long totalTimeInQueue = 0; for (int i = 0; i < inputElements.length; i++) jobs[i] = executeService.execute(exec, params, inputElements[i], outputFilename, expectedType); // Wait for all jobs to be finished if (!waitForStatus(jobs).isSuccess()) throw new WorkflowOperationException("Execute operation failed"); // Find which output elements are tracks and inspect them HashMap<Integer,Job> jobMap = new HashMap<Integer,Job>(); for (int i = 0; i < jobs.length; i++) { // Add this job's queue time to the total totalTimeInQueue += jobs[i].getQueueTime(); if (StringUtils.trimToNull(jobs[i].getPayload()) != null) { resultElements[i] = MediaPackageElementParser.getFromXml(jobs[i].getPayload()); if (resultElements[i].getElementType() == MediaPackageElement.Type.Track) { jobMap.put(i, inspectionService.inspect(resultElements[i].getURI())); } } else resultElements[i] = inputElements[i]; } if (jobMap.size() > 0) { if (!waitForStatus(jobMap.values().toArray(new Job[jobMap.size()])).isSuccess()) throw new WorkflowOperationException("Execute operation failed in track inspection"); for (Entry<Integer, Job> entry : jobMap.entrySet()) { // Add this job's queue time to the total totalTimeInQueue += entry.getValue().getQueueTime(); resultElements[entry.getKey()] = MediaPackageElementParser.getFromXml(entry.getValue().getPayload()); } } for (int i = 0; i < resultElements.length; i++) { if (resultElements[i] != inputElements[i]) { // Store new element to mediaPackage mediaPackage.addDerived(resultElements[i], inputElements[i]); // Store new element to mediaPackage URI uri = workspace.moveTo(resultElements[i].getURI(), mediaPackage.getIdentifier().toString(), resultElements[i].getIdentifier(), outputFilename); resultElements[i].setURI(uri); // Set new flavor if (targetFlavor != null) 
resultElements[i].setFlavor(targetFlavor); } // Set new tags if (targetTags != null) // Assume the tags starting with "-" means we want to eliminate such tags form the result element for (String tag : asList(targetTags)) { if (tag.startsWith("-")) // We remove the tag resulting from stripping all the '-' characters at the beginning of the tag resultElements[i].removeTag(tag.replaceAll("^-+", "")); else resultElements[i].addTag(tag); } } WorkflowOperationResult result = createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue); logger.debug("Execute operation {} completed", operation.getId()); return result; } catch (ExecuteException e) { throw new WorkflowOperationException(e); } catch (MediaPackageException e) { throw new WorkflowOperationException("Some result element couldn't be serialized", e); } catch (NotFoundException e) { throw new WorkflowOperationException("Could not find mediapackage", e); } catch (IOException e) { throw new WorkflowOperationException("Error unmarshalling a result mediapackage element", e); } catch (MediaInspectionException e) { throw new WorkflowOperationException("Error inspecting one of the created tracks", e); } } /** * {@inheritDoc} * * @see org.opencastproject.workflow.api.WorkflowOperationHandler#skip(org.opencastproject.workflow.api.WorkflowInstance, JobContext) */ @Override public WorkflowOperationResult skip(WorkflowInstance workflowInstance, JobContext context) throws WorkflowOperationException { return new WorkflowOperationResultImpl(workflowInstance.getMediaPackage(), null, Action.SKIP, 0); } @Override public String getId() { return "execute"; } @Override public String getDescription() { return "Executes command line workflow operations in workers"; } @Override public void destroy(WorkflowInstance workflowInstance, JobContext context) throws WorkflowOperationException { // Do nothing (nothing to clean up, the command line program should do this itself) } /** * {@inheritDoc} * * @see org.opencastproject.workflow.api.WorkflowOperationHandler#getConfigurationOptions() */ @Override public SortedMap<String, String> getConfigurationOptions() { return CONFIG_OPTIONS; } /** * Sets the service * * @param service */ public void setExecuteService(ExecuteService service) { this.executeService = service; } /** * Sets a reference to the workspace service. * * @param workspace */ public void setWorkspace(Workspace workspace) { this.workspace = workspace; } /** * Sets the media inspection service * * @param mediaInspectionService * an instance of the media inspection service */ protected void setMediaInspectionService(MediaInspectionService mediaInspectionService) { this.inspectionService = mediaInspectionService; } }
true
true
public WorkflowOperationResult start(WorkflowInstance workflowInstance, JobContext context) throws WorkflowOperationException { MediaPackage mediaPackage = workflowInstance.getMediaPackage(); WorkflowOperationInstance operation = workflowInstance.getCurrentOperation(); logger.debug("Running execute workflow operation with ID {}", operation.getId()); // Get operation parameters String exec = StringUtils.trimToNull(operation.getConfiguration(EXEC_PROPERTY)); String params = StringUtils.trimToNull(operation.getConfiguration(PARAMS_PROPERTY)); String sourceFlavor = StringUtils.trimToNull(operation.getConfiguration(SOURCE_FLAVOR_PROPERTY)); String sourceTags = StringUtils.trimToNull(operation.getConfiguration(SOURCE_TAGS_PROPERTY)); String targetFlavorStr = StringUtils.trimToNull(operation.getConfiguration(TARGET_FLAVOR_PROPERTY)); String targetTags = StringUtils.trimToNull(operation.getConfiguration(TARGET_TAGS_PROPERTY)); String outputFilename = StringUtils.trimToNull(operation.getConfiguration(OUTPUT_FILENAME_PROPERTY)); String expectedTypeStr = StringUtils.trimToNull(operation.getConfiguration(EXPECTED_TYPE_PROPERTY)); MediaPackageElementFlavor matchingFlavor = MediaPackageElementFlavor.parseFlavor(sourceFlavor); // Unmarshall target flavor MediaPackageElementFlavor targetFlavor = null; if (targetFlavorStr != null) targetFlavor = MediaPackageElementFlavor.parseFlavor(targetFlavorStr); // Unmarshall expected mediapackage element type MediaPackageElement.Type expectedType = null; if (expectedTypeStr != null) { for (MediaPackageElement.Type type : MediaPackageElement.Type.values()) if (type.toString().equalsIgnoreCase(expectedTypeStr)) { expectedType = type; break; } if (expectedType == null) throw new WorkflowOperationException("'" + expectedTypeStr + "' is not a valid element type"); } List<String> sourceTagList = asList(sourceTags); // Select the tracks based on source flavors and tags Set<MediaPackageElement> inputSet = new HashSet<MediaPackageElement>(); for (MediaPackageElement element : mediaPackage.getElementsByTags(sourceTagList)) { MediaPackageElementFlavor elementFlavor = element.getFlavor(); if (sourceFlavor == null || (elementFlavor != null && elementFlavor.matches(matchingFlavor))) { inputSet.add(element); } } if (inputSet.size() == 0) { logger.warn("Mediapackage {} has no suitable elements to execute the command {} based on tags {} and flavor {}", new Object[] { mediaPackage, exec, sourceTags, sourceFlavor }); return createResult(mediaPackage, Action.CONTINUE); } MediaPackageElement[] inputElements = inputSet.toArray(new MediaPackageElement[inputSet.size()]); try{ Job[] jobs = new Job[inputElements.length]; MediaPackageElement[] resultElements = new MediaPackageElement[inputElements.length]; long totalTimeInQueue = 0; for (int i = 0; i < inputElements.length; i++) jobs[i] = executeService.execute(exec, params, inputElements[i], outputFilename, expectedType); // Wait for all jobs to be finished if (!waitForStatus(jobs).isSuccess()) throw new WorkflowOperationException("Execute operation failed"); // Find which output elements are tracks and inspect them HashMap<Integer,Job> jobMap = new HashMap<Integer,Job>(); for (int i = 0; i < jobs.length; i++) { // Add this job's queue time to the total totalTimeInQueue += jobs[i].getQueueTime(); if (StringUtils.trimToNull(jobs[i].getPayload()) != null) { resultElements[i] = MediaPackageElementParser.getFromXml(jobs[i].getPayload()); if (resultElements[i].getElementType() == MediaPackageElement.Type.Track) { jobMap.put(i, 
inspectionService.inspect(resultElements[i].getURI())); } } else resultElements[i] = inputElements[i]; } if (jobMap.size() > 0) { if (!waitForStatus(jobMap.values().toArray(new Job[jobMap.size()])).isSuccess()) throw new WorkflowOperationException("Execute operation failed in track inspection"); for (Entry<Integer, Job> entry : jobMap.entrySet()) { // Add this job's queue time to the total totalTimeInQueue += entry.getValue().getQueueTime(); resultElements[entry.getKey()] = MediaPackageElementParser.getFromXml(entry.getValue().getPayload()); } } for (int i = 0; i < resultElements.length; i++) { if (resultElements[i] != inputElements[i]) { // Store new element to mediaPackage mediaPackage.addDerived(resultElements[i], inputElements[i]); // Store new element to mediaPackage URI uri = workspace.moveTo(resultElements[i].getURI(), mediaPackage.getIdentifier().toString(), resultElements[i].getIdentifier(), outputFilename); resultElements[i].setURI(uri); // Set new flavor if (targetFlavor != null) resultElements[i].setFlavor(targetFlavor); } // Set new tags if (targetTags != null) // Assume the tags starting with "-" means we want to eliminate such tags form the result element for (String tag : asList(targetTags)) { if (tag.startsWith("-")) // We remove the tag resulting from stripping all the '-' characters at the beginning of the tag resultElements[i].removeTag(tag.replaceAll("^-+", "")); else resultElements[i].addTag(tag); } } WorkflowOperationResult result = createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue); logger.debug("Execute operation {} completed", operation.getId()); return result; } catch (ExecuteException e) { throw new WorkflowOperationException(e); } catch (MediaPackageException e) { throw new WorkflowOperationException("Some result element couldn't be serialized", e); } catch (NotFoundException e) { throw new WorkflowOperationException("Could not find mediapackage", e); } catch (IOException e) { throw new WorkflowOperationException("Error unmarshalling a result mediapackage element", e); } catch (MediaInspectionException e) { throw new WorkflowOperationException("Error inspecting one of the created tracks", e); } }
public WorkflowOperationResult start(WorkflowInstance workflowInstance, JobContext context) throws WorkflowOperationException { MediaPackage mediaPackage = workflowInstance.getMediaPackage(); WorkflowOperationInstance operation = workflowInstance.getCurrentOperation(); logger.debug("Running execute workflow operation with ID {}", operation.getId()); // Get operation parameters String exec = StringUtils.trimToNull(operation.getConfiguration(EXEC_PROPERTY)); String params = StringUtils.trimToNull(operation.getConfiguration(PARAMS_PROPERTY)); String sourceFlavor = StringUtils.trimToNull(operation.getConfiguration(SOURCE_FLAVOR_PROPERTY)); String sourceTags = StringUtils.trimToNull(operation.getConfiguration(SOURCE_TAGS_PROPERTY)); String targetFlavorStr = StringUtils.trimToNull(operation.getConfiguration(TARGET_FLAVOR_PROPERTY)); String targetTags = StringUtils.trimToNull(operation.getConfiguration(TARGET_TAGS_PROPERTY)); String outputFilename = StringUtils.trimToNull(operation.getConfiguration(OUTPUT_FILENAME_PROPERTY)); String expectedTypeStr = StringUtils.trimToNull(operation.getConfiguration(EXPECTED_TYPE_PROPERTY)); MediaPackageElementFlavor matchingFlavor = null; if (sourceFlavor != null) matchingFlavor = MediaPackageElementFlavor.parseFlavor(sourceFlavor); // Unmarshall target flavor MediaPackageElementFlavor targetFlavor = null; if (targetFlavorStr != null) targetFlavor = MediaPackageElementFlavor.parseFlavor(targetFlavorStr); // Unmarshall expected mediapackage element type MediaPackageElement.Type expectedType = null; if (expectedTypeStr != null) { for (MediaPackageElement.Type type : MediaPackageElement.Type.values()) if (type.toString().equalsIgnoreCase(expectedTypeStr)) { expectedType = type; break; } if (expectedType == null) throw new WorkflowOperationException("'" + expectedTypeStr + "' is not a valid element type"); } List<String> sourceTagList = asList(sourceTags); // Select the tracks based on source flavors and tags Set<MediaPackageElement> inputSet = new HashSet<MediaPackageElement>(); for (MediaPackageElement element : mediaPackage.getElementsByTags(sourceTagList)) { MediaPackageElementFlavor elementFlavor = element.getFlavor(); if (sourceFlavor == null || (elementFlavor != null && elementFlavor.matches(matchingFlavor))) { inputSet.add(element); } } if (inputSet.size() == 0) { logger.warn("Mediapackage {} has no suitable elements to execute the command {} based on tags {} and flavor {}", new Object[] { mediaPackage, exec, sourceTags, sourceFlavor }); return createResult(mediaPackage, Action.CONTINUE); } MediaPackageElement[] inputElements = inputSet.toArray(new MediaPackageElement[inputSet.size()]); try{ Job[] jobs = new Job[inputElements.length]; MediaPackageElement[] resultElements = new MediaPackageElement[inputElements.length]; long totalTimeInQueue = 0; for (int i = 0; i < inputElements.length; i++) jobs[i] = executeService.execute(exec, params, inputElements[i], outputFilename, expectedType); // Wait for all jobs to be finished if (!waitForStatus(jobs).isSuccess()) throw new WorkflowOperationException("Execute operation failed"); // Find which output elements are tracks and inspect them HashMap<Integer,Job> jobMap = new HashMap<Integer,Job>(); for (int i = 0; i < jobs.length; i++) { // Add this job's queue time to the total totalTimeInQueue += jobs[i].getQueueTime(); if (StringUtils.trimToNull(jobs[i].getPayload()) != null) { resultElements[i] = MediaPackageElementParser.getFromXml(jobs[i].getPayload()); if (resultElements[i].getElementType() == 
MediaPackageElement.Type.Track) { jobMap.put(i, inspectionService.inspect(resultElements[i].getURI())); } } else resultElements[i] = inputElements[i]; } if (jobMap.size() > 0) { if (!waitForStatus(jobMap.values().toArray(new Job[jobMap.size()])).isSuccess()) throw new WorkflowOperationException("Execute operation failed in track inspection"); for (Entry<Integer, Job> entry : jobMap.entrySet()) { // Add this job's queue time to the total totalTimeInQueue += entry.getValue().getQueueTime(); resultElements[entry.getKey()] = MediaPackageElementParser.getFromXml(entry.getValue().getPayload()); } } for (int i = 0; i < resultElements.length; i++) { if (resultElements[i] != inputElements[i]) { // Store new element to mediaPackage mediaPackage.addDerived(resultElements[i], inputElements[i]); // Store new element to mediaPackage URI uri = workspace.moveTo(resultElements[i].getURI(), mediaPackage.getIdentifier().toString(), resultElements[i].getIdentifier(), outputFilename); resultElements[i].setURI(uri); // Set new flavor if (targetFlavor != null) resultElements[i].setFlavor(targetFlavor); } // Set new tags if (targetTags != null) // Assume the tags starting with "-" means we want to eliminate such tags form the result element for (String tag : asList(targetTags)) { if (tag.startsWith("-")) // We remove the tag resulting from stripping all the '-' characters at the beginning of the tag resultElements[i].removeTag(tag.replaceAll("^-+", "")); else resultElements[i].addTag(tag); } } WorkflowOperationResult result = createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue); logger.debug("Execute operation {} completed", operation.getId()); return result; } catch (ExecuteException e) { throw new WorkflowOperationException(e); } catch (MediaPackageException e) { throw new WorkflowOperationException("Some result element couldn't be serialized", e); } catch (NotFoundException e) { throw new WorkflowOperationException("Could not find mediapackage", e); } catch (IOException e) { throw new WorkflowOperationException("Error unmarshalling a result mediapackage element", e); } catch (MediaInspectionException e) { throw new WorkflowOperationException("Error inspecting one of the created tracks", e); } }
diff --git a/src/main/java/com/smartass/pewpew/Commander.java b/src/main/java/com/smartass/pewpew/Commander.java index 3d31d74..7155fa1 100644 --- a/src/main/java/com/smartass/pewpew/Commander.java +++ b/src/main/java/com/smartass/pewpew/Commander.java @@ -1,512 +1,512 @@ /* * Custom system to hit my EC2 instances with a large volume of randomly generated crashes. * * Hack not really worth signing yet. */ package com.smartass.pewpew; import com.smartass.pewpew.generators.*; import java.io.*; import java.nio.*; import java.net.*; import java.util.*; import javax.net.ssl.*; import com.amazonaws.services.ec2.*; import com.amazonaws.services.ec2.model.*; import com.amazonaws.auth.*; import com.amazonaws.regions.*; import org.apache.commons.codec.binary.Base64; public class Commander { static String AMI_ID = "ami-281d8018"; static boolean useEC2 = true; static enum Flag { NONE, INSTANCES, HITS, DELAY, TIMEOUT }; public static void main(String[] args) { System.out.println(); System.out.println("Welcome to PewPew Commander!"); System.out.println("\"Your one stop shop for all your DDoS needs!\" (just kidding, use only for good)"); System.out.println(); System.out.println("Preparing written orders."); System.out.println(); int instances = 1; int hits = 10; int delay = 100; int timeout = 500; Flag flag = Flag.NONE; for (String arg : args) { if (arg.equals("--instances") || arg.equals("-i")) { flag = Flag.INSTANCES; } else if (arg.equals("--hits") || arg.equals("-h")) { flag = Flag.HITS; } else if (arg.equals("--delay") || arg.equals("-d")) { flag = Flag.DELAY; } else if (arg.equals("--timeout") || arg.equals("-t")) { flag = Flag.TIMEOUT; } else if (arg.equals("--help") || arg.equals("-h")) { printHelp(); return; } else { int input = 0; try { input = Integer.parseInt(arg); } catch (Exception e) { System.out.println("Couldn't read integer input. 
No fractions or decimals are allowed."); printHelp(); return; } if (flag == Flag.INSTANCES) { instances = input; } else if (flag == Flag.HITS) { hits = input; } else if (flag == Flag.DELAY) { delay = input; } else if (flag == Flag.TIMEOUT) { - hits = input; + timeout = input; } else { System.out.println("Command not recognized : "+arg); printHelp(); return; } flag = Flag.NONE; } } // Here's a config for testing the crash reporter TestConfig config = new TestConfig(); config.url = "amazonloadbalancer-1358579984.us-west-2.elb.amazonaws.com/log/"; config.expectedResponse = "your concerns will be noted in the captains log ;)"; config.numHits = hits; config.msDelay = delay; config.msTimeout = timeout; config.urlParamGenerator = new CrashTestParameterGenerator(); System.out.println("Planning to hit \""+config.url+"\" from "+instances+" instances, with "+hits+" hits each, at a "+delay+" ms delay, accepting a max lag per request of "+timeout+" ms."); ArrayList<String> cannons = new ArrayList<String>(); ArrayList<String> instanceIds = new ArrayList<String>(); AmazonEC2 ec2 = null; cannons.add("localhost"); if (useEC2) { ec2 = createEC2(); // Create the instances instanceIds = createEC2Instances(ec2, instances); // Wait for instances to start up cannons = instanceIdsToDNS(ec2, instanceIds); } // Wait for our sockets ArrayList<Socket> sockets = waitForSockets(cannons); // Tell instances to attack ArrayList<TestResult> results = new ArrayList<TestResult>(); for (int i = 0; i < sockets.size()*2; i++) { // Blast from i instances, and get results long startTimeMS = System.currentTimeMillis(); int testInstances = (int)Math.ceil(i/2)+1; TestResult result = runTest(sockets,config,testInstances); result.instances = testInstances; result.config = config; result.duration = System.currentTimeMillis() - startTimeMS; results.add(result); } System.out.println(); System.out.println("==============\nTest Results\n=============="); long perInstanceAvgResponseMs = 0; float avgHitsPerSecond = 0; for (int i = 0; i < results.size(); i++) { TestResult result = results.get(i); System.out.println("\n-------\nTest "+i+":\n-------"); System.out.println("simultaneous instances: "+result.instances); System.out.println(); System.out.println("instance config:"); System.out.println("delay between hits: "+result.config.msDelay); System.out.println("timeout: "+result.config.msTimeout); System.out.println("consecutive hits: "+result.config.numHits); System.out.println("theorhetical max hits per second: "+((1000f/(float)result.config.msTimeout)*(float)result.instances)); System.out.println(); System.out.println("passed: "+result.passedTests); System.out.println("failed: "+result.failedTests); System.out.println("avg response time: "+result.msAverageResponse); System.out.println("avg response time per instance: "+(result.msAverageResponse/(long)result.instances)); System.out.println("realized hits per second: "+(((float)result.passedTests/(float)result.duration)*1000f)); perInstanceAvgResponseMs += (result.msAverageResponse/(long)result.instances); avgHitsPerSecond += (((float)result.passedTests/(float)result.duration)*1000f); } perInstanceAvgResponseMs /= results.size(); avgHitsPerSecond /= results.size(); System.out.println(); System.out.println("****"); System.out.println("Overall avg response time per instance : "+perInstanceAvgResponseMs); System.out.println("Overall avg hits per second : "+avgHitsPerSecond); System.out.println(); // Terminate the instances if (useEC2) { terminateEC2Instances(ec2, instanceIds); } } private static 
void printHelp() { System.out.println("Options:"); System.out.println("\t--instances, -i : Set the number of instances"); System.out.println("\t--hits, -h : Set the number of REST calls each instance will perform"); System.out.println("\t--delay, -d : Set the delay between each call, in ms"); System.out.println("\t--timeout, -t : Set the timeout on each call, in ms"); } private static AmazonEC2 createEC2() { System.out.println(); System.out.println("Authenticating with the mercenaries."); AWSCredentials cred; try { cred = new PropertiesCredentials(new File("config.txt")); } catch (IOException e) { System.out.println("Can't find the autorization file."); return null; } AmazonEC2 ec2 = new AmazonEC2Client(cred); // It's very important to set the region correctly, or else nothing works ec2.setRegion(com.amazonaws.regions.Region.getRegion(Regions.US_WEST_2)); return ec2; } // This creates a bunch of EC2 instances private static ArrayList<String> createEC2Instances(AmazonEC2 ec2, int numInstances) { String userData = "#! /bin/bash\n" + "git clone https://github.com/keenon/pewpew.git /home/ec2-user/pewpew" + "\n" + "java -cp /home/ec2-user/pewpew/repo/pewpew-0.1.jar com.smartass.pewpew.PewPew"; System.out.println(); System.out.println("Assembling the fleet."); DescribeInstancesResult liveStatus = ec2.describeInstances(); RunInstancesResult result = ec2.runInstances(new RunInstancesRequest().withImageId(AMI_ID).withInstanceType("t1.micro").withKeyName("archkey").withSecurityGroups("default").withUserData(Base64.encodeBase64String(userData.getBytes())).withInstanceInitiatedShutdownBehavior("terminate").withMinCount(numInstances).withMaxCount(numInstances)); Reservation reservation = result.getReservation(); ArrayList<String> instanceIds = new ArrayList<String>(); for (Instance i : reservation.getInstances()) { System.out.println(i.getInstanceId()+" answered our call."); instanceIds.add(i.getInstanceId()); } return instanceIds; } // This turns ec2 instances into DNS names we can use to remote control the instances private static ArrayList<String> instanceIdsToDNS(AmazonEC2 ec2, ArrayList<String> instanceIds) { // Copy the array, because we're about to mutilate it instanceIds = new ArrayList<String>(instanceIds); System.out.println(); System.out.println("Waiting for instances to come online."); ArrayList<String> instanceIPs = new ArrayList<String>(); while (instanceIds.size() > 0) { System.out.print("."); DescribeInstancesResult status = ec2.describeInstances((new DescribeInstancesRequest()).withInstanceIds(instanceIds)); for (Reservation r : status.getReservations()) { for (Instance i : r.getInstances()) { if (i.getState().getName().equals("running") && i.getPublicDnsName().length() > 0) { instanceIPs.add(i.getPublicDnsName()); instanceIds.remove(i.getInstanceId()); System.out.print("\n"); System.out.println(i.getPublicDnsName()+" has come online. 
Still waiting on : "+instanceIds.size()); } } } } System.out.println(); System.out.println("Fleet assembled."); return instanceIPs; } // This turns off the instances once we're done using them private static void terminateEC2Instances(AmazonEC2 ec2, ArrayList<String> instanceIds) { System.out.println(); System.out.println("Dispersing the fleet."); TerminateInstancesResult result = ec2.terminateInstances((new TerminateInstancesRequest()).withInstanceIds(instanceIds)); System.out.println("Everyone's going home."); System.out.println(); System.out.println("PewPew Commander, over and out."); System.out.println(); } private static boolean killDisplay = false; // This does the work, once all the instances have been started up private static ArrayList<Socket> waitForSockets(ArrayList<String> cannons) { // First we need to gather all the instances ArrayList<Thread> socketThreads = new ArrayList<Thread>(); ArrayList<WaitForSocket> socketRunnables = new ArrayList<WaitForSocket>(); System.out.println(); System.out.println("Waiting for everyone to heat up cannons. (this can take a minute or two)"); for (String cannon : cannons) { WaitForSocket sock = new WaitForSocket(cannon); socketRunnables.add(sock); Thread t = new Thread(sock); socketThreads.add(t); t.start(); } Thread display = new Thread(new DisplayRunnable(".",100,-1)); display.start(); try { for (Thread t : socketThreads) { t.join(); } } catch (InterruptedException e) { System.err.println("Asynchronous insubbordination! Bailing."); return null; } killDisplay = true; ArrayList<Socket> sockets = new ArrayList<Socket>(); for (WaitForSocket waitForSocket : socketRunnables) { sockets.add(waitForSocket.getSocket()); } return sockets; } // This does the work, once all the instances have been started up private static TestResult runTest(ArrayList<Socket> sockets, TestConfig config, int tests) { // Now all the threads are System.out.println(); System.out.println("Sending out orders to "+tests+" instances."); System.out.println(); ArrayList<Thread> threads = new ArrayList<Thread>(); ArrayList<CommanderRunnable> runnables = new ArrayList<CommanderRunnable>(); for (int i = 0; i < tests; i++) { Socket socket = sockets.get(i); System.out.println("Sending firing orders to "+socket); CommanderRunnable cr = new CommanderRunnable(socket,config); runnables.add(cr); Thread t = new Thread(cr); threads.add(t); t.start(); } System.out.println(); System.out.println("All (REST API) guns blazing! Waiting for response."); System.out.println(); try { for (Thread t : threads) { t.join(); } } catch (InterruptedException e) { System.err.println("Asynchronous insubbordination! 
Bailing."); return new TestResult(); } System.out.println(); TestResult allResults = new TestResult(); for (CommanderRunnable cr : runnables) { TestResult result = cr.getResult(); allResults.passedTests += result.passedTests; allResults.failedTests += result.failedTests; allResults.msAverageResponse += result.msAverageResponse; } allResults.msAverageResponse /= threads.size(); System.out.println(); System.out.println("All transmissions received."); System.out.println("Battle summary:"); System.out.println("Passed tests: "+allResults.passedTests); System.out.println("Failed tests: "+allResults.failedTests); System.out.println("Average response (ms): "+allResults.msAverageResponse); return allResults; } private static class DisplayRunnable implements Runnable { String str; int delay; int counter; public DisplayRunnable(String str, int delay, int counter) { this.str = str; this.delay = delay; this.counter = counter; } public void run() { while (true) { // If counter is not -1, then we should limit our prints if (counter > 0) { counter --; if (counter == 0) return; } // Otherwise we take our cues from killDisplay else if (killDisplay) return; System.out.print(str); try { Thread.sleep(100); } catch (InterruptedException e) { // Do nothing. This isn't a critical thread. } } } } private static class WaitForSocket implements Runnable { Socket s; String host; public WaitForSocket(String host) { this.host = host; } public void run() { // Wait for the server to come online while (true) { try { s = new Socket(host,2109); System.out.print("\n"); System.out.println(s+" is ready to receive firing orders."); break; } catch (IOException e) { } } } public Socket getSocket() { return s; } } private static class CommanderRunnable implements Runnable { Socket s; TestConfig config; TestResult result; public CommanderRunnable(Socket s, TestConfig config) { this.s = s; this.config = config; } public TestResult getResult() { return result; } public void run() { // Fire the cannon try { ObjectOutputStream oos = new ObjectOutputStream(s.getOutputStream()); oos.writeObject(config); ObjectInputStream ois = new ObjectInputStream(s.getInputStream()); result = (TestResult)ois.readObject(); return; } catch (ClassNotFoundException e) { System.err.println("Transmission came through, but didn't follow the right format. Spooling down cannon."); } catch (IOException e) { System.out.println("Transmission garbled. Spooling down cannon."); } } } }
true
true
public static void main(String[] args) { System.out.println(); System.out.println("Welcome to PewPew Commander!"); System.out.println("\"Your one stop shop for all your DDoS needs!\" (just kidding, use only for good)"); System.out.println(); System.out.println("Preparing written orders."); System.out.println(); int instances = 1; int hits = 10; int delay = 100; int timeout = 500; Flag flag = Flag.NONE; for (String arg : args) { if (arg.equals("--instances") || arg.equals("-i")) { flag = Flag.INSTANCES; } else if (arg.equals("--hits") || arg.equals("-h")) { flag = Flag.HITS; } else if (arg.equals("--delay") || arg.equals("-d")) { flag = Flag.DELAY; } else if (arg.equals("--timeout") || arg.equals("-t")) { flag = Flag.TIMEOUT; } else if (arg.equals("--help") || arg.equals("-h")) { printHelp(); return; } else { int input = 0; try { input = Integer.parseInt(arg); } catch (Exception e) { System.out.println("Couldn't read integer input. No fractions or decimals are allowed."); printHelp(); return; } if (flag == Flag.INSTANCES) { instances = input; } else if (flag == Flag.HITS) { hits = input; } else if (flag == Flag.DELAY) { delay = input; } else if (flag == Flag.TIMEOUT) { hits = input; } else { System.out.println("Command not recognized : "+arg); printHelp(); return; } flag = Flag.NONE; } } // Here's a config for testing the crash reporter TestConfig config = new TestConfig(); config.url = "amazonloadbalancer-1358579984.us-west-2.elb.amazonaws.com/log/"; config.expectedResponse = "your concerns will be noted in the captains log ;)"; config.numHits = hits; config.msDelay = delay; config.msTimeout = timeout; config.urlParamGenerator = new CrashTestParameterGenerator(); System.out.println("Planning to hit \""+config.url+"\" from "+instances+" instances, with "+hits+" hits each, at a "+delay+" ms delay, accepting a max lag per request of "+timeout+" ms."); ArrayList<String> cannons = new ArrayList<String>(); ArrayList<String> instanceIds = new ArrayList<String>(); AmazonEC2 ec2 = null; cannons.add("localhost"); if (useEC2) { ec2 = createEC2(); // Create the instances instanceIds = createEC2Instances(ec2, instances); // Wait for instances to start up cannons = instanceIdsToDNS(ec2, instanceIds); } // Wait for our sockets ArrayList<Socket> sockets = waitForSockets(cannons); // Tell instances to attack ArrayList<TestResult> results = new ArrayList<TestResult>(); for (int i = 0; i < sockets.size()*2; i++) { // Blast from i instances, and get results long startTimeMS = System.currentTimeMillis(); int testInstances = (int)Math.ceil(i/2)+1; TestResult result = runTest(sockets,config,testInstances); result.instances = testInstances; result.config = config; result.duration = System.currentTimeMillis() - startTimeMS; results.add(result); } System.out.println(); System.out.println("==============\nTest Results\n=============="); long perInstanceAvgResponseMs = 0; float avgHitsPerSecond = 0; for (int i = 0; i < results.size(); i++) { TestResult result = results.get(i); System.out.println("\n-------\nTest "+i+":\n-------"); System.out.println("simultaneous instances: "+result.instances); System.out.println(); System.out.println("instance config:"); System.out.println("delay between hits: "+result.config.msDelay); System.out.println("timeout: "+result.config.msTimeout); System.out.println("consecutive hits: "+result.config.numHits); System.out.println("theorhetical max hits per second: "+((1000f/(float)result.config.msTimeout)*(float)result.instances)); System.out.println(); System.out.println("passed: 
"+result.passedTests); System.out.println("failed: "+result.failedTests); System.out.println("avg response time: "+result.msAverageResponse); System.out.println("avg response time per instance: "+(result.msAverageResponse/(long)result.instances)); System.out.println("realized hits per second: "+(((float)result.passedTests/(float)result.duration)*1000f)); perInstanceAvgResponseMs += (result.msAverageResponse/(long)result.instances); avgHitsPerSecond += (((float)result.passedTests/(float)result.duration)*1000f); } perInstanceAvgResponseMs /= results.size(); avgHitsPerSecond /= results.size(); System.out.println(); System.out.println("****"); System.out.println("Overall avg response time per instance : "+perInstanceAvgResponseMs); System.out.println("Overall avg hits per second : "+avgHitsPerSecond); System.out.println(); // Terminate the instances if (useEC2) { terminateEC2Instances(ec2, instanceIds); } }
public static void main(String[] args) { System.out.println(); System.out.println("Welcome to PewPew Commander!"); System.out.println("\"Your one stop shop for all your DDoS needs!\" (just kidding, use only for good)"); System.out.println(); System.out.println("Preparing written orders."); System.out.println(); int instances = 1; int hits = 10; int delay = 100; int timeout = 500; Flag flag = Flag.NONE; for (String arg : args) { if (arg.equals("--instances") || arg.equals("-i")) { flag = Flag.INSTANCES; } else if (arg.equals("--hits") || arg.equals("-h")) { flag = Flag.HITS; } else if (arg.equals("--delay") || arg.equals("-d")) { flag = Flag.DELAY; } else if (arg.equals("--timeout") || arg.equals("-t")) { flag = Flag.TIMEOUT; } else if (arg.equals("--help") || arg.equals("-h")) { printHelp(); return; } else { int input = 0; try { input = Integer.parseInt(arg); } catch (Exception e) { System.out.println("Couldn't read integer input. No fractions or decimals are allowed."); printHelp(); return; } if (flag == Flag.INSTANCES) { instances = input; } else if (flag == Flag.HITS) { hits = input; } else if (flag == Flag.DELAY) { delay = input; } else if (flag == Flag.TIMEOUT) { timeout = input; } else { System.out.println("Command not recognized : "+arg); printHelp(); return; } flag = Flag.NONE; } } // Here's a config for testing the crash reporter TestConfig config = new TestConfig(); config.url = "amazonloadbalancer-1358579984.us-west-2.elb.amazonaws.com/log/"; config.expectedResponse = "your concerns will be noted in the captains log ;)"; config.numHits = hits; config.msDelay = delay; config.msTimeout = timeout; config.urlParamGenerator = new CrashTestParameterGenerator(); System.out.println("Planning to hit \""+config.url+"\" from "+instances+" instances, with "+hits+" hits each, at a "+delay+" ms delay, accepting a max lag per request of "+timeout+" ms."); ArrayList<String> cannons = new ArrayList<String>(); ArrayList<String> instanceIds = new ArrayList<String>(); AmazonEC2 ec2 = null; cannons.add("localhost"); if (useEC2) { ec2 = createEC2(); // Create the instances instanceIds = createEC2Instances(ec2, instances); // Wait for instances to start up cannons = instanceIdsToDNS(ec2, instanceIds); } // Wait for our sockets ArrayList<Socket> sockets = waitForSockets(cannons); // Tell instances to attack ArrayList<TestResult> results = new ArrayList<TestResult>(); for (int i = 0; i < sockets.size()*2; i++) { // Blast from i instances, and get results long startTimeMS = System.currentTimeMillis(); int testInstances = (int)Math.ceil(i/2)+1; TestResult result = runTest(sockets,config,testInstances); result.instances = testInstances; result.config = config; result.duration = System.currentTimeMillis() - startTimeMS; results.add(result); } System.out.println(); System.out.println("==============\nTest Results\n=============="); long perInstanceAvgResponseMs = 0; float avgHitsPerSecond = 0; for (int i = 0; i < results.size(); i++) { TestResult result = results.get(i); System.out.println("\n-------\nTest "+i+":\n-------"); System.out.println("simultaneous instances: "+result.instances); System.out.println(); System.out.println("instance config:"); System.out.println("delay between hits: "+result.config.msDelay); System.out.println("timeout: "+result.config.msTimeout); System.out.println("consecutive hits: "+result.config.numHits); System.out.println("theorhetical max hits per second: "+((1000f/(float)result.config.msTimeout)*(float)result.instances)); System.out.println(); System.out.println("passed: 
"+result.passedTests); System.out.println("failed: "+result.failedTests); System.out.println("avg response time: "+result.msAverageResponse); System.out.println("avg response time per instance: "+(result.msAverageResponse/(long)result.instances)); System.out.println("realized hits per second: "+(((float)result.passedTests/(float)result.duration)*1000f)); perInstanceAvgResponseMs += (result.msAverageResponse/(long)result.instances); avgHitsPerSecond += (((float)result.passedTests/(float)result.duration)*1000f); } perInstanceAvgResponseMs /= results.size(); avgHitsPerSecond /= results.size(); System.out.println(); System.out.println("****"); System.out.println("Overall avg response time per instance : "+perInstanceAvgResponseMs); System.out.println("Overall avg hits per second : "+avgHitsPerSecond); System.out.println(); // Terminate the instances if (useEC2) { terminateEC2Instances(ec2, instanceIds); } }
diff --git a/app/src/me/openphoto/android/app/bitmapfun/util/ImageCache.java b/app/src/me/openphoto/android/app/bitmapfun/util/ImageCache.java index 86b7ae1..0c9f487 100644 --- a/app/src/me/openphoto/android/app/bitmapfun/util/ImageCache.java +++ b/app/src/me/openphoto/android/app/bitmapfun/util/ImageCache.java @@ -1,285 +1,290 @@ /* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package me.openphoto.android.app.bitmapfun.util; import java.io.File; import me.openphoto.android.app.BuildConfig; import me.openphoto.android.app.OpenPhotoApplication; import me.openphoto.android.app.util.CommonUtils; import android.app.ActivityManager; import android.content.Context; import android.graphics.Bitmap; import android.graphics.Bitmap.CompressFormat; import android.support.v4.app.FragmentActivity; import android.support.v4.util.LruCache; /** * This class holds our bitmap caches (memory and disk). */ public class ImageCache { private static final String TAG = "ImageCache"; public static final String THUMBS_CACHE_DIR = "thumbs"; public static final String LOCAL_THUMBS_CACHE_DIR = "thumbs_local"; public static final String LARGE_IMAGES_CACHE_DIR = "images"; // Default memory cache size private static final int DEFAULT_MEM_CACHE_SIZE = 1024 * 1024 * 5; // 5MB // Default disk cache size private static final int DEFAULT_DISK_CACHE_SIZE = 1024 * 1024 * 10; // 10MB private static final int DEFAULT_DISK_CACHE_MAX_ITEM_SIZE = 64; // Compression settings when writing images to disk cache private static final CompressFormat DEFAULT_COMPRESS_FORMAT = CompressFormat.JPEG; private static final int DEFAULT_COMPRESS_QUALITY = 70; // Constants to easily toggle various caches private static final boolean DEFAULT_MEM_CACHE_ENABLED = true; private static final boolean DEFAULT_DISK_CACHE_ENABLED = true; private static final boolean DEFAULT_CLEAR_DISK_CACHE_ON_START = true; private DiskLruCache mDiskCache; private LruCache<String, Bitmap> mMemoryCache; /** * Creating a new ImageCache object using the specified parameters. * * @param context The context to use * @param cacheParams The cache parameters to use to initialize the cache */ public ImageCache(Context context, ImageCacheParams cacheParams) { init(context, cacheParams); } /** * Creating a new ImageCache object using the default parameters. * * @param context The context to use * @param uniqueName A unique name that will be appended to the cache * directory */ public ImageCache(Context context, String uniqueName) { init(context, new ImageCacheParams(uniqueName)); } /** * Find and return an existing ImageCache stored in a {@link RetainFragment} * , if not found a new one is created with defaults and saved to a * {@link RetainFragment}. * * @param activity The calling {@link FragmentActivity} * @param uniqueName A unique name to append to the cache directory * @return An existing retained ImageCache object or a new one if one did * not exist. 
*/ public static ImageCache findOrCreateCache( final FragmentActivity activity, final String uniqueName) { return findOrCreateCache(activity, uniqueName, DEFAULT_CLEAR_DISK_CACHE_ON_START); } /** * Find and return an existing ImageCache stored in a {@link RetainFragment} * , if not found a new one is created with defaults and saved to a * {@link RetainFragment}. * * @param activity The calling {@link FragmentActivity} * @param uniqueName A unique name to append to the cache directory * @return An existing retained ImageCache object or a new one if one did * not exist. * @param clearDiskCacheOnStart whether to clear disk cache on start * @return */ public static ImageCache findOrCreateCache( final FragmentActivity activity, final String uniqueName, boolean clearDiskCacheOnStart) { ImageCacheParams params = new ImageCacheParams(uniqueName); // Get memory class of this device, exceeding this amount will throw an // OutOfMemory exception. final int memClass = ((ActivityManager) activity.getSystemService( Context.ACTIVITY_SERVICE)).getMemoryClass(); // Use 1/8th of the available memory for this memory cache. params.memCacheSize = 1024 * 1024 * memClass / 8; params.clearDiskCacheOnStart = clearDiskCacheOnStart; CommonUtils.debug(TAG, "Calculated memory cache size: " + params.memCacheSize); return findOrCreateCache(activity, params); } /** * Find and return an existing ImageCache stored in a {@link RetainFragment} * , if not found a new one is created using the supplied params and saved * to a {@link RetainFragment}. * * @param activity The calling {@link FragmentActivity} * @param cacheParams The cache parameters to use if creating the ImageCache * @return An existing retained ImageCache object or a new one if one did * not exist */ public static ImageCache findOrCreateCache( final FragmentActivity activity, ImageCacheParams cacheParams) { // Search for, or create an instance of the non-UI RetainFragment final RetainFragment mRetainFragment = RetainFragment.findOrCreateRetainFragment( activity.getSupportFragmentManager()); // See if we already have an ImageCache stored in RetainFragment ImageCache imageCache = (ImageCache) mRetainFragment .getObject(cacheParams.uniqueName); // No existing ImageCache, create one and store it in RetainFragment if (imageCache == null) { imageCache = new ImageCache(activity, cacheParams); mRetainFragment.setObject(imageCache); } return imageCache; } /** * Initialize the cache, providing all parameters. * * @param context The context to use * @param cacheParams The cache parameters to initialize the cache */ private void init(Context context, ImageCacheParams cacheParams) { final File diskCacheDir = DiskLruCache.getDiskCacheDir(context, cacheParams.uniqueName); // Set up disk cache if (cacheParams.diskCacheEnabled) { mDiskCache = DiskLruCache.openCache(context, diskCacheDir, cacheParams.diskCacheSize, cacheParams.diskCacheMaxItemSize); - mDiskCache.setCompressParams(cacheParams.compressFormat, cacheParams.compressQuality); - if (cacheParams.clearDiskCacheOnStart) { - mDiskCache.clearCache(); + // Issue #259 fix. 
Sometimes previous step returns null + if (mDiskCache != null) + { + mDiskCache.setCompressParams(cacheParams.compressFormat, + cacheParams.compressQuality); + if (cacheParams.clearDiskCacheOnStart) { + mDiskCache.clearCache(); + } } } // Set up memory cache if (cacheParams.memoryCacheEnabled) { mMemoryCache = new LruCache<String, Bitmap>(cacheParams.memCacheSize) { /** * Measure item size in bytes rather than units which is more * practical for a bitmap cache */ @Override protected int sizeOf(String key, Bitmap bitmap) { return Utils.getBitmapSize(bitmap); } }; } } public void addBitmapToCache(String data, Bitmap bitmap) { if (data == null || bitmap == null) { return; } // Add to memory cache if (mMemoryCache != null && mMemoryCache.get(data) == null) { mMemoryCache.put(data, bitmap); } // Add to disk cache if (mDiskCache != null && !mDiskCache.containsKey(data)) { mDiskCache.put(data, bitmap); } } /** * Get from memory cache. * * @param data Unique identifier for which item to get * @return The bitmap if found in cache, null otherwise */ public Bitmap getBitmapFromMemCache(String data) { if (mMemoryCache != null) { final Bitmap memBitmap = mMemoryCache.get(data); if (memBitmap != null) { if (BuildConfig.DEBUG) { CommonUtils.debug(TAG, "Memory cache hit"); } return memBitmap; } } return null; } /** * Get from disk cache. * * @param data Unique identifier for which item to get * @return The bitmap if found in cache, null otherwise */ public Bitmap getBitmapFromDiskCache(String data) { if (mDiskCache != null) { return mDiskCache.get(data); } return null; } public void clearCaches() { clearDiskCache(); clearMemoryCache(); } public void clearDiskCache() { if (mDiskCache != null) { mDiskCache.clearCache(); } } public void clearMemoryCache() { if (mMemoryCache != null) { CommonUtils.debug(TAG, "Requested memory cache cleaning"); mMemoryCache.evictAll(); } } public static void clearDiskCaches() { DiskLruCache.clearCaches(OpenPhotoApplication.getContext(), THUMBS_CACHE_DIR, LOCAL_THUMBS_CACHE_DIR, LARGE_IMAGES_CACHE_DIR, ImageFetcher.HTTP_CACHE_DIR); } /** * A holder class that contains cache parameters. */ public static class ImageCacheParams { public String uniqueName; public int memCacheSize = DEFAULT_MEM_CACHE_SIZE; public int diskCacheSize = DEFAULT_DISK_CACHE_SIZE; public int diskCacheMaxItemSize = DEFAULT_DISK_CACHE_MAX_ITEM_SIZE; public CompressFormat compressFormat = DEFAULT_COMPRESS_FORMAT; public int compressQuality = DEFAULT_COMPRESS_QUALITY; public boolean memoryCacheEnabled = DEFAULT_MEM_CACHE_ENABLED; public boolean diskCacheEnabled = DEFAULT_DISK_CACHE_ENABLED; public boolean clearDiskCacheOnStart = DEFAULT_CLEAR_DISK_CACHE_ON_START; public ImageCacheParams(String uniqueName) { this.uniqueName = uniqueName; } } }
true
true
private void init(Context context, ImageCacheParams cacheParams) {
        final File diskCacheDir = DiskLruCache.getDiskCacheDir(context, cacheParams.uniqueName);

        // Set up disk cache
        if (cacheParams.diskCacheEnabled) {
            mDiskCache = DiskLruCache.openCache(context, diskCacheDir,
                    cacheParams.diskCacheSize, cacheParams.diskCacheMaxItemSize);
            mDiskCache.setCompressParams(cacheParams.compressFormat, cacheParams.compressQuality);
            if (cacheParams.clearDiskCacheOnStart) {
                mDiskCache.clearCache();
            }
        }

        // Set up memory cache
        if (cacheParams.memoryCacheEnabled) {
            mMemoryCache = new LruCache<String, Bitmap>(cacheParams.memCacheSize) {

                /**
                 * Measure item size in bytes rather than units which is more
                 * practical for a bitmap cache
                 */
                @Override
                protected int sizeOf(String key, Bitmap bitmap) {
                    return Utils.getBitmapSize(bitmap);
                }
            };
        }
    }
private void init(Context context, ImageCacheParams cacheParams) {
        final File diskCacheDir = DiskLruCache.getDiskCacheDir(context, cacheParams.uniqueName);

        // Set up disk cache
        if (cacheParams.diskCacheEnabled) {
            mDiskCache = DiskLruCache.openCache(context, diskCacheDir,
                    cacheParams.diskCacheSize, cacheParams.diskCacheMaxItemSize);
            // Issue #259 fix. Sometimes previous step returns null
            if (mDiskCache != null)
            {
                mDiskCache.setCompressParams(cacheParams.compressFormat,
                        cacheParams.compressQuality);
                if (cacheParams.clearDiskCacheOnStart) {
                    mDiskCache.clearCache();
                }
            }
        }

        // Set up memory cache
        if (cacheParams.memoryCacheEnabled) {
            mMemoryCache = new LruCache<String, Bitmap>(cacheParams.memCacheSize) {

                /**
                 * Measure item size in bytes rather than units which is more
                 * practical for a bitmap cache
                 */
                @Override
                protected int sizeOf(String key, Bitmap bitmap) {
                    return Utils.getBitmapSize(bitmap);
                }
            };
        }
    }
diff --git a/src/com/ichi2/anki/CardModel.java b/src/com/ichi2/anki/CardModel.java index a781a13b..91fbb02e 100644 --- a/src/com/ichi2/anki/CardModel.java +++ b/src/com/ichi2/anki/CardModel.java @@ -1,430 +1,430 @@ /**************************************************************************************** * Copyright (c) 2009 Daniel Svärd <[email protected]> * * Copyright (c) 2010 Rick Gruber-Riemer <[email protected]> * * * * This program is free software; you can redistribute it and/or modify it under * * the terms of the GNU General Public License as published by the Free Software * * Foundation; either version 3 of the License, or (at your option) any later * * version. * * * * This program is distributed in the hope that it will be useful, but WITHOUT ANY * * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A * * PARTICULAR PURPOSE. See the GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License along with * * this program. If not, see <http://www.gnu.org/licenses/>. * ****************************************************************************************/ package com.ichi2.anki; import android.database.Cursor; import java.util.Comparator; import java.util.HashMap; import java.util.TreeMap; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Card model. Card models are used to make question/answer pairs for the information you add to facts. You can display * any number of fields on the question side and answer side. * * @see http://ichi2.net/anki/wiki/ModelProperties#Card_Templates */ public class CardModel implements Comparator<CardModel> { // TODO: Javadoc. // TODO: Methods for reading/writing from/to DB. public static final int DEFAULT_FONT_SIZE = 20; public static final int DEFAULT_FONT_SIZE_RATIO = 100; public static final String DEFAULT_FONT_FAMILY = "Arial"; public static final String DEFAULT_FONT_COLOR = "#000000"; public static final String DEFAULT_BACKGROUND_COLOR = "#FFFFFF"; /** Regex pattern used in removing tags from text before diff */ private static final Pattern sFactPattern = Pattern.compile("%\\([tT]ags\\)s"); private static final Pattern sModelPattern = Pattern.compile("%\\(modelTags\\)s"); private static final Pattern sTemplPattern = Pattern.compile("%\\(cardModel\\)s"); // BEGIN SQL table columns private long mId; // Primary key private int mOrdinal; private long mModelId; // Foreign key models.id private String mName; private String mDescription = ""; private int mActive = 1; // Formats: question/answer/last (not used) private String mQformat; private String mAformat; private String mLformat; // Question/answer editor format (not used yet) private String mQedformat; private String mAedformat; private int mQuestionInAnswer = 0; // Unused private String mQuestionFontFamily = DEFAULT_FONT_FAMILY; private int mQuestionFontSize = DEFAULT_FONT_SIZE; private String mQuestionFontColour = DEFAULT_FONT_COLOR; // Used for both question & answer private int mQuestionAlign = 0; // Unused private String mAnswerFontFamily = DEFAULT_FONT_FAMILY; private int mAnswerFontSize = DEFAULT_FONT_SIZE; private String mAnswerFontColour = DEFAULT_FONT_COLOR; private int mAnswerAlign = 0; private String mLastFontFamily = DEFAULT_FONT_FAMILY; private int mLastFontSize = DEFAULT_FONT_SIZE; // Used as background colour private String mLastFontColour = DEFAULT_BACKGROUND_COLOR; private String mEditQuestionFontFamily = ""; private int mEditQuestionFontSize = 0; private String 
mEditAnswerFontFamily = ""; private int mEditAnswerFontSize = 0; // Empty answer private int mAllowEmptyAnswer = 1; private String mTypeAnswer = ""; // END SQL table entries /** * Backward reference */ private Model mModel; /** * Constructor. */ public CardModel(String name, String qformat, String aformat, boolean active) { mName = name; mQformat = qformat; mAformat = aformat; mActive = active ? 1 : 0; mId = Utils.genID(); } /** * Constructor. */ public CardModel() { this("", "q", "a", true); } /** SELECT string with only those fields, which are used in AnkiDroid */ private static final String SELECT_STRING = "SELECT id, ordinal, modelId, name, description, active, qformat, " + "aformat, questionInAnswer, questionFontFamily, questionFontSize, questionFontColour, questionAlign, " + "answerFontFamily, answerFontSize, answerFontColour, answerAlign, lastFontColour" + " FROM cardModels"; /** * @param modelId * @param models will be changed by adding all found CardModels into it * @return unordered CardModels which are related to a given Model and eventually active put into the parameter * "models" */ protected static final void fromDb(Deck deck, long modelId, TreeMap<Long, CardModel> models) { Cursor cursor = null; CardModel myCardModel = null; try { StringBuffer query = new StringBuffer(SELECT_STRING); query.append(" WHERE modelId = "); query.append(modelId); cursor = AnkiDatabaseManager.getDatabase(deck.getDeckPath()).getDatabase().rawQuery(query.toString(), null); if (cursor.moveToFirst()) { do { myCardModel = new CardModel(); myCardModel.mId = cursor.getLong(0); myCardModel.mOrdinal = cursor.getInt(1); myCardModel.mModelId = cursor.getLong(2); myCardModel.mName = cursor.getString(3); myCardModel.mDescription = cursor.getString(4); myCardModel.mActive = cursor.getInt(5); myCardModel.mQformat = cursor.getString(6); myCardModel.mAformat = cursor.getString(7); myCardModel.mQuestionInAnswer = cursor.getInt(8); myCardModel.mQuestionFontFamily = cursor.getString(9); myCardModel.mQuestionFontSize = cursor.getInt(10); myCardModel.mQuestionFontColour = cursor.getString(11); myCardModel.mQuestionAlign = cursor.getInt(12); myCardModel.mAnswerFontFamily = cursor.getString(13); myCardModel.mAnswerFontSize = cursor.getInt(14); myCardModel.mAnswerFontColour = cursor.getString(15); myCardModel.mAnswerAlign = cursor.getInt(16); myCardModel.mLastFontColour = cursor.getString(17); models.put(myCardModel.mId, myCardModel); } while (cursor.moveToNext()); } } finally { if (cursor != null && !cursor.isClosed()) { cursor.close(); } } } public boolean isActive() { return (mActive != 0); } /** * @param cardModelId * @return the modelId for a given cardModel or 0, if it cannot be found */ protected static final long modelIdFromDB(Deck deck, long cardModelId) { Cursor cursor = null; long modelId = -1; try { String query = "SELECT modelId FROM cardModels WHERE id = " + cardModelId; cursor = AnkiDatabaseManager.getDatabase(deck.getDeckPath()).getDatabase().rawQuery(query, null); cursor.moveToFirst(); modelId = cursor.getLong(0); } finally { if (cursor != null && !cursor.isClosed()) { cursor.close(); } } return modelId; } // XXX Unused // /** // * Return a copy of this object. // */ // public CardModel copy() { // CardModel cardModel = new CardModel(mName, mQformat, mAformat, (mActive == 1) ? 
true : false); // cardModel.mOrdinal = mOrdinal; // cardModel.mModelId = mModelId; // cardModel.mDescription = mDescription; // cardModel.mLformat = mLformat; // cardModel.mQedformat = mQedformat; // cardModel.mAedformat = mAedformat; // cardModel.mQuestionInAnswer = mQuestionInAnswer; // cardModel.mQuestionFontFamily = mQuestionFontFamily; // cardModel.mQuestionFontSize = mQuestionFontSize; // cardModel.mQuestionFontColour = mQuestionFontColour; // cardModel.mQuestionAlign = mQuestionAlign; // cardModel.mAnswerFontFamily = mAnswerFontFamily; // cardModel.mAnswerFontSize = mAnswerFontSize; // cardModel.mAnswerFontColour = mAnswerFontColour; // cardModel.mAnswerAlign = mAnswerAlign; // cardModel.mLastFontFamily = mLastFontFamily; // cardModel.mLastFontSize = mLastFontSize; // cardModel.mLastFontColour = mLastFontColour; // cardModel.mEditQuestionFontFamily = mEditQuestionFontFamily; // cardModel.mEditQuestionFontSize = mEditQuestionFontSize; // cardModel.mEditAnswerFontFamily = mEditAnswerFontFamily; // cardModel.mEditAnswerFontSize = mEditAnswerFontSize; // cardModel.mAllowEmptyAnswer = mAllowEmptyAnswer; // cardModel.mTypeAnswer = mTypeAnswer; // cardModel.mModel = null; // // return cardModel; // } public static HashMap<String, String> formatQA(Fact fact, CardModel cm, String[] tags) { // Not pretty, I know. String question = cm.mQformat; String answer = cm.mAformat; // First deal with the tag fields: // %(tags)s = factTags tags where src = 0 // %(modelTags)s = modelTags tags where src = 1 // %(cardModel)s = templateTags tags where src = 2 Matcher tagMatcher; // fact tags %(tags)s or %(Tags)s tagMatcher = sFactPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_FACT]); tagMatcher = sFactPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_FACT]); // modelTags %(modelTags)s tagMatcher = sModelPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_MODEL]); tagMatcher = sModelPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_MODEL]); // templateTags %(cardModel)s tagMatcher = sTemplPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_TEMPL]); tagMatcher = sTemplPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_TEMPL]); int replaceAt = question.indexOf("%("); while (replaceAt != -1) { - if (question.substring(replaceAt + 2,replaceAt + 7).equals("text:")){ + if (question.substring(replaceAt, replaceAt + 7).equals("%(text:")){ question = replaceHtmlField(question, fact, replaceAt); } else { question = replaceField(question, fact, replaceAt, true); } replaceAt = question.indexOf("%("); } replaceAt = answer.indexOf("%("); while (replaceAt != -1) { - if (answer.substring(replaceAt + 2,replaceAt + 7).equals("text:")){ + if (answer.substring(replaceAt, replaceAt + 7).equals("%(text:")){ answer = replaceHtmlField(answer, fact, replaceAt); } else { answer = replaceField(answer, fact, replaceAt, true); } replaceAt = answer.indexOf("%("); } HashMap<String, String> returnMap = new HashMap<String, String>(); returnMap.put("question", question); returnMap.put("answer", answer); return returnMap; } private static String replaceField(String replaceFrom, Fact fact, int replaceAt, boolean isQuestion) { int endIndex = replaceFrom.indexOf(")", replaceAt); String fieldName = replaceFrom.substring(replaceAt + 2, endIndex); char fieldType = replaceFrom.charAt(endIndex + 1); if (isQuestion) { String replace = "%(" + fieldName + ")" + fieldType; String with = "<span class=\"fm" + 
Long.toHexString(fact.getFieldModelId(fieldName)) + "\">" + fact.getFieldValue(fieldName) + "</span>"; replaceFrom = replaceFrom.replace(replace, with); } else { replaceFrom.replace( "%(" + fieldName + ")" + fieldType, "<span class=\"fma" + Long.toHexString(fact.getFieldModelId(fieldName)) + "\">" + fact.getFieldValue(fieldName) + "</span"); } return replaceFrom; } private static String replaceHtmlField(String replaceFrom, Fact fact, int replaceAt) { int endIndex = replaceFrom.indexOf(")", replaceAt); String fieldName = replaceFrom.substring(replaceAt + 7, endIndex); char fieldType = replaceFrom.charAt(endIndex + 1); String replace = "%(text:" + fieldName + ")" + fieldType; String with = fact.getFieldValue(fieldName); replaceFrom = replaceFrom.replace(replace, with); return replaceFrom; } /** * Implements Comparator by comparing the field "ordinal". * @param object1 * @param object2 * @return */ @Override public int compare(CardModel object1, CardModel object2) { return object1.mOrdinal - object2.mOrdinal; } /** * @return the id */ public long getId() { return mId; } /** * @return the ordinal */ public int getOrdinal() { return mOrdinal; } /** * @return the questionInAnswer */ public boolean isQuestionInAnswer() { // FIXME hmmm, is that correct? return (mQuestionInAnswer == 0); } /** * @return the lastFontColour */ public String getLastFontColour() { return mLastFontColour; } /** * @return the questionFontFamily */ public String getQuestionFontFamily() { return mQuestionFontFamily; } /** * @return the questionFontSize */ public int getQuestionFontSize() { return mQuestionFontSize; } /** * @return the questionFontColour */ public String getQuestionFontColour() { return mQuestionFontColour; } /** * @return the questionAlign */ public int getQuestionAlign() { return mQuestionAlign; } /** * @return the answerFontFamily */ public String getAnswerFontFamily() { return mAnswerFontFamily; } /** * @return the answerFontSize */ public int getAnswerFontSize() { return mAnswerFontSize; } /** * @return the answerFontColour */ public String getAnswerFontColour() { return mAnswerFontColour; } /** * @return the answerAlign */ public int getAnswerAlign() { return mAnswerAlign; } /** * @return the name */ public String getName() { return mName; } }
false
true
public static HashMap<String, String> formatQA(Fact fact, CardModel cm, String[] tags) { // Not pretty, I know. String question = cm.mQformat; String answer = cm.mAformat; // First deal with the tag fields: // %(tags)s = factTags tags where src = 0 // %(modelTags)s = modelTags tags where src = 1 // %(cardModel)s = templateTags tags where src = 2 Matcher tagMatcher; // fact tags %(tags)s or %(Tags)s tagMatcher = sFactPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_FACT]); tagMatcher = sFactPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_FACT]); // modelTags %(modelTags)s tagMatcher = sModelPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_MODEL]); tagMatcher = sModelPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_MODEL]); // templateTags %(cardModel)s tagMatcher = sTemplPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_TEMPL]); tagMatcher = sTemplPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_TEMPL]); int replaceAt = question.indexOf("%("); while (replaceAt != -1) { if (question.substring(replaceAt + 2,replaceAt + 7).equals("text:")){ question = replaceHtmlField(question, fact, replaceAt); } else { question = replaceField(question, fact, replaceAt, true); } replaceAt = question.indexOf("%("); } replaceAt = answer.indexOf("%("); while (replaceAt != -1) { if (answer.substring(replaceAt + 2,replaceAt + 7).equals("text:")){ answer = replaceHtmlField(answer, fact, replaceAt); } else { answer = replaceField(answer, fact, replaceAt, true); } replaceAt = answer.indexOf("%("); } HashMap<String, String> returnMap = new HashMap<String, String>(); returnMap.put("question", question); returnMap.put("answer", answer); return returnMap; }
public static HashMap<String, String> formatQA(Fact fact, CardModel cm, String[] tags) { // Not pretty, I know. String question = cm.mQformat; String answer = cm.mAformat; // First deal with the tag fields: // %(tags)s = factTags tags where src = 0 // %(modelTags)s = modelTags tags where src = 1 // %(cardModel)s = templateTags tags where src = 2 Matcher tagMatcher; // fact tags %(tags)s or %(Tags)s tagMatcher = sFactPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_FACT]); tagMatcher = sFactPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_FACT]); // modelTags %(modelTags)s tagMatcher = sModelPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_MODEL]); tagMatcher = sModelPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_MODEL]); // templateTags %(cardModel)s tagMatcher = sTemplPattern.matcher(question); question = tagMatcher.replaceAll(tags[Card.TAGS_TEMPL]); tagMatcher = sTemplPattern.matcher(answer); answer = tagMatcher.replaceAll(tags[Card.TAGS_TEMPL]); int replaceAt = question.indexOf("%("); while (replaceAt != -1) { if (question.substring(replaceAt, replaceAt + 7).equals("%(text:")){ question = replaceHtmlField(question, fact, replaceAt); } else { question = replaceField(question, fact, replaceAt, true); } replaceAt = question.indexOf("%("); } replaceAt = answer.indexOf("%("); while (replaceAt != -1) { if (answer.substring(replaceAt, replaceAt + 7).equals("%(text:")){ answer = replaceHtmlField(answer, fact, replaceAt); } else { answer = replaceField(answer, fact, replaceAt, true); } replaceAt = answer.indexOf("%("); } HashMap<String, String> returnMap = new HashMap<String, String>(); returnMap.put("question", question); returnMap.put("answer", answer); return returnMap; }
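The change in this record rewrites the prefix test in both loops: instead of comparing the five characters after the "%(" match against "text:", it compares the full seven-character window starting at the match against "%(text:". A standalone sketch of the new check, with hypothetical template strings and an extra length guard of my own so the sketch stays safe near the end of a string:

// Sketch of the rewritten prefix test. The window now starts at the position that
// indexOf("%(") returned, so the literal being compared is the full "%(text:" prefix.
// The length check is an addition for this sketch, not part of the original fix.
final class TextFieldCheckSketch {
    static boolean isTextField(String template, int replaceAt) {
        return template.length() >= replaceAt + 7
                && template.substring(replaceAt, replaceAt + 7).equals("%(text:");
    }

    public static void main(String[] args) {
        String template = "Q: %(text:Front)s / %(Back)s";      // hypothetical card template
        int at = template.indexOf("%(");
        System.out.println(isTextField(template, at));          // true  -> plain-text branch
        at = template.indexOf("%(", at + 1);
        System.out.println(isTextField(template, at));          // false -> regular field branch
    }
}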
diff --git a/eBudgeting/src/main/java/biz/thaicom/eBudgeting/models/pln/ObjectiveTypeId.java b/eBudgeting/src/main/java/biz/thaicom/eBudgeting/models/pln/ObjectiveTypeId.java index fa5679f..201fb32 100644 --- a/eBudgeting/src/main/java/biz/thaicom/eBudgeting/models/pln/ObjectiveTypeId.java +++ b/eBudgeting/src/main/java/biz/thaicom/eBudgeting/models/pln/ObjectiveTypeId.java @@ -1,76 +1,76 @@ package biz.thaicom.eBudgeting.models.pln; public enum ObjectiveTypeId { ROOT(100L), แผนงาน(101L), ผลผลิตโครงการ(102L), กิจกรรมหลัก(103L), กิจกรรมรอง(104L), กิจกรรมย่อย(105L), กิจกรรมเสริม(106L),กิจกรรมสนับสนุน (107L), ยุทธศาสตร์การจัดสรรงบประมาณ (109L), ประเด็นยุทธศาสตร์(121), เป้าหมายเชิงยุทธศาสตร์(110), เป้าหมายบริการกระทรวง(111), เป้าหมายบริการหน่วยงาน(112), เป้าประสงค์เชิงนโยบาย(113), ยุทธศาสตร์กระทรวง(114), กลยุทธ์หน่วยงาน(115), กลยุทธ์วิธีการหน่วยงาน(116), แนวทางการจัดสรรงบประมาณ(118), วิสัยทัศน์(119), พันธกิจ(120); private final long id; private ObjectiveTypeId(long id) { this.id = id; } public long getValue() { return id; } public String getName() { if(id==100) { return "ROOT"; } else if(id==101){ return "แผนงาน"; } else if(id==102){ return "ผลผลิต/โครงการ"; } else if(id==103){ return "กิจกรรมหลัก"; } else if(id==104){ return "กิจกรรมรอง"; } else if(id==105){ return "กิจกรรมย่อย"; } else if(id==106){ return "กิจกรรมเสริม"; } else if(id==107){ return "กิจกรรมสนับสนุน"; } else if(id==108){ return "กิจกรรมรายละเอียด"; } else if(id==109){ return "ยุทธศาสตร์การจัดสรรงบประมาณ"; } else if(id==110) { return "เป้าหมายเชิงยุทธศาสตร์"; } else if(id==111) { return "เป้าหมายบริการกระทรวง"; } else if(id==112) { return "เป้าหมายบริการหน่วยงาน"; } else if(id==113) { return "เป้าประสงค์(เป้าหมาย)เชิงนโยบาย"; } else if(id==114) { return "ยุทธศาสตร์กระทรวง"; } else if(id==115) { return "กลยุทธ์หน่วยงาน"; } else if(id==116) { return "กลยุทธ์/วิธีการกรมฯ"; } else if(id==117) { return ""; } else if(id==118) { - return "แนวทางการจัดสรรงบประมาณ (กลยุทธ์หลัก)"; + return "แนวทางการจัดสรรงบประมาณ(กลยุทธ์หลัก)"; } else if(id==119) { return "วิสัยทัศน์"; } else if(id==120) { return "พันธกิจ"; } else if(id==121) { return "ประเด็นยุทธศาสตร์"; } return "undefined"; } }
true
true
public String getName() { if(id==100) { return "ROOT"; } else if(id==101){ return "แผนงาน"; } else if(id==102){ return "ผลผลิต/โครงการ"; } else if(id==103){ return "กิจกรรมหลัก"; } else if(id==104){ return "กิจกรรมรอง"; } else if(id==105){ return "กิจกรรมย่อย"; } else if(id==106){ return "กิจกรรมเสริม"; } else if(id==107){ return "กิจกรรมสนับสนุน"; } else if(id==108){ return "กิจกรรมรายละเอียด"; } else if(id==109){ return "ยุทธศาสตร์การจัดสรรงบประมาณ"; } else if(id==110) { return "เป้าหมายเชิงยุทธศาสตร์"; } else if(id==111) { return "เป้าหมายบริการกระทรวง"; } else if(id==112) { return "เป้าหมายบริการหน่วยงาน"; } else if(id==113) { return "เป้าประสงค์(เป้าหมาย)เชิงนโยบาย"; } else if(id==114) { return "ยุทธศาสตร์กระทรวง"; } else if(id==115) { return "กลยุทธ์หน่วยงาน"; } else if(id==116) { return "กลยุทธ์/วิธีการกรมฯ"; } else if(id==117) { return ""; } else if(id==118) { return "แนวทางการจัดสรรงบประมาณ (กลยุทธ์หลัก)"; } else if(id==119) { return "วิสัยทัศน์"; } else if(id==120) { return "พันธกิจ"; } else if(id==121) { return "ประเด็นยุทธศาสตร์"; } return "undefined"; }
public String getName() { if(id==100) { return "ROOT"; } else if(id==101){ return "แผนงาน"; } else if(id==102){ return "ผลผลิต/โครงการ"; } else if(id==103){ return "กิจกรรมหลัก"; } else if(id==104){ return "กิจกรรมรอง"; } else if(id==105){ return "กิจกรรมย่อย"; } else if(id==106){ return "กิจกรรมเสริม"; } else if(id==107){ return "กิจกรรมสนับสนุน"; } else if(id==108){ return "กิจกรรมรายละเอียด"; } else if(id==109){ return "ยุทธศาสตร์การจัดสรรงบประมาณ"; } else if(id==110) { return "เป้าหมายเชิงยุทธศาสตร์"; } else if(id==111) { return "เป้าหมายบริการกระทรวง"; } else if(id==112) { return "เป้าหมายบริการหน่วยงาน"; } else if(id==113) { return "เป้าประสงค์(เป้าหมาย)เชิงนโยบาย"; } else if(id==114) { return "ยุทธศาสตร์กระทรวง"; } else if(id==115) { return "กลยุทธ์หน่วยงาน"; } else if(id==116) { return "กลยุทธ์/วิธีการกรมฯ"; } else if(id==117) { return ""; } else if(id==118) { return "แนวทางการจัดสรรงบประมาณ(กลยุทธ์หลัก)"; } else if(id==119) { return "วิสัยทัศน์"; } else if(id==120) { return "พันธกิจ"; } else if(id==121) { return "ประเด็นยุทธศาสตร์"; } return "undefined"; }
diff --git a/src/main/java/com/secondmarket/utility/EdgarUtils.java b/src/main/java/com/secondmarket/utility/EdgarUtils.java index 143cb43..45f49b3 100644 --- a/src/main/java/com/secondmarket/utility/EdgarUtils.java +++ b/src/main/java/com/secondmarket/utility/EdgarUtils.java @@ -1,339 +1,339 @@ package com.secondmarket.utility; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.regex.Pattern; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; import com.secondmarket.model.EdgarCompanyDetail; import com.secondmarket.model.EdgarDocDetail; import com.secondmarket.model.EdgarFilingDetail; import com.secondmarket.properties.SMProperties; /*** * * @author Danjuan Ye * */ public class EdgarUtils { public static final String preUrl = "http://www.sec.gov"; private SMProperties p; public EdgarUtils(SMProperties p) { this.p = p; } /*** * Replace the percent Encode Reserved Characters. * @param title * @return */ private String percentEncodeReservedCharacters(String title) { char[] chars = title.toCharArray(); for(int i=0; i<chars.length;i++){ if(!Character.isDigit(chars[i])&&!Character.isLetter(chars[i])){ chars[i]='-'; } } return new String(chars); } /** * Get the Edgar Doc according the crunchbase CompanyName and State * * @param companyName * @param state * @return */ public Map<String, EdgarCompanyDetail> getEdgarDoc(String companyName, String state) { if(companyName.length() < 2){ // System.out.println("---- "+ companyName + " --- "+state); return null; } // get company link first List<EdgarDocDetail> detailList = new ArrayList<EdgarDocDetail>(); List<EdgarDocDetail> multiDocList; List<EdgarCompanyDetail> nameList = new ArrayList<EdgarCompanyDetail>(); EdgarCompanyDetail item; EdgarDocDetail temp; String url2; Elements tds; String urlName = this.percentEncodeReservedCharacters(companyName.trim()); String url = "http://www.sec.gov/cgi-bin/browse-edgar?company=" + urlName + "&match=contains&CIK=&filenum=&State=" + state + "&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany"; // System.out.println(url); Document doc; try { - doc = Jsoup.connect(url).get(); + doc = Jsoup.connect(url).timeout(10*1000).get(); for (Element table : doc.select("table.tableFile2")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 3) { item = getCompanyTitlebySearch(tds); if (item != null) nameList.add(item); } else if (tds.size() == 5) { temp = getCompanyDetailDocListbySearch(tds); if (temp != null) { detailList.add(temp); } } } } if (nameList.size() > 0) { // System.out.println("--------Multiple Company Names --------"); for (EdgarCompanyDetail name : nameList) { url2 = preUrl + name.getCompanyLink(); Document doc2; doc2 = Jsoup.connect(url2).get(); multiDocList = new ArrayList<EdgarDocDetail>(); for (Element table : doc2.select("table.tableFile2")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 5) { temp = getCompanyDetailDocListbySearch(tds); if (temp != null) { multiDocList.add(temp); } } } } name.setDetailList(multiDocList); } } else { // System.out.println("--------One Company Names --------"); if (detailList.size() > 0) { item = new EdgarCompanyDetail(); item.setCompanyName(companyName); item.setCompanyLink(url); item.setLocation(state); item.setDetailList(detailList); nameList.add(item); } } } catch (IOException e) { e.printStackTrace(); } return 
setEdgarFilingDetail(nameList); } /*** * Set the filing detail info for the specified the doc type. * * @param nameList * @return */ public Map<String, EdgarCompanyDetail> setEdgarFilingDetail( List<EdgarCompanyDetail> nameList) { Map<String, EdgarCompanyDetail> map = new TreeMap<String, EdgarCompanyDetail>(); List<EdgarFilingDetail> filingList; List<EdgarDocDetail> removedList = new ArrayList<EdgarDocDetail>(); // int companyNum = 1; // int docNum = 1; // int rmNum = 1; String url; boolean flag; EdgarFilingDetail item; Elements tds; List<Pattern> patternList = getInfoboxPattern(); for (EdgarCompanyDetail name : nameList) { // System.out.println("COMPANY " + (companyNum++) + " -> " // + name.getCompanyName() + " -- " + name.getLocation()); // docNum = 1; flag = false; for (EdgarDocDetail entry : name.getDetailList()) { // System.out.println("DOC " + (docNum++) + " " // + entry.getFilings() + " " + entry.getFileDate()); url = preUrl + entry.getFormatLink(); filingList = new ArrayList<EdgarFilingDetail>(); Document doc; try { doc = Jsoup.connect(url).get(); for (Element table : doc.select("table.tableFile")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 5) { item = getDocsbyCompany(tds, patternList); if (item != null) { filingList.add(item); flag = true; } } } } } catch (IOException e) { e.printStackTrace(); } entry.setDocList(filingList); if(!flag){ removedList.add(entry); } } if(removedList.size()>0){ // rmNum = 1; //removed the EdgarDocs do not have filing details for(EdgarDocDetail rmItem: removedList){ name.getDetailList().remove(rmItem); // System.out.println("REMOVED " + (rmNum++) + " " // + rmItem.getFilings() + " " + rmItem.getFileDate()); } } removedList.clear(); if (!map.containsKey(name.getCompanyName())&&name.getDetailList().size()>0 && flag) { // System.out.println("PUT INTO MAP : "+name.getCompanyName().replace('.', '#')); map.put(name.getCompanyName().replace('.', '#'), name); } } return map; } /*** * Get the filing detail info for the specified the doc type. 
* * @param tds * @return */ private EdgarFilingDetail getDocsbyCompany(Elements tds, List<Pattern> pList) { EdgarFilingDetail item = null; String filingName = tds.get(2).getElementsByTag("a").get(0).ownText() .toLowerCase(); boolean flag = false; for(Pattern pattern: pList){ if(checkPatternMatch(pattern, filingName)){ flag = true; break; } } if (flag) { item = new EdgarFilingDetail(); item.setSeq(tds.get(0).ownText()); item.setDescr(tds.get(1).ownText()); item.setDocName(tds.get(2).getElementsByTag("a").get(0).ownText()); item.setDocLink(tds.get(2).getElementsByTag("a").get(0) .attr("href")); item.setType(tds.get(3).ownText()); item.setSize(tds.get(4).ownText()); // for test // System.out.println("SEQ " + tds.get(0).ownText() + " -> " // + tds.get(2).getElementsByTag("a").get(0).ownText()); } // else{ // System.out.println("----SEQ " + tds.get(0).ownText() + " -> " // + tds.get(2).getElementsByTag("a").get(0).ownText()); // } return item; } /*** * * @param tds * @return */ private EdgarDocDetail getCompanyDetailDocListbySearch(Elements tds) { EdgarDocDetail item = null; item = new EdgarDocDetail(); if (tds.get(4).hasText()) { item.setFileNum(tds.get(4).text()); item.setFileNumLink(tds.get(4).getElementsByTag("a").get(0) .attr("href")); } item.setFilings(tds.get(0).ownText()); item.setFormat(tds.get(1).getElementsByTag("a").get(0).ownText()); item.setFormatLink(tds.get(1).getElementsByTag("a").get(0).attr("href")); item.setDescr(tds.get(2).text()); item.setFileDate(tds.get(3).text()); return item; } /*** * */ private EdgarCompanyDetail getCompanyTitlebySearch(Elements tds) { EdgarCompanyDetail item = null; if (tds.get(1).getElementsByTag("a").size() == 0 && !tds.get(2).text().equals("")) { // looking for the company do not have a SIC (private company) item = new EdgarCompanyDetail(); item.setCompanyLink(tds.get(0).getElementsByTag("a").get(0) .attr("href")); item.setCompanyName(tds.get(1).ownText()); // item.setSICNum(tds.get(1).getElementsByTag("a").get(0).ownText()); // item.setSICLink(tds.get(1).getElementsByTag("a").get(0) // .attr("href")); item.setLocation(tds.get(2).text()); item.setLocationLink(tds.get(2).getElementsByTag("a").get(0) .attr("href")); } return item; } /////////////////////////////////////////////////////////////////////////// //PATTERN MATCH /////////////////////////////////////////////////////////////////////////// public List<Pattern> getInfoboxPattern(){ return p.getValues("DOCTYPE", "OPTIONS"); } public Pattern getInfoboxSpecifiedPattern(){ Pattern myPattern = null; try { myPattern = p.getValue("DOCTYPE", "OPTIONS", "OPTION"); } catch (Exception e) { e.printStackTrace(); } return myPattern; } public static boolean checkPatternMatch(Pattern myPattern, String text){ return myPattern.matcher(text).matches(); } /** * @param args * @throws IOException */ public static void main(String[] args) throws IOException { SMProperties property = null; try { property = SMProperties.getInstance("EDGAR"); } catch (Exception e) { e.printStackTrace(); } // foursquare, secondMarket EdgarUtils dataImporter = new EdgarUtils(property); Map<String, EdgarCompanyDetail> titleList = dataImporter.getEdgarDoc( "Facebook", "CA"); System.out.println(titleList.toString()); } }
true
true
public Map<String, EdgarCompanyDetail> getEdgarDoc(String companyName, String state) { if(companyName.length() < 2){ // System.out.println("---- "+ companyName + " --- "+state); return null; } // get company link first List<EdgarDocDetail> detailList = new ArrayList<EdgarDocDetail>(); List<EdgarDocDetail> multiDocList; List<EdgarCompanyDetail> nameList = new ArrayList<EdgarCompanyDetail>(); EdgarCompanyDetail item; EdgarDocDetail temp; String url2; Elements tds; String urlName = this.percentEncodeReservedCharacters(companyName.trim()); String url = "http://www.sec.gov/cgi-bin/browse-edgar?company=" + urlName + "&match=contains&CIK=&filenum=&State=" + state + "&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany"; // System.out.println(url); Document doc; try { doc = Jsoup.connect(url).get(); for (Element table : doc.select("table.tableFile2")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 3) { item = getCompanyTitlebySearch(tds); if (item != null) nameList.add(item); } else if (tds.size() == 5) { temp = getCompanyDetailDocListbySearch(tds); if (temp != null) { detailList.add(temp); } } } } if (nameList.size() > 0) { // System.out.println("--------Multiple Company Names --------"); for (EdgarCompanyDetail name : nameList) { url2 = preUrl + name.getCompanyLink(); Document doc2; doc2 = Jsoup.connect(url2).get(); multiDocList = new ArrayList<EdgarDocDetail>(); for (Element table : doc2.select("table.tableFile2")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 5) { temp = getCompanyDetailDocListbySearch(tds); if (temp != null) { multiDocList.add(temp); } } } } name.setDetailList(multiDocList); } } else { // System.out.println("--------One Company Names --------"); if (detailList.size() > 0) { item = new EdgarCompanyDetail(); item.setCompanyName(companyName); item.setCompanyLink(url); item.setLocation(state); item.setDetailList(detailList); nameList.add(item); } } } catch (IOException e) { e.printStackTrace(); } return setEdgarFilingDetail(nameList); }
public Map<String, EdgarCompanyDetail> getEdgarDoc(String companyName, String state) { if(companyName.length() < 2){ // System.out.println("---- "+ companyName + " --- "+state); return null; } // get company link first List<EdgarDocDetail> detailList = new ArrayList<EdgarDocDetail>(); List<EdgarDocDetail> multiDocList; List<EdgarCompanyDetail> nameList = new ArrayList<EdgarCompanyDetail>(); EdgarCompanyDetail item; EdgarDocDetail temp; String url2; Elements tds; String urlName = this.percentEncodeReservedCharacters(companyName.trim()); String url = "http://www.sec.gov/cgi-bin/browse-edgar?company=" + urlName + "&match=contains&CIK=&filenum=&State=" + state + "&Country=&SIC=&owner=exclude&Find=Find+Companies&action=getcompany"; // System.out.println(url); Document doc; try { doc = Jsoup.connect(url).timeout(10*1000).get(); for (Element table : doc.select("table.tableFile2")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 3) { item = getCompanyTitlebySearch(tds); if (item != null) nameList.add(item); } else if (tds.size() == 5) { temp = getCompanyDetailDocListbySearch(tds); if (temp != null) { detailList.add(temp); } } } } if (nameList.size() > 0) { // System.out.println("--------Multiple Company Names --------"); for (EdgarCompanyDetail name : nameList) { url2 = preUrl + name.getCompanyLink(); Document doc2; doc2 = Jsoup.connect(url2).get(); multiDocList = new ArrayList<EdgarDocDetail>(); for (Element table : doc2.select("table.tableFile2")) { for (Element row : table.select("tr")) { tds = row.select("td"); if (tds.size() == 5) { temp = getCompanyDetailDocListbySearch(tds); if (temp != null) { multiDocList.add(temp); } } } } name.setDetailList(multiDocList); } } else { // System.out.println("--------One Company Names --------"); if (detailList.size() > 0) { item = new EdgarCompanyDetail(); item.setCompanyName(companyName); item.setCompanyLink(url); item.setLocation(state); item.setDetailList(detailList); nameList.add(item); } } } catch (IOException e) { e.printStackTrace(); } return setEdgarFilingDetail(nameList); }
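The only change in this record is the added timeout(10*1000) on the first Jsoup.connect call, so the EDGAR company search fails after ten seconds instead of hanging on the library's default timeout (the second request for per-company pages is left untouched). A minimal sketch of the same call pattern against an illustrative URL:

import java.io.IOException;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

// Sketch of the connection pattern from the fix: an explicit timeout, in milliseconds,
// set on the Connection before get(). The URL here is illustrative, not the EDGAR query.
final class JsoupTimeoutSketch {
    static Document fetch(String url) throws IOException {
        return Jsoup.connect(url)
                    .timeout(10 * 1000)     // give up after 10s instead of the library default
                    .get();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(fetch("https://example.com/").title());
    }
}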
diff --git a/net.sourceforge.vrapper.core/src/net/sourceforge/vrapper/vim/commands/SubstitutionOperation.java b/net.sourceforge.vrapper.core/src/net/sourceforge/vrapper/vim/commands/SubstitutionOperation.java index 5859d696..ac257b38 100644 --- a/net.sourceforge.vrapper.core/src/net/sourceforge/vrapper/vim/commands/SubstitutionOperation.java +++ b/net.sourceforge.vrapper.core/src/net/sourceforge/vrapper/vim/commands/SubstitutionOperation.java @@ -1,152 +1,157 @@ package net.sourceforge.vrapper.vim.commands; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import net.sourceforge.vrapper.platform.SearchAndReplaceService; import net.sourceforge.vrapper.platform.TextContent; import net.sourceforge.vrapper.utils.ContentType; import net.sourceforge.vrapper.utils.LineInformation; import net.sourceforge.vrapper.utils.TextRange; import net.sourceforge.vrapper.vim.EditorAdaptor; /** * Perform a substitution on a range of lines. Can be current line, * all lines, or any range in between. * For example, :s/foo/blah/g or :%s/foo/blah/g or :2,5s/foo/blah/g */ public class SubstitutionOperation extends SimpleTextOperation { private String substitution; public SubstitutionOperation(String substitution) { this.substitution = substitution; } @Override public void execute(EditorAdaptor editorAdaptor, TextRange region, ContentType contentType) { TextContent model = editorAdaptor.getModelContent(); int startLine; int endLine; if(region == null) { //special case, recalculate 'current line' every time //(this is to ensure '.' always works on current line) int offset = editorAdaptor.getPosition().getModelOffset(); startLine = model.getLineInformationOfOffset(offset).getNumber(); endLine = startLine; } else { startLine = model.getLineInformationOfOffset( region.getLeftBound().getModelOffset() ).getNumber(); endLine = model.getLineInformationOfOffset( region.getRightBound().getModelOffset() ).getNumber(); + if(model.getTextLength() == region.getRightBound().getModelOffset()) { + //the endLine calculation is off-by-one for the last line in the file + //force it to actually use the last line + endLine = model.getNumberOfLines(); + } } //whatever character is after 's' is our delimiter String delim = "" + substitution.charAt( substitution.indexOf('s') + 1); //split on the delimiter, unless that delimiter is escaped with a backslash (:s/\/\///) String[] fields = substitution.split("(?<!\\\\)"+delim); String find = ""; String replace = ""; String flags = ""; //'s' or '%s' = fields[0] if(fields.length > 1) { find = fields[1]; if(find.length() == 0) { //if no pattern defined, use last search find = editorAdaptor.getRegisterManager().getRegister("/").getContent().getText(); } } if(fields.length > 2) { replace = fields[2]; //Vim uses \r to represent a newline but Eclipse interprets that as carriage-return //Eclipse uses \R as a platform-independent newline replace = replace.replaceAll("\\\\r", "\\\\R"); } if(fields.length > 3) { flags = fields[3]; } //before attempting substitution, is this regex even valid? 
try { Pattern.compile(find); } catch (PatternSyntaxException e) { editorAdaptor.getUserInterfaceService().setErrorMessage(e.getDescription()); return; } int numReplaces = 0; int lineReplaceCount = 0; if(startLine == endLine) { LineInformation currentLine = model.getLineInformation(startLine); //begin and end compound change so a single 'u' undoes all replaces editorAdaptor.getHistory().beginCompoundChange(); numReplaces = performReplace(currentLine, find, replace, flags, editorAdaptor); editorAdaptor.getHistory().endCompoundChange(); } else { LineInformation line; int lineChanges = 0; int totalLines = model.getNumberOfLines(); int lineDiff; //perform search individually on each line in the range //(so :%s without 'g' flag runs once on each line) editorAdaptor.getHistory().beginCompoundChange(); for(int i=startLine; i < endLine; i++) { line = model.getLineInformation(i); lineChanges = performReplace(line, find, replace, flags, editorAdaptor); if(lineChanges > 0) { lineReplaceCount++; } numReplaces += lineChanges; lineDiff = model.getNumberOfLines() - totalLines; if(lineDiff > 0) { //lines were introduced as a result of this replacement //skip over those introduced lines and move on to the next intended line i += lineDiff; endLine += lineDiff; totalLines += lineDiff; } } editorAdaptor.getHistory().endCompoundChange(); } if(numReplaces == 0) { editorAdaptor.getUserInterfaceService().setErrorMessage("'"+find+"' not found"); } else if(lineReplaceCount > 0) { editorAdaptor.getUserInterfaceService().setInfoMessage( numReplaces + " substitutions on " + lineReplaceCount + " lines" ); } //enable '&', 'g&', and ':s' features editorAdaptor.getRegisterManager().setLastSubstitution(this); } private int performReplace(LineInformation line, String find, String replace, String flags, EditorAdaptor editorAdaptor) { //Eclipse regex doesn't handle '^' and '$' like Vim does. //Time for some special cases! if(find.equals("^")) { //insert the text at the beginning of the line editorAdaptor.getModelContent().replace(line.getBeginOffset(), 0, replace); return 1; } else if(find.equals("$")) { //insert the text at the end of the line editorAdaptor.getModelContent().replace(line.getEndOffset(), 0, replace); return 1; } else { //let Eclipse handle the regex SearchAndReplaceService searchAndReplace = editorAdaptor.getSearchAndReplaceService(); return searchAndReplace.replace(line, find, replace, flags); } } public TextOperation repetition() { return this; } }
true
true
public void execute(EditorAdaptor editorAdaptor, TextRange region, ContentType contentType) { TextContent model = editorAdaptor.getModelContent(); int startLine; int endLine; if(region == null) { //special case, recalculate 'current line' every time //(this is to ensure '.' always works on current line) int offset = editorAdaptor.getPosition().getModelOffset(); startLine = model.getLineInformationOfOffset(offset).getNumber(); endLine = startLine; } else { startLine = model.getLineInformationOfOffset( region.getLeftBound().getModelOffset() ).getNumber(); endLine = model.getLineInformationOfOffset( region.getRightBound().getModelOffset() ).getNumber(); } //whatever character is after 's' is our delimiter String delim = "" + substitution.charAt( substitution.indexOf('s') + 1); //split on the delimiter, unless that delimiter is escaped with a backslash (:s/\/\///) String[] fields = substitution.split("(?<!\\\\)"+delim); String find = ""; String replace = ""; String flags = ""; //'s' or '%s' = fields[0] if(fields.length > 1) { find = fields[1]; if(find.length() == 0) { //if no pattern defined, use last search find = editorAdaptor.getRegisterManager().getRegister("/").getContent().getText(); } } if(fields.length > 2) { replace = fields[2]; //Vim uses \r to represent a newline but Eclipse interprets that as carriage-return //Eclipse uses \R as a platform-independent newline replace = replace.replaceAll("\\\\r", "\\\\R"); } if(fields.length > 3) { flags = fields[3]; } //before attempting substitution, is this regex even valid? try { Pattern.compile(find); } catch (PatternSyntaxException e) { editorAdaptor.getUserInterfaceService().setErrorMessage(e.getDescription()); return; } int numReplaces = 0; int lineReplaceCount = 0; if(startLine == endLine) { LineInformation currentLine = model.getLineInformation(startLine); //begin and end compound change so a single 'u' undoes all replaces editorAdaptor.getHistory().beginCompoundChange(); numReplaces = performReplace(currentLine, find, replace, flags, editorAdaptor); editorAdaptor.getHistory().endCompoundChange(); } else { LineInformation line; int lineChanges = 0; int totalLines = model.getNumberOfLines(); int lineDiff; //perform search individually on each line in the range //(so :%s without 'g' flag runs once on each line) editorAdaptor.getHistory().beginCompoundChange(); for(int i=startLine; i < endLine; i++) { line = model.getLineInformation(i); lineChanges = performReplace(line, find, replace, flags, editorAdaptor); if(lineChanges > 0) { lineReplaceCount++; } numReplaces += lineChanges; lineDiff = model.getNumberOfLines() - totalLines; if(lineDiff > 0) { //lines were introduced as a result of this replacement //skip over those introduced lines and move on to the next intended line i += lineDiff; endLine += lineDiff; totalLines += lineDiff; } } editorAdaptor.getHistory().endCompoundChange(); } if(numReplaces == 0) { editorAdaptor.getUserInterfaceService().setErrorMessage("'"+find+"' not found"); } else if(lineReplaceCount > 0) { editorAdaptor.getUserInterfaceService().setInfoMessage( numReplaces + " substitutions on " + lineReplaceCount + " lines" ); } //enable '&', 'g&', and ':s' features editorAdaptor.getRegisterManager().setLastSubstitution(this); }
public void execute(EditorAdaptor editorAdaptor, TextRange region, ContentType contentType) { TextContent model = editorAdaptor.getModelContent(); int startLine; int endLine; if(region == null) { //special case, recalculate 'current line' every time //(this is to ensure '.' always works on current line) int offset = editorAdaptor.getPosition().getModelOffset(); startLine = model.getLineInformationOfOffset(offset).getNumber(); endLine = startLine; } else { startLine = model.getLineInformationOfOffset( region.getLeftBound().getModelOffset() ).getNumber(); endLine = model.getLineInformationOfOffset( region.getRightBound().getModelOffset() ).getNumber(); if(model.getTextLength() == region.getRightBound().getModelOffset()) { //the endLine calculation is off-by-one for the last line in the file //force it to actually use the last line endLine = model.getNumberOfLines(); } } //whatever character is after 's' is our delimiter String delim = "" + substitution.charAt( substitution.indexOf('s') + 1); //split on the delimiter, unless that delimiter is escaped with a backslash (:s/\/\///) String[] fields = substitution.split("(?<!\\\\)"+delim); String find = ""; String replace = ""; String flags = ""; //'s' or '%s' = fields[0] if(fields.length > 1) { find = fields[1]; if(find.length() == 0) { //if no pattern defined, use last search find = editorAdaptor.getRegisterManager().getRegister("/").getContent().getText(); } } if(fields.length > 2) { replace = fields[2]; //Vim uses \r to represent a newline but Eclipse interprets that as carriage-return //Eclipse uses \R as a platform-independent newline replace = replace.replaceAll("\\\\r", "\\\\R"); } if(fields.length > 3) { flags = fields[3]; } //before attempting substitution, is this regex even valid? try { Pattern.compile(find); } catch (PatternSyntaxException e) { editorAdaptor.getUserInterfaceService().setErrorMessage(e.getDescription()); return; } int numReplaces = 0; int lineReplaceCount = 0; if(startLine == endLine) { LineInformation currentLine = model.getLineInformation(startLine); //begin and end compound change so a single 'u' undoes all replaces editorAdaptor.getHistory().beginCompoundChange(); numReplaces = performReplace(currentLine, find, replace, flags, editorAdaptor); editorAdaptor.getHistory().endCompoundChange(); } else { LineInformation line; int lineChanges = 0; int totalLines = model.getNumberOfLines(); int lineDiff; //perform search individually on each line in the range //(so :%s without 'g' flag runs once on each line) editorAdaptor.getHistory().beginCompoundChange(); for(int i=startLine; i < endLine; i++) { line = model.getLineInformation(i); lineChanges = performReplace(line, find, replace, flags, editorAdaptor); if(lineChanges > 0) { lineReplaceCount++; } numReplaces += lineChanges; lineDiff = model.getNumberOfLines() - totalLines; if(lineDiff > 0) { //lines were introduced as a result of this replacement //skip over those introduced lines and move on to the next intended line i += lineDiff; endLine += lineDiff; totalLines += lineDiff; } } editorAdaptor.getHistory().endCompoundChange(); } if(numReplaces == 0) { editorAdaptor.getUserInterfaceService().setErrorMessage("'"+find+"' not found"); } else if(lineReplaceCount > 0) { editorAdaptor.getUserInterfaceService().setInfoMessage( numReplaces + " substitutions on " + lineReplaceCount + " lines" ); } //enable '&', 'g&', and ':s' features editorAdaptor.getRegisterManager().setLastSubstitution(this); }
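The block added in this record handles a range whose right bound sits exactly at the end of the buffer: the in-code comment notes the end-line calculation is off by one there, so the end line is forced to model.getNumberOfLines(), and because the per-line loop below it is exclusive (for (int i = startLine; i < endLine; i++)), that keeps the substitution running on the file's last line. A small sketch of the same clamp against a plain String standing in for the editor model (hypothetical helpers, not Vrapper's TextContent API):

// Sketch of the end-of-buffer clamp: when the right bound equals the text length,
// bump endLine to the total line count so an exclusive line loop still visits the
// final line. The String here stands in for the editor model; names are illustrative.
final class LastLineClampSketch {
    static int numberOfLines(String text) {
        int lines = 1;
        for (int i = 0; i < text.length(); i++) {
            if (text.charAt(i) == '\n') lines++;
        }
        return lines;
    }

    static int visitLines(String text, int startLine, int endLine, int rightBoundOffset) {
        if (rightBoundOffset == text.length()) {
            endLine = numberOfLines(text);   // the clamp from the fix above
        }
        int visited = 0;
        for (int line = startLine; line < endLine; line++) {
            visited++;                        // a real implementation substitutes on this line
        }
        return visited;
    }

    public static void main(String[] args) {
        String text = "foo\nbar\nbaz";        // three lines, no trailing newline
        // An end-line lookup at the very end of the buffer would report line 2 and the
        // exclusive loop would stop before it; with the clamp all three lines are visited.
        System.out.println(visitLines(text, 0, 2, text.length()));  // 3
    }
}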
diff --git a/app/pdf/PdfGenerator.java b/app/pdf/PdfGenerator.java index cc72f04..b77f978 100644 --- a/app/pdf/PdfGenerator.java +++ b/app/pdf/PdfGenerator.java @@ -1,60 +1,60 @@ package pdf; import com.itextpdf.text.Font; import com.itextpdf.text.FontFactory; import play.Play; import java.io.File; import java.io.FileNotFoundException; import java.util.Date; /** * @author Lukasz Piliszczuk <lukasz.piliszczuk AT zenika.com> */ public abstract class PdfGenerator { private static final String GENERATED_PATH = "/generated/"; private static final String GENERATED_EXTENSION = ".pdf"; protected Font textBoldFont; protected Font textFont; protected Font titleFont; protected String rootPath; public PdfGenerator() { rootPath = Play.configuration.getProperty("my.pdf.resources.path"); String mode = Play.configuration.getProperty("application.mode"); if (mode.equals("dev")) { rootPath = Play.applicationPath.getPath() + rootPath; } else { - rootPath = "/app/" + rootPath; + rootPath = "/app/app/" + rootPath; } FontFactory.register(new File(rootPath + "/ARIALN.ttf").getPath(), "arialnarrow_normal"); FontFactory.register(new File(rootPath + "/ARIALNB.ttf").getPath(), "arialnarrow_bold"); textBoldFont = FontFactory.getFont("arialnarrow_bold", 8); textFont = FontFactory.getFont("arialnarrow_normal", 8); titleFont = FontFactory.getFont("arialnarrow_bold", 14); } protected File getFileForGeneration(String folder, String name) { File folderFile = new File(GENERATED_PATH + folder); if (!folderFile.exists()) { folderFile.mkdirs(); } return new File(folderFile, name + GENERATED_EXTENSION); } protected File getSupinfoLogo() { return new File(rootPath + "/supinfo_logo.png"); } }
true
true
public PdfGenerator() {
    rootPath = Play.configuration.getProperty("my.pdf.resources.path");

    String mode = Play.configuration.getProperty("application.mode");
    if (mode.equals("dev")) {
        rootPath = Play.applicationPath.getPath() + rootPath;
    } else {
        rootPath = "/app/" + rootPath;
    }

    FontFactory.register(new File(rootPath + "/ARIALN.ttf").getPath(), "arialnarrow_normal");
    FontFactory.register(new File(rootPath + "/ARIALNB.ttf").getPath(), "arialnarrow_bold");

    textBoldFont = FontFactory.getFont("arialnarrow_bold", 8);
    textFont = FontFactory.getFont("arialnarrow_normal", 8);
    titleFont = FontFactory.getFont("arialnarrow_bold", 14);
}
public PdfGenerator() {
    rootPath = Play.configuration.getProperty("my.pdf.resources.path");

    String mode = Play.configuration.getProperty("application.mode");
    if (mode.equals("dev")) {
        rootPath = Play.applicationPath.getPath() + rootPath;
    } else {
        rootPath = "/app/app/" + rootPath;
    }

    FontFactory.register(new File(rootPath + "/ARIALN.ttf").getPath(), "arialnarrow_normal");
    FontFactory.register(new File(rootPath + "/ARIALNB.ttf").getPath(), "arialnarrow_bold");

    textBoldFont = FontFactory.getFont("arialnarrow_bold", 8);
    textFont = FontFactory.getFont("arialnarrow_normal", 8);
    titleFont = FontFactory.getFont("arialnarrow_bold", 14);
}
diff --git a/dspace/src/org/dspace/app/webui/servlet/admin/ItemMapServlet.java b/dspace/src/org/dspace/app/webui/servlet/admin/ItemMapServlet.java index 8d989e591..b9b1e8ccd 100644 --- a/dspace/src/org/dspace/app/webui/servlet/admin/ItemMapServlet.java +++ b/dspace/src/org/dspace/app/webui/servlet/admin/ItemMapServlet.java @@ -1,411 +1,411 @@ /* * ItemMapServlet.java * * Version: $Revision$ * * Date: $Date$ * * Copyright (c) 2002-2005, Hewlett-Packard Company and Massachusetts * Institute of Technology. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * - Neither the name of the Hewlett-Packard Company nor the name of the * Massachusetts Institute of Technology nor the names of their * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
*/ package org.dspace.app.webui.servlet.admin; import java.util.HashMap; import java.util.Map; import java.util.LinkedList; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.dspace.app.webui.servlet.DSpaceServlet; import org.dspace.app.webui.util.JSPManager; import org.dspace.app.webui.util.UIUtil; import org.dspace.authorize.AuthorizeException; import org.dspace.authorize.AuthorizeManager; import org.dspace.browse.Browse; import org.dspace.content.Collection; import org.dspace.content.Item; import org.dspace.content.ItemIterator; import org.dspace.core.Constants; import org.dspace.core.Context; import org.dspace.storage.rdbms.DatabaseManager; import org.dspace.storage.rdbms.TableRow; import org.dspace.storage.rdbms.TableRowIterator; /** * Servlet for editing and deleting (expunging) items * * @version $Revision$ */ public class ItemMapServlet extends DSpaceServlet { protected void doDSGet(Context context, HttpServletRequest request, HttpServletResponse response) throws java.sql.SQLException, javax.servlet.ServletException, java.io.IOException, AuthorizeException { doDSPost(context, request, response); } protected void doDSPost(Context context, HttpServletRequest request, HttpServletResponse response) throws java.sql.SQLException, javax.servlet.ServletException, java.io.IOException, AuthorizeException { String jspPage = null; // get with a collection ID means put up browse window int myID = UIUtil.getIntParameter(request, "cid"); // get collection Collection myCollection = Collection.find(context, myID); // authorize check AuthorizeManager.authorizeAction(context, myCollection, Constants.COLLECTION_ADMIN); String action = request.getParameter("action"); if (action == null) { action = ""; } // Defined non-empty value shows that 'Cancel' has been pressed String cancel = request.getParameter("cancel"); if (cancel == null) { cancel = ""; } if (action.equals("") || !cancel.equals("")) { // get with no action parameter set means to put up the main page // which is statistics and some command buttons to add/remove items // // also holds for interruption by pressing 'Cancel' int count_native = 0; // # of items owned by this collection int count_import = 0; // # of virtual items Map myItems = new HashMap(); // # for the browser Map myCollections = new HashMap(); // collections for list Map myCounts = new HashMap(); // counts for each collection // get all items from that collection, add them to a hash ItemIterator i = myCollection.getItems(); // iterate through the items in this collection, and count how many // are native, and how many are imports, and which collections they // came from while (i.hasNext()) { Item myItem = i.next(); // get key for hash Integer myKey = new Integer(myItem.getID()); if (myItem.isOwningCollection(myCollection)) { count_native++; } else { count_import++; } // is the collection in the hash? 
Collection owningCollection = myItem.getOwningCollection(); Integer cKey = new Integer(owningCollection.getID()); if (myCollections.containsKey(cKey)) { Integer x = (Integer) myCounts.get(cKey); int myCount = x.intValue() + 1; // increment count for that collection myCounts.put(cKey, new Integer(myCount)); } else { // store and initialize count myCollections.put(cKey, owningCollection); myCounts.put(cKey, new Integer(1)); } // store the item myItems.put(myKey, myItem); } // remove this collection's entry because we already have a native // count myCollections.remove(new Integer(myCollection.getID())); // sort items - later // show page request.setAttribute("collection", myCollection); request.setAttribute("count_native", new Integer(count_native)); request.setAttribute("count_import", new Integer(count_import)); request.setAttribute("items", myItems); request.setAttribute("collections", myCollections); request.setAttribute("collection_counts", myCounts); request .setAttribute("all_collections", Collection .findAll(context)); // show this page when we're done jspPage = "itemmap-main.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } /* * else if( action.equals("add") ) { int itemID = * UIUtil.getIntParameter(request, "item_id"); String handle = * (String)request.getParameter("handle"); boolean error = true; Item * itemToAdd = null; * * if( itemID > 0 ) { itemToAdd = Item.find(context, itemID); * * if( itemToAdd != null ) error = false; } else if(handle != null && * !handle.equals("")) { DSpaceObject * dso=HandleManager.resolveToObject(context, handle); * * if(dso != null && dso.getType() == Constants.ITEM) { itemToAdd = * (Item)dso; error = false; } } * * //FIXME: error handling! if( !error ) { String myTitle = * itemToAdd.getDC("title",null,Item.ANY)[0].value; String ownerName = * itemToAdd.getOwningCollection().getMetadata("name"); * // hook up item, but first, does it belong already? 
TableRowIterator * tri = DatabaseManager.query(context, "collection2item", "SELECT * collection2item.* FROM collection2item WHERE " + "collection_id=" + * myCollection.getID() + " AND item_id=" + itemToAdd.getID()); * * if(tri.hasNext()) { request.setAttribute("message", "Item is already * part of that collection!"); } else { // Create mapping * myCollection.addItem( itemToAdd ); * // set up a nice 'done' message request.setAttribute("message", * "Item added successfully: <br> " + myTitle + " <br> From Collection: * <br> " + ownerName); * } * * request.setAttribute("collection", myCollection); * // show this page when we're done jspPage = "itemmap-info.jsp"; * // show the page JSPManager.showJSP(request, response, jspPage); } * else { // Display an error } } else if( action.equals("Add Entire * Collection") ) { int targetID = UIUtil.getIntParameter(request, * "collection2import"); * * Collection targetCollection = Collection.find(context, targetID); * // get all items from that collection and add them if not // already * added * // get all items to be added ItemIterator i = * targetCollection.getItems(); Map toAdd = new HashMap(); String * message = ""; * * while( i.hasNext() ) { Item myItem = i.next(); * * toAdd.put(new Integer(myItem.getID()), myItem); } * // now see what we already have, removing dups from the 'toAdd' list * i = myCollection.getItems(); * * while( i.hasNext() ) { Item myItem = i.next(); Integer myKey = new * Integer(myItem.getID()); * // remove works even if key isn't present toAdd.remove(myKey); } * // what's left in toAdd should be added Iterator addKeys = * toAdd.keySet().iterator(); * * while( addKeys.hasNext() ) { Item myItem = * (Item)toAdd.get(addKeys.next()); myCollection.addItem(myItem); * message += " <br> Added item ID: " + myItem.getID(); } * * request.setAttribute("message", message); * request.setAttribute("collection", myCollection); * // show this page when we're done jspPage = "itemmap-info.jsp"; * // show the page JSPManager.showJSP(request, response, jspPage); } */ else if (action.equals("Remove")) { // get item IDs to remove String[] itemIDs = request.getParameterValues("item_ids"); String message = "remove"; LinkedList removedItems = new LinkedList(); for (int j = 0; j < itemIDs.length; j++) { int i = Integer.parseInt(itemIDs[j]); removedItems.add(itemIDs[j]); Item myItem = Item.find(context, i); // make sure item doesn't belong to this collection if (!myItem.isOwningCollection(myCollection)) { myCollection.removeItem(myItem); Browse.itemChanged(context,myItem); } } request.setAttribute("message", message); request.setAttribute("collection", myCollection); request.setAttribute("processedItems", removedItems); // show this page when we're done jspPage = "itemmap-info.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } else if (action.equals("Add")) { // get item IDs to add String[] itemIDs = request.getParameterValues("item_ids"); String message = "added"; LinkedList addedItems = new LinkedList(); if (itemIDs == null) { message = "none-selected"; } else { for (int j = 0; j < itemIDs.length; j++) { int i = Integer.parseInt(itemIDs[j]); Item myItem = Item.find(context, i); if (AuthorizeManager.authorizeActionBoolean(context, myItem, Constants.READ)) { // make sure item doesn't belong to this collection if (!myItem.isOwningCollection(myCollection)) { myCollection.addItem(myItem); Browse.itemChanged(context,myItem); addedItems.add(itemIDs[j]); } } } } request.setAttribute("message", message); request.setAttribute("collection", 
myCollection); request.setAttribute("processedItems", addedItems); // show this page when we're done jspPage = "itemmap-info.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } else if (action.equals("Search Authors")) { // find all items with a matching author string and not currently in // this collection // sorting by date would be ideal... String myQuery = (String) request.getParameter("namepart"); TableRowIterator tri = DatabaseManager.query(context, "SELECT * from ItemsByAuthor WHERE sort_author like ? AND " + "item_id NOT IN (SELECT item_id FROM collection2item " + - "WHERE collection_id= ? ", + "WHERE collection_id= ? )", '%'+myQuery.toLowerCase()+'%',myCollection.getID()); Map items = new HashMap(); while (tri.hasNext()) { TableRow tr = tri.next(); // now instantiate and pass items to 'Add' page int itemID = tr.getIntColumn("item_id"); Item myItem = Item.find(context, itemID); // only put on list if you can read item if (AuthorizeManager.authorizeActionBoolean(context, myItem, Constants.READ)) { items.put(new Integer(itemID), myItem); } } tri.close(); request.setAttribute("collection", myCollection); request.setAttribute("browsetext", myQuery); request.setAttribute("items", items); request.setAttribute("browsetype", new String("Add")); jspPage = "itemmap-browse.jsp"; JSPManager.showJSP(request, response, jspPage); } else if (action.equals("browse")) { // target collection to browse int t = UIUtil.getIntParameter(request, "t"); Collection targetCollection = Collection.find(context, t); // now find all imported items from that collection // seemingly inefficient, but database should have this query cached ItemIterator i = myCollection.getItems(); Map items = new HashMap(); while (i.hasNext()) { Item myItem = i.next(); if (myItem.isOwningCollection(targetCollection)) { Integer myKey = new Integer(myItem.getID()); items.put(myKey, myItem); } } request.setAttribute("collection", myCollection); request.setAttribute("browsetext", targetCollection .getMetadata("name")); request.setAttribute("items", items); request.setAttribute("browsetype", new String("Remove")); // show this page when we're done jspPage = "itemmap-browse.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } context.complete(); } }
true
true
protected void doDSPost(Context context, HttpServletRequest request, HttpServletResponse response) throws java.sql.SQLException, javax.servlet.ServletException, java.io.IOException, AuthorizeException { String jspPage = null; // get with a collection ID means put up browse window int myID = UIUtil.getIntParameter(request, "cid"); // get collection Collection myCollection = Collection.find(context, myID); // authorize check AuthorizeManager.authorizeAction(context, myCollection, Constants.COLLECTION_ADMIN); String action = request.getParameter("action"); if (action == null) { action = ""; } // Defined non-empty value shows that 'Cancel' has been pressed String cancel = request.getParameter("cancel"); if (cancel == null) { cancel = ""; } if (action.equals("") || !cancel.equals("")) { // get with no action parameter set means to put up the main page // which is statistics and some command buttons to add/remove items // // also holds for interruption by pressing 'Cancel' int count_native = 0; // # of items owned by this collection int count_import = 0; // # of virtual items Map myItems = new HashMap(); // # for the browser Map myCollections = new HashMap(); // collections for list Map myCounts = new HashMap(); // counts for each collection // get all items from that collection, add them to a hash ItemIterator i = myCollection.getItems(); // iterate through the items in this collection, and count how many // are native, and how many are imports, and which collections they // came from while (i.hasNext()) { Item myItem = i.next(); // get key for hash Integer myKey = new Integer(myItem.getID()); if (myItem.isOwningCollection(myCollection)) { count_native++; } else { count_import++; } // is the collection in the hash? Collection owningCollection = myItem.getOwningCollection(); Integer cKey = new Integer(owningCollection.getID()); if (myCollections.containsKey(cKey)) { Integer x = (Integer) myCounts.get(cKey); int myCount = x.intValue() + 1; // increment count for that collection myCounts.put(cKey, new Integer(myCount)); } else { // store and initialize count myCollections.put(cKey, owningCollection); myCounts.put(cKey, new Integer(1)); } // store the item myItems.put(myKey, myItem); } // remove this collection's entry because we already have a native // count myCollections.remove(new Integer(myCollection.getID())); // sort items - later // show page request.setAttribute("collection", myCollection); request.setAttribute("count_native", new Integer(count_native)); request.setAttribute("count_import", new Integer(count_import)); request.setAttribute("items", myItems); request.setAttribute("collections", myCollections); request.setAttribute("collection_counts", myCounts); request .setAttribute("all_collections", Collection .findAll(context)); // show this page when we're done jspPage = "itemmap-main.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } /* * else if( action.equals("add") ) { int itemID = * UIUtil.getIntParameter(request, "item_id"); String handle = * (String)request.getParameter("handle"); boolean error = true; Item * itemToAdd = null; * * if( itemID > 0 ) { itemToAdd = Item.find(context, itemID); * * if( itemToAdd != null ) error = false; } else if(handle != null && * !handle.equals("")) { DSpaceObject * dso=HandleManager.resolveToObject(context, handle); * * if(dso != null && dso.getType() == Constants.ITEM) { itemToAdd = * (Item)dso; error = false; } } * * //FIXME: error handling! 
if( !error ) { String myTitle = * itemToAdd.getDC("title",null,Item.ANY)[0].value; String ownerName = * itemToAdd.getOwningCollection().getMetadata("name"); * // hook up item, but first, does it belong already? TableRowIterator * tri = DatabaseManager.query(context, "collection2item", "SELECT * collection2item.* FROM collection2item WHERE " + "collection_id=" + * myCollection.getID() + " AND item_id=" + itemToAdd.getID()); * * if(tri.hasNext()) { request.setAttribute("message", "Item is already * part of that collection!"); } else { // Create mapping * myCollection.addItem( itemToAdd ); * // set up a nice 'done' message request.setAttribute("message", * "Item added successfully: <br> " + myTitle + " <br> From Collection: * <br> " + ownerName); * } * * request.setAttribute("collection", myCollection); * // show this page when we're done jspPage = "itemmap-info.jsp"; * // show the page JSPManager.showJSP(request, response, jspPage); } * else { // Display an error } } else if( action.equals("Add Entire * Collection") ) { int targetID = UIUtil.getIntParameter(request, * "collection2import"); * * Collection targetCollection = Collection.find(context, targetID); * // get all items from that collection and add them if not // already * added * // get all items to be added ItemIterator i = * targetCollection.getItems(); Map toAdd = new HashMap(); String * message = ""; * * while( i.hasNext() ) { Item myItem = i.next(); * * toAdd.put(new Integer(myItem.getID()), myItem); } * // now see what we already have, removing dups from the 'toAdd' list * i = myCollection.getItems(); * * while( i.hasNext() ) { Item myItem = i.next(); Integer myKey = new * Integer(myItem.getID()); * // remove works even if key isn't present toAdd.remove(myKey); } * // what's left in toAdd should be added Iterator addKeys = * toAdd.keySet().iterator(); * * while( addKeys.hasNext() ) { Item myItem = * (Item)toAdd.get(addKeys.next()); myCollection.addItem(myItem); * message += " <br> Added item ID: " + myItem.getID(); } * * request.setAttribute("message", message); * request.setAttribute("collection", myCollection); * // show this page when we're done jspPage = "itemmap-info.jsp"; * // show the page JSPManager.showJSP(request, response, jspPage); } */ else if (action.equals("Remove")) { // get item IDs to remove String[] itemIDs = request.getParameterValues("item_ids"); String message = "remove"; LinkedList removedItems = new LinkedList(); for (int j = 0; j < itemIDs.length; j++) { int i = Integer.parseInt(itemIDs[j]); removedItems.add(itemIDs[j]); Item myItem = Item.find(context, i); // make sure item doesn't belong to this collection if (!myItem.isOwningCollection(myCollection)) { myCollection.removeItem(myItem); Browse.itemChanged(context,myItem); } } request.setAttribute("message", message); request.setAttribute("collection", myCollection); request.setAttribute("processedItems", removedItems); // show this page when we're done jspPage = "itemmap-info.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } else if (action.equals("Add")) { // get item IDs to add String[] itemIDs = request.getParameterValues("item_ids"); String message = "added"; LinkedList addedItems = new LinkedList(); if (itemIDs == null) { message = "none-selected"; } else { for (int j = 0; j < itemIDs.length; j++) { int i = Integer.parseInt(itemIDs[j]); Item myItem = Item.find(context, i); if (AuthorizeManager.authorizeActionBoolean(context, myItem, Constants.READ)) { // make sure item doesn't belong to this collection if 
(!myItem.isOwningCollection(myCollection)) { myCollection.addItem(myItem); Browse.itemChanged(context,myItem); addedItems.add(itemIDs[j]); } } } } request.setAttribute("message", message); request.setAttribute("collection", myCollection); request.setAttribute("processedItems", addedItems); // show this page when we're done jspPage = "itemmap-info.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } else if (action.equals("Search Authors")) { // find all items with a matching author string and not currently in // this collection // sorting by date would be ideal... String myQuery = (String) request.getParameter("namepart"); TableRowIterator tri = DatabaseManager.query(context, "SELECT * from ItemsByAuthor WHERE sort_author like ? AND " + "item_id NOT IN (SELECT item_id FROM collection2item " + "WHERE collection_id= ? ", '%'+myQuery.toLowerCase()+'%',myCollection.getID()); Map items = new HashMap(); while (tri.hasNext()) { TableRow tr = tri.next(); // now instantiate and pass items to 'Add' page int itemID = tr.getIntColumn("item_id"); Item myItem = Item.find(context, itemID); // only put on list if you can read item if (AuthorizeManager.authorizeActionBoolean(context, myItem, Constants.READ)) { items.put(new Integer(itemID), myItem); } } tri.close(); request.setAttribute("collection", myCollection); request.setAttribute("browsetext", myQuery); request.setAttribute("items", items); request.setAttribute("browsetype", new String("Add")); jspPage = "itemmap-browse.jsp"; JSPManager.showJSP(request, response, jspPage); } else if (action.equals("browse")) { // target collection to browse int t = UIUtil.getIntParameter(request, "t"); Collection targetCollection = Collection.find(context, t); // now find all imported items from that collection // seemingly inefficient, but database should have this query cached ItemIterator i = myCollection.getItems(); Map items = new HashMap(); while (i.hasNext()) { Item myItem = i.next(); if (myItem.isOwningCollection(targetCollection)) { Integer myKey = new Integer(myItem.getID()); items.put(myKey, myItem); } } request.setAttribute("collection", myCollection); request.setAttribute("browsetext", targetCollection .getMetadata("name")); request.setAttribute("items", items); request.setAttribute("browsetype", new String("Remove")); // show this page when we're done jspPage = "itemmap-browse.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } context.complete(); }
protected void doDSPost(Context context, HttpServletRequest request, HttpServletResponse response) throws java.sql.SQLException, javax.servlet.ServletException, java.io.IOException, AuthorizeException { String jspPage = null; // get with a collection ID means put up browse window int myID = UIUtil.getIntParameter(request, "cid"); // get collection Collection myCollection = Collection.find(context, myID); // authorize check AuthorizeManager.authorizeAction(context, myCollection, Constants.COLLECTION_ADMIN); String action = request.getParameter("action"); if (action == null) { action = ""; } // Defined non-empty value shows that 'Cancel' has been pressed String cancel = request.getParameter("cancel"); if (cancel == null) { cancel = ""; } if (action.equals("") || !cancel.equals("")) { // get with no action parameter set means to put up the main page // which is statistics and some command buttons to add/remove items // // also holds for interruption by pressing 'Cancel' int count_native = 0; // # of items owned by this collection int count_import = 0; // # of virtual items Map myItems = new HashMap(); // # for the browser Map myCollections = new HashMap(); // collections for list Map myCounts = new HashMap(); // counts for each collection // get all items from that collection, add them to a hash ItemIterator i = myCollection.getItems(); // iterate through the items in this collection, and count how many // are native, and how many are imports, and which collections they // came from while (i.hasNext()) { Item myItem = i.next(); // get key for hash Integer myKey = new Integer(myItem.getID()); if (myItem.isOwningCollection(myCollection)) { count_native++; } else { count_import++; } // is the collection in the hash? Collection owningCollection = myItem.getOwningCollection(); Integer cKey = new Integer(owningCollection.getID()); if (myCollections.containsKey(cKey)) { Integer x = (Integer) myCounts.get(cKey); int myCount = x.intValue() + 1; // increment count for that collection myCounts.put(cKey, new Integer(myCount)); } else { // store and initialize count myCollections.put(cKey, owningCollection); myCounts.put(cKey, new Integer(1)); } // store the item myItems.put(myKey, myItem); } // remove this collection's entry because we already have a native // count myCollections.remove(new Integer(myCollection.getID())); // sort items - later // show page request.setAttribute("collection", myCollection); request.setAttribute("count_native", new Integer(count_native)); request.setAttribute("count_import", new Integer(count_import)); request.setAttribute("items", myItems); request.setAttribute("collections", myCollections); request.setAttribute("collection_counts", myCounts); request .setAttribute("all_collections", Collection .findAll(context)); // show this page when we're done jspPage = "itemmap-main.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } /* * else if( action.equals("add") ) { int itemID = * UIUtil.getIntParameter(request, "item_id"); String handle = * (String)request.getParameter("handle"); boolean error = true; Item * itemToAdd = null; * * if( itemID > 0 ) { itemToAdd = Item.find(context, itemID); * * if( itemToAdd != null ) error = false; } else if(handle != null && * !handle.equals("")) { DSpaceObject * dso=HandleManager.resolveToObject(context, handle); * * if(dso != null && dso.getType() == Constants.ITEM) { itemToAdd = * (Item)dso; error = false; } } * * //FIXME: error handling! 
if( !error ) { String myTitle = * itemToAdd.getDC("title",null,Item.ANY)[0].value; String ownerName = * itemToAdd.getOwningCollection().getMetadata("name"); * // hook up item, but first, does it belong already? TableRowIterator * tri = DatabaseManager.query(context, "collection2item", "SELECT * collection2item.* FROM collection2item WHERE " + "collection_id=" + * myCollection.getID() + " AND item_id=" + itemToAdd.getID()); * * if(tri.hasNext()) { request.setAttribute("message", "Item is already * part of that collection!"); } else { // Create mapping * myCollection.addItem( itemToAdd ); * // set up a nice 'done' message request.setAttribute("message", * "Item added successfully: <br> " + myTitle + " <br> From Collection: * <br> " + ownerName); * } * * request.setAttribute("collection", myCollection); * // show this page when we're done jspPage = "itemmap-info.jsp"; * // show the page JSPManager.showJSP(request, response, jspPage); } * else { // Display an error } } else if( action.equals("Add Entire * Collection") ) { int targetID = UIUtil.getIntParameter(request, * "collection2import"); * * Collection targetCollection = Collection.find(context, targetID); * // get all items from that collection and add them if not // already * added * // get all items to be added ItemIterator i = * targetCollection.getItems(); Map toAdd = new HashMap(); String * message = ""; * * while( i.hasNext() ) { Item myItem = i.next(); * * toAdd.put(new Integer(myItem.getID()), myItem); } * // now see what we already have, removing dups from the 'toAdd' list * i = myCollection.getItems(); * * while( i.hasNext() ) { Item myItem = i.next(); Integer myKey = new * Integer(myItem.getID()); * // remove works even if key isn't present toAdd.remove(myKey); } * // what's left in toAdd should be added Iterator addKeys = * toAdd.keySet().iterator(); * * while( addKeys.hasNext() ) { Item myItem = * (Item)toAdd.get(addKeys.next()); myCollection.addItem(myItem); * message += " <br> Added item ID: " + myItem.getID(); } * * request.setAttribute("message", message); * request.setAttribute("collection", myCollection); * // show this page when we're done jspPage = "itemmap-info.jsp"; * // show the page JSPManager.showJSP(request, response, jspPage); } */ else if (action.equals("Remove")) { // get item IDs to remove String[] itemIDs = request.getParameterValues("item_ids"); String message = "remove"; LinkedList removedItems = new LinkedList(); for (int j = 0; j < itemIDs.length; j++) { int i = Integer.parseInt(itemIDs[j]); removedItems.add(itemIDs[j]); Item myItem = Item.find(context, i); // make sure item doesn't belong to this collection if (!myItem.isOwningCollection(myCollection)) { myCollection.removeItem(myItem); Browse.itemChanged(context,myItem); } } request.setAttribute("message", message); request.setAttribute("collection", myCollection); request.setAttribute("processedItems", removedItems); // show this page when we're done jspPage = "itemmap-info.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } else if (action.equals("Add")) { // get item IDs to add String[] itemIDs = request.getParameterValues("item_ids"); String message = "added"; LinkedList addedItems = new LinkedList(); if (itemIDs == null) { message = "none-selected"; } else { for (int j = 0; j < itemIDs.length; j++) { int i = Integer.parseInt(itemIDs[j]); Item myItem = Item.find(context, i); if (AuthorizeManager.authorizeActionBoolean(context, myItem, Constants.READ)) { // make sure item doesn't belong to this collection if 
(!myItem.isOwningCollection(myCollection)) { myCollection.addItem(myItem); Browse.itemChanged(context,myItem); addedItems.add(itemIDs[j]); } } } } request.setAttribute("message", message); request.setAttribute("collection", myCollection); request.setAttribute("processedItems", addedItems); // show this page when we're done jspPage = "itemmap-info.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } else if (action.equals("Search Authors")) { // find all items with a matching author string and not currently in // this collection // sorting by date would be ideal... String myQuery = (String) request.getParameter("namepart"); TableRowIterator tri = DatabaseManager.query(context, "SELECT * from ItemsByAuthor WHERE sort_author like ? AND " + "item_id NOT IN (SELECT item_id FROM collection2item " + "WHERE collection_id= ? )", '%'+myQuery.toLowerCase()+'%',myCollection.getID()); Map items = new HashMap(); while (tri.hasNext()) { TableRow tr = tri.next(); // now instantiate and pass items to 'Add' page int itemID = tr.getIntColumn("item_id"); Item myItem = Item.find(context, itemID); // only put on list if you can read item if (AuthorizeManager.authorizeActionBoolean(context, myItem, Constants.READ)) { items.put(new Integer(itemID), myItem); } } tri.close(); request.setAttribute("collection", myCollection); request.setAttribute("browsetext", myQuery); request.setAttribute("items", items); request.setAttribute("browsetype", new String("Add")); jspPage = "itemmap-browse.jsp"; JSPManager.showJSP(request, response, jspPage); } else if (action.equals("browse")) { // target collection to browse int t = UIUtil.getIntParameter(request, "t"); Collection targetCollection = Collection.find(context, t); // now find all imported items from that collection // seemingly inefficient, but database should have this query cached ItemIterator i = myCollection.getItems(); Map items = new HashMap(); while (i.hasNext()) { Item myItem = i.next(); if (myItem.isOwningCollection(targetCollection)) { Integer myKey = new Integer(myItem.getID()); items.put(myKey, myItem); } } request.setAttribute("collection", myCollection); request.setAttribute("browsetext", targetCollection .getMetadata("name")); request.setAttribute("items", items); request.setAttribute("browsetype", new String("Remove")); // show this page when we're done jspPage = "itemmap-browse.jsp"; // show the page JSPManager.showJSP(request, response, jspPage); } context.complete(); }
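A minimal, self-contained sketch of the query shape behind the fix above: the NOT IN subquery must be closed with a parenthesis and both values bound as parameters. Plain JDBC is used here instead of DSpace's DatabaseManager, and the JDBC URL, collection id, and search term are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class AuthorSearchSketch {
    public static void main(String[] args) throws SQLException {
        String namePart = "smith";     // hypothetical search term
        int collectionId = 42;         // hypothetical collection id
        String sql =
            "SELECT * FROM ItemsByAuthor WHERE sort_author LIKE ? AND "
          + "item_id NOT IN (SELECT item_id FROM collection2item "
          + "WHERE collection_id = ?)"; // the subquery is closed here
        try (Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost/dspace");
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, "%" + namePart.toLowerCase() + "%");
            ps.setInt(2, collectionId);
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    System.out.println("item_id = " + rs.getInt("item_id"));
                }
            }
        }
    }
}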
diff --git a/had_one_dismissal/src/test/java/com/jclarity/had_one_dismissal/web/PopulateTest.java b/had_one_dismissal/src/test/java/com/jclarity/had_one_dismissal/web/PopulateTest.java index 86c71b9..205fd96 100644 --- a/had_one_dismissal/src/test/java/com/jclarity/had_one_dismissal/web/PopulateTest.java +++ b/had_one_dismissal/src/test/java/com/jclarity/had_one_dismissal/web/PopulateTest.java @@ -1,57 +1,56 @@ package com.jclarity.had_one_dismissal.web; import static org.junit.Assert.assertEquals; import org.junit.Test; import org.junit.runner.RunWith; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.transaction.annotation.Transactional; import com.jclarity.had_one_dismissal.domain.Applicant; import com.jclarity.had_one_dismissal.domain.Company; import com.jclarity.had_one_dismissal.domain.Location; import com.jclarity.had_one_dismissal.domain.Tag; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = { "classpath:/META-INF/spring/applicationContext.xml", "file:src/test/resources/webmvc-test-config.xml" }) public class PopulateTest { private static final Logger LOGGER = LoggerFactory.getLogger(PopulateTest.class); @Transactional @Test public void loadsDataFromFiles() throws Exception { LOGGER.info("Populate Test"); - System.out.println("fuck off"); long total = System.currentTimeMillis(); long time = total; Populate.loadLocations(); time = updateStopwatch(time, "loadLocations"); Populate.loadTags(); time = updateStopwatch(time, "loadTags"); Populate.loadNames(); time = updateStopwatch(time, "loadNames"); Populate.loadCompanies(); time = updateStopwatch(time, "loadCompanies"); Populate.loadJobListings(); updateStopwatch(time, "loadJobListings"); total = System.currentTimeMillis() - total; LOGGER.info("TOTAL: {}", total); assertEquals(272L, Location.countLocations()); assertEquals(658L, Tag.countTags()); assertEquals(10000, Applicant.countApplicants()); assertEquals(2155, Company.countCompanys()); } private long updateStopwatch(long time, String name) { time = System.currentTimeMillis() - time; LOGGER.info("Time in {}: {}", name, time); return System.currentTimeMillis(); } }
true
true
public void loadsDataFromFiles() throws Exception {
    LOGGER.info("Populate Test");
    System.out.println("fuck off");
    long total = System.currentTimeMillis();
    long time = total;
    Populate.loadLocations();
    time = updateStopwatch(time, "loadLocations");
    Populate.loadTags();
    time = updateStopwatch(time, "loadTags");
    Populate.loadNames();
    time = updateStopwatch(time, "loadNames");
    Populate.loadCompanies();
    time = updateStopwatch(time, "loadCompanies");
    Populate.loadJobListings();
    updateStopwatch(time, "loadJobListings");
    total = System.currentTimeMillis() - total;
    LOGGER.info("TOTAL: {}", total);
    assertEquals(272L, Location.countLocations());
    assertEquals(658L, Tag.countTags());
    assertEquals(10000, Applicant.countApplicants());
    assertEquals(2155, Company.countCompanys());
}
public void loadsDataFromFiles() throws Exception {
    LOGGER.info("Populate Test");
    long total = System.currentTimeMillis();
    long time = total;
    Populate.loadLocations();
    time = updateStopwatch(time, "loadLocations");
    Populate.loadTags();
    time = updateStopwatch(time, "loadTags");
    Populate.loadNames();
    time = updateStopwatch(time, "loadNames");
    Populate.loadCompanies();
    time = updateStopwatch(time, "loadCompanies");
    Populate.loadJobListings();
    updateStopwatch(time, "loadJobListings");
    total = System.currentTimeMillis() - total;
    LOGGER.info("TOTAL: {}", total);
    assertEquals(272L, Location.countLocations());
    assertEquals(658L, Tag.countTags());
    assertEquals(10000, Applicant.countApplicants());
    assertEquals(2155, Company.countCompanys());
}
diff --git a/core/src/main/java/edu/northwestern/bioinformatics/studycalendar/utils/mail/MailMessageFactory.java b/core/src/main/java/edu/northwestern/bioinformatics/studycalendar/utils/mail/MailMessageFactory.java index b0d897f76..17c4029bd 100644 --- a/core/src/main/java/edu/northwestern/bioinformatics/studycalendar/utils/mail/MailMessageFactory.java +++ b/core/src/main/java/edu/northwestern/bioinformatics/studycalendar/utils/mail/MailMessageFactory.java @@ -1,78 +1,78 @@ package edu.northwestern.bioinformatics.studycalendar.utils.mail; import edu.northwestern.bioinformatics.studycalendar.domain.Notification; import static edu.northwestern.bioinformatics.studycalendar.configuration.Configuration.MAIL_EXCEPTIONS_TO; import static edu.northwestern.bioinformatics.studycalendar.configuration.Configuration.MAIL_REPLY_TO; import gov.nih.nci.cabig.ctms.tools.configuration.Configuration; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.web.context.ServletContextAware; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import java.util.List; /** * @author Rhett Sutphin */ public class MailMessageFactory implements ServletContextAware { // private static Log log = LogFactory.getLog(MailMessageFactory.class); private static Logger log = LoggerFactory.getLogger(MailMessageFactory.class); private freemarker.template.Configuration freemarkerConfiguration; private ServletContext servletContext; private Configuration configuration; ////// FACTORY public ExceptionMailMessage createExceptionMailMessage(Throwable exception, HttpServletRequest request) { List<String> to = configuration.get(MAIL_EXCEPTIONS_TO); if (to == null) { - log.error("Uncaught exception encountered, but report e-mail messages not configured. To turn them on, set at least one address for the mailExceptionsTo property."); + log.error("Uncaught exception encountered, but report e-mail messages not configured. To turn them on, set at least one address for the mailExceptionsTo property.", exception); return null; } else { ExceptionMailMessage message = configureMessage(new ExceptionMailMessage()); message.setTo(to.toArray(new String[to.size()])); message.setUncaughtException(exception); message.setRequest(request); message.setServletContext(servletContext); return message; } } public ScheduleNotificationMailMessage createScheduleNotificationMailMessage(String toAddress, final Notification notification) { if (toAddress == null || StringUtils.isEmpty(toAddress)) { log.error("to address is null or empty. can not send email for new schedules. "); return null; } else { ScheduleNotificationMailMessage message = configureMessage(new ScheduleNotificationMailMessage()); message.setTo(toAddress); message.setNotification(notification); return message; } } private <T extends StudyCalendarMailMessage> T configureMessage(T message) { message.setFreemarkerConfiguration(freemarkerConfiguration); message.setConfiguration(configuration); message.setReplyTo(configuration.get(MAIL_REPLY_TO)); message.onInitialization(); return message; } ////// CONFIGURATION public void setFreemarkerConfiguration(freemarker.template.Configuration freemarkerConfiguration) { this.freemarkerConfiguration = freemarkerConfiguration; } public void setServletContext(ServletContext servletContext) { this.servletContext = servletContext; } public void setConfiguration(Configuration configuration) { this.configuration = configuration; } }
true
true
public ExceptionMailMessage createExceptionMailMessage(Throwable exception, HttpServletRequest request) {
    List<String> to = configuration.get(MAIL_EXCEPTIONS_TO);
    if (to == null) {
        log.error("Uncaught exception encountered, but report e-mail messages not configured. To turn them on, set at least one address for the mailExceptionsTo property.");
        return null;
    } else {
        ExceptionMailMessage message = configureMessage(new ExceptionMailMessage());
        message.setTo(to.toArray(new String[to.size()]));
        message.setUncaughtException(exception);
        message.setRequest(request);
        message.setServletContext(servletContext);
        return message;
    }
}
public ExceptionMailMessage createExceptionMailMessage(Throwable exception, HttpServletRequest request) {
    List<String> to = configuration.get(MAIL_EXCEPTIONS_TO);
    if (to == null) {
        log.error("Uncaught exception encountered, but report e-mail messages not configured. To turn them on, set at least one address for the mailExceptionsTo property.", exception);
        return null;
    } else {
        ExceptionMailMessage message = configureMessage(new ExceptionMailMessage());
        message.setTo(to.toArray(new String[to.size()]));
        message.setUncaughtException(exception);
        message.setRequest(request);
        message.setServletContext(servletContext);
        return message;
    }
}
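A minimal sketch of the logging pattern used in the fix above: SLF4J's error(String, Throwable) overload records the full stack trace, whereas the message-only call drops it. The class name and simulated exception are placeholders.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {
    private static final Logger log = LoggerFactory.getLogger(LoggingSketch.class);

    public static void main(String[] args) {
        Exception boom = new IllegalStateException("simulated uncaught exception");
        // Message only: the stack trace of 'boom' is lost.
        log.error("Report e-mail not configured; exception dropped.");
        // Message plus Throwable: SLF4J logs the full stack trace.
        log.error("Report e-mail not configured; exception follows.", boom);
    }
}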
diff --git a/src/swt/org/pathvisio/search/PathwaySearchComposite.java b/src/swt/org/pathvisio/search/PathwaySearchComposite.java index c710818e..abfe8fe6 100644 --- a/src/swt/org/pathvisio/search/PathwaySearchComposite.java +++ b/src/swt/org/pathvisio/search/PathwaySearchComposite.java @@ -1,328 +1,331 @@ // PathVisio, // a tool for data visualization and analysis using Biological Pathways // Copyright 2006-2007 BiGCaT Bioinformatics // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package org.pathvisio.search; import java.io.File; import java.lang.reflect.InvocationTargetException; import java.util.HashMap; import org.eclipse.core.runtime.IProgressMonitor; import org.eclipse.jface.dialogs.MessageDialog; import org.eclipse.jface.dialogs.ProgressMonitorDialog; import org.eclipse.swt.SWT; import org.eclipse.swt.custom.StackLayout; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.layout.FillLayout; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.layout.GridLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Combo; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Control; import org.eclipse.swt.widgets.DirectoryDialog; import org.eclipse.swt.widgets.Group; import org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Text; import org.pathvisio.model.DataSource; import org.pathvisio.model.Xref; import org.pathvisio.debug.Logger; import org.pathvisio.gui.swt.MainWindowBase; import org.pathvisio.preferences.swt.SwtPreferences.SwtPreference; import org.pathvisio.search.SearchMethods.SearchException; import org.pathvisio.util.swt.SwtUtils.SimpleRunnableWithProgress; public class PathwaySearchComposite extends Composite { HashMap<String, SearchOptionComposite> searchControls; MainWindowBase window; public PathwaySearchComposite(Composite parent, int style, MainWindowBase window) { super(parent, style); this.window = window; setLayout(new GridLayout()); initSearchComposite(); initSearchResultTable(); } StackLayout pathwaySearchStack; SearchResultTable searchResultTable; private void initSearchComposite() { searchControls = new HashMap<String, SearchOptionComposite>(); String[] soLabels = new String[] { "gene id", "gene symbol" }; final HashMap<String, String> labelMappings = new HashMap<String, String>(); labelMappings.put(soLabels[0], "pathwaysContainingGene"); labelMappings.put(soLabels[1], "pathwaysContainingGeneSymbol"); final Group group = new Group(this, SWT.SHADOW_ETCHED_IN); group.setText("Search"); group.setLayoutData(new GridData(GridData.FILL_HORIZONTAL)); group.setLayout(new GridLayout(2, false)); Label label = new Label(group, SWT.CENTER); label.setText("Search by:"); final Combo combo = new Combo(group, SWT.READ_ONLY); combo.setItems(soLabels); combo.setLayoutData(new GridData(GridData.FILL_HORIZONTAL)); GridData span2cols = new GridData(GridData.FILL_HORIZONTAL); span2cols.horizontalSpan = 2; final Group sGroup = new Group(group, SWT.NULL); 
pathwaySearchStack = new StackLayout(); sGroup.setLayout(pathwaySearchStack); sGroup.setLayoutData(span2cols); //Add search options composites to stacklayout final Composite[] searchOptionControls = new Composite[2]; searchOptionControls[0] = pathwaysContainingGene(sGroup); searchOptionControls[1] = pathwaysContainingGeneSymbol(sGroup); combo.addSelectionListener(new SelectionAdapter() { public void widgetSelected(SelectionEvent e) { int selection = combo.getSelectionIndex(); if(selection > -1) searchControls.get(labelMappings.get(combo.getText())).select(); sGroup.layout(); } }); //Set initial selection //set symbol search as default pathwaySearchStack.topControl = searchOptionControls[1]; combo.select(1); } private void initSearchResultTable() { Group group = new Group(this, SWT.SHADOW_ETCHED_IN); group.setText("Results"); group.setLayoutData(new GridData(GridData.FILL_BOTH)); group.setLayout(new FillLayout()); searchResultTable = new SearchResultTable(group, SWT.NULL); } public Composite pathwaysContainingGeneSymbol(Composite parent) { SearchOptionComposite comp = new SearchOptionComposite(parent, SWT.NULL) { void select() { pathwaySearchStack.topControl = this; pack(); } public Composite createContents(Composite parent) { setLayout(new GridLayout(3, false)); GridData span2cols = new GridData(GridData.FILL_HORIZONTAL); span2cols.horizontalSpan = 2; Label symLabel = new Label(parent, SWT.CENTER); symLabel.setText("Gene symbol:"); final Text symText = new Text(parent, SWT.SINGLE | SWT.BORDER); symText.setLayoutData(span2cols); Label dirLabel = new Label(parent, SWT.CENTER); dirLabel.setText("Directory to search:"); final Text dirText = createDirText(parent); createDirButton(parent, dirText); Button searchButton = new Button(parent, SWT.PUSH); searchButton.setText("Search"); GridData span3cols = new GridData(GridData.HORIZONTAL_ALIGN_END); span3cols.horizontalSpan = 3; searchButton.setLayoutData(span3cols); searchButton.addSelectionListener(new SelectionAdapter() { public void widgetSelected(SelectionEvent e) { String id = symText.getText(); String folder = dirText.getText(); if(id.equals("") || folder.equals("")) { MessageDialog.openError(getShell(), "error", "please specify id and pathway folder"); return; } SearchRunnableWithProgress srwp = new SearchRunnableWithProgress( "pathwaysContainingGeneSymbol", new Class[] { String.class, File.class, SearchResultTable.class, SearchRunnableWithProgress.class }); srwp.setArgs(new Object[] {id, new File(folder), searchResultTable, srwp }); ProgressMonitorDialog dialog = new ProgressMonitorDialog(getShell()); try { dialog.run(true, true, srwp); } catch(Exception ex) { MessageDialog.openError(getShell(), "Error", "Unable to perform search: " + ex.getMessage()); return; } } }); //Add controls to hash to enable preset values name2Control.put("idText", symText); name2Control.put("dirText", dirText); return parent; } }; searchControls.put("pathwaysContainingGeneSymbol", comp); //Add to available search options return comp; } public Composite pathwaysContainingGene(Composite parent) { SearchOptionComposite comp = new SearchOptionComposite(parent, SWT.NULL) { void select() { pathwaySearchStack.topControl = this; } public Composite createContents(Composite parent) { setLayout(new GridLayout(3, false)); GridData span2cols = new GridData(GridData.FILL_HORIZONTAL); span2cols.horizontalSpan = 2; Label idLabel = new Label(parent, SWT.CENTER); idLabel.setText("Gene id:"); final Text idText = new Text(parent, SWT.SINGLE | SWT.BORDER); 
idText.setLayoutData(span2cols); Label systemLabel = new Label(parent, SWT.CENTER); systemLabel.setText("Id system:"); final Combo systemCombo = new Combo(parent, SWT.SINGLE | SWT.READ_ONLY); final String[] datasources = DataSource.getFullNames().toArray(new String[0]); + for(int i = 0; i < datasources.length; i++) { + if(datasources[i] == null) datasources[i] = ""; + } systemCombo.setItems(datasources); systemCombo.setLayoutData(span2cols); Label dirLabel = new Label(parent, SWT.CENTER); dirLabel.setText("Directory to search:"); final Text dirText = createDirText(parent); createDirButton(parent, dirText); Button searchButton = new Button(parent, SWT.PUSH); searchButton.setText("Search"); GridData span3cols = new GridData(GridData.HORIZONTAL_ALIGN_END); span3cols.horizontalSpan = 3; searchButton.setLayoutData(span3cols); searchButton.addSelectionListener(new SelectionAdapter() { public void widgetSelected(SelectionEvent e) { String id = idText.getText(); int codeIndex = systemCombo.getSelectionIndex(); DataSource ds = codeIndex == -1 ? null : DataSource.getByFullName (datasources[codeIndex]); Xref ref = new Xref (id, ds); String folder = dirText.getText(); if(id.equals("") || ds.equals("") || folder.equals("")) { MessageDialog.openError(getShell(), "error", "please specify id, code and pathway folder"); return; } SearchRunnableWithProgress srwp = new SearchRunnableWithProgress( "pathwaysContainingGeneID", new Class[] { Xref.class, File.class, SearchResultTable.class, SearchRunnableWithProgress.class }); SearchRunnableWithProgress.setMonitorInfo("Searching", (int)SearchMethods.TOTAL_WORK); srwp.setArgs(new Object[] {ref, new File(folder), searchResultTable, srwp }); ProgressMonitorDialog dialog = new ProgressMonitorDialog(getShell()); try { dialog.run(true, true, srwp); } catch(Exception ex) { MessageDialog.openError(getShell(), "Error", "Unable to perform search: " + ex.getMessage()); return; } } }); //Add controls to hash to enable preset values name2Control.put("idText", idText); name2Control.put("systemCombo", systemCombo); name2Control.put("dirText", dirText); return parent; } }; searchControls.put("pathwaysContainingGene", comp); //Add to available search options return comp; } private Text createDirText(Composite parent) { Text t = new Text(parent, SWT.SINGLE | SWT.BORDER); t.setText(SwtPreference.SWT_DIR_PWFILES.getValue()); t.setLayoutData(new GridData(GridData.FILL_HORIZONTAL)); return t; } private Button createDirButton(Composite parent, final Text dirText) { Button b = new Button(parent, SWT.PUSH); b.setText("Browse"); b.addSelectionListener(new SelectionAdapter() { public void widgetSelected(SelectionEvent e) { DirectoryDialog dd = new DirectoryDialog(getShell()); dd.setFilterPath(dirText.getText()); String dirName = dd.open(); if(dirName != null) dirText.setText(dirName); } }); return b; } public class SearchRunnableWithProgress extends SimpleRunnableWithProgress { public SearchRunnableWithProgress(String method, Class<?>[] parameters) { super(SearchMethods.class, method, parameters); } public void run(IProgressMonitor monitor) { try { super.run(monitor); } catch (InterruptedException e) { openMessageDialog("error", e.getMessage()); Logger.log.error("Unable to start search", e); } catch (InvocationTargetException e) { if(e.getCause() instanceof SearchException) openMessageDialog("", e.getCause().getMessage()); else { openMessageDialog("error", "Cause: " + e.getCause().getMessage()); Logger.log.error("while searching", e); } } } } public abstract class SearchOptionComposite 
extends Composite { HashMap<String, Control> name2Control; public SearchOptionComposite(Composite parent, int style) { super(parent, style); name2Control = new HashMap<String,Control>(); createContents(this); } public void setContents(HashMap<String, String> name2Value) { for(String key : name2Control.keySet()) { if(name2Value.containsKey(key)) { Control c = name2Control.get(key); if(c instanceof Text) ((Text)c).setText(name2Value.get(key)); else if(c instanceof Combo) ((Combo)c).setText(name2Value.get(key)); } } } abstract void select(); public abstract Composite createContents(Composite parent); } }
true
true
public Composite pathwaysContainingGene(Composite parent) { SearchOptionComposite comp = new SearchOptionComposite(parent, SWT.NULL) { void select() { pathwaySearchStack.topControl = this; } public Composite createContents(Composite parent) { setLayout(new GridLayout(3, false)); GridData span2cols = new GridData(GridData.FILL_HORIZONTAL); span2cols.horizontalSpan = 2; Label idLabel = new Label(parent, SWT.CENTER); idLabel.setText("Gene id:"); final Text idText = new Text(parent, SWT.SINGLE | SWT.BORDER); idText.setLayoutData(span2cols); Label systemLabel = new Label(parent, SWT.CENTER); systemLabel.setText("Id system:"); final Combo systemCombo = new Combo(parent, SWT.SINGLE | SWT.READ_ONLY); final String[] datasources = DataSource.getFullNames().toArray(new String[0]); systemCombo.setItems(datasources); systemCombo.setLayoutData(span2cols); Label dirLabel = new Label(parent, SWT.CENTER); dirLabel.setText("Directory to search:"); final Text dirText = createDirText(parent); createDirButton(parent, dirText); Button searchButton = new Button(parent, SWT.PUSH); searchButton.setText("Search"); GridData span3cols = new GridData(GridData.HORIZONTAL_ALIGN_END); span3cols.horizontalSpan = 3; searchButton.setLayoutData(span3cols); searchButton.addSelectionListener(new SelectionAdapter() { public void widgetSelected(SelectionEvent e) { String id = idText.getText(); int codeIndex = systemCombo.getSelectionIndex(); DataSource ds = codeIndex == -1 ? null : DataSource.getByFullName (datasources[codeIndex]); Xref ref = new Xref (id, ds); String folder = dirText.getText(); if(id.equals("") || ds.equals("") || folder.equals("")) { MessageDialog.openError(getShell(), "error", "please specify id, code and pathway folder"); return; } SearchRunnableWithProgress srwp = new SearchRunnableWithProgress( "pathwaysContainingGeneID", new Class[] { Xref.class, File.class, SearchResultTable.class, SearchRunnableWithProgress.class }); SearchRunnableWithProgress.setMonitorInfo("Searching", (int)SearchMethods.TOTAL_WORK); srwp.setArgs(new Object[] {ref, new File(folder), searchResultTable, srwp }); ProgressMonitorDialog dialog = new ProgressMonitorDialog(getShell()); try { dialog.run(true, true, srwp); } catch(Exception ex) { MessageDialog.openError(getShell(), "Error", "Unable to perform search: " + ex.getMessage()); return; } } }); //Add controls to hash to enable preset values name2Control.put("idText", idText); name2Control.put("systemCombo", systemCombo); name2Control.put("dirText", dirText); return parent; } }; searchControls.put("pathwaysContainingGene", comp); //Add to available search options return comp; }
public Composite pathwaysContainingGene(Composite parent) { SearchOptionComposite comp = new SearchOptionComposite(parent, SWT.NULL) { void select() { pathwaySearchStack.topControl = this; } public Composite createContents(Composite parent) { setLayout(new GridLayout(3, false)); GridData span2cols = new GridData(GridData.FILL_HORIZONTAL); span2cols.horizontalSpan = 2; Label idLabel = new Label(parent, SWT.CENTER); idLabel.setText("Gene id:"); final Text idText = new Text(parent, SWT.SINGLE | SWT.BORDER); idText.setLayoutData(span2cols); Label systemLabel = new Label(parent, SWT.CENTER); systemLabel.setText("Id system:"); final Combo systemCombo = new Combo(parent, SWT.SINGLE | SWT.READ_ONLY); final String[] datasources = DataSource.getFullNames().toArray(new String[0]); for(int i = 0; i < datasources.length; i++) { if(datasources[i] == null) datasources[i] = ""; } systemCombo.setItems(datasources); systemCombo.setLayoutData(span2cols); Label dirLabel = new Label(parent, SWT.CENTER); dirLabel.setText("Directory to search:"); final Text dirText = createDirText(parent); createDirButton(parent, dirText); Button searchButton = new Button(parent, SWT.PUSH); searchButton.setText("Search"); GridData span3cols = new GridData(GridData.HORIZONTAL_ALIGN_END); span3cols.horizontalSpan = 3; searchButton.setLayoutData(span3cols); searchButton.addSelectionListener(new SelectionAdapter() { public void widgetSelected(SelectionEvent e) { String id = idText.getText(); int codeIndex = systemCombo.getSelectionIndex(); DataSource ds = codeIndex == -1 ? null : DataSource.getByFullName (datasources[codeIndex]); Xref ref = new Xref (id, ds); String folder = dirText.getText(); if(id.equals("") || ds.equals("") || folder.equals("")) { MessageDialog.openError(getShell(), "error", "please specify id, code and pathway folder"); return; } SearchRunnableWithProgress srwp = new SearchRunnableWithProgress( "pathwaysContainingGeneID", new Class[] { Xref.class, File.class, SearchResultTable.class, SearchRunnableWithProgress.class }); SearchRunnableWithProgress.setMonitorInfo("Searching", (int)SearchMethods.TOTAL_WORK); srwp.setArgs(new Object[] {ref, new File(folder), searchResultTable, srwp }); ProgressMonitorDialog dialog = new ProgressMonitorDialog(getShell()); try { dialog.run(true, true, srwp); } catch(Exception ex) { MessageDialog.openError(getShell(), "Error", "Unable to perform search: " + ex.getMessage()); return; } } }); //Add controls to hash to enable preset values name2Control.put("idText", idText); name2Control.put("systemCombo", systemCombo); name2Control.put("dirText", dirText); return parent; } }; searchControls.put("pathwaysContainingGene", comp); //Add to available search options return comp; }
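A minimal sketch of the sanitising step introduced by the fix above: SWT's Combo.setItems() rejects arrays that contain null, so null data-source names are replaced with empty strings before the array reaches the widget. The sample array is made up and no SWT widgets are created here.

import java.util.Arrays;

public class ComboItemsSketch {
    static String[] sanitise(String[] items) {
        String[] safe = items.clone();
        for (int i = 0; i < safe.length; i++) {
            if (safe[i] == null) {
                safe[i] = ""; // keep the index, drop the null
            }
        }
        return safe;
    }

    public static void main(String[] args) {
        String[] dataSources = { "Ensembl", null, "Entrez Gene" };
        System.out.println(Arrays.toString(sanitise(dataSources)));
        // prints: [Ensembl, , Entrez Gene]
    }
}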
diff --git a/src/main/java/ca/krasnay/sqlbuilder/PostgresqlDialect.java b/src/main/java/ca/krasnay/sqlbuilder/PostgresqlDialect.java index e23da3d..48be1a6 100644 --- a/src/main/java/ca/krasnay/sqlbuilder/PostgresqlDialect.java +++ b/src/main/java/ca/krasnay/sqlbuilder/PostgresqlDialect.java @@ -1,18 +1,18 @@ package ca.krasnay.sqlbuilder; /** * Dialect for PostgreSQL. * * @author John Krasnay <[email protected]> */ public class PostgresqlDialect implements Dialect { public String createCountSelect(String sql) { - return "select count(*) from (" + sql + ")"; + return "select count(*) from (" + sql + ") a"; } public String createPageSelect(String sql, int limit, int offset) { return String.format("%s limit %d offset %d", sql, limit, offset); } }
true
true
public String createCountSelect(String sql) {
    return "select count(*) from (" + sql + ")";
}
public String createCountSelect(String sql) {
    return "select count(*) from (" + sql + ") a";
}
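A minimal sketch of the rule behind the fix above: PostgreSQL requires every subquery in a FROM clause to carry an alias, so the count wrapper appends one. The inner query below is a placeholder.

public class CountSelectSketch {
    static String createCountSelect(String sql) {
        // Without the trailing alias PostgreSQL rejects the statement with
        // "subquery in FROM must have an alias".
        return "select count(*) from (" + sql + ") a";
    }

    public static void main(String[] args) {
        System.out.println(createCountSelect("select id, name from employee where active = true"));
        // select count(*) from (select id, name from employee where active = true) a
    }
}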
diff --git a/swop/src/simplegui/objectron/UserInterface.java b/swop/src/simplegui/objectron/UserInterface.java index 85cef00..dc0a9b1 100644 --- a/swop/src/simplegui/objectron/UserInterface.java +++ b/swop/src/simplegui/objectron/UserInterface.java @@ -1,741 +1,741 @@ package simplegui.objectron; import effect.Effect; import effect.IdentityDiskEffect; import effect.LightGrenadeEffect; import effect.PowerFailureLightGrenadeEffect; import exception.*; import game.*; import grid.*; import item.*; import java.awt.Color; import java.awt.Font; import java.awt.Graphics2D; import java.awt.Image; import java.awt.RenderingHints; import java.util.ArrayList; import javax.swing.JOptionPane; import controller.GameController; import coverage.*; import simplegui.Button; import simplegui.SimpleGUI; public class UserInterface { public final static int DEFAULT_WIDTH_HEIGHT = 10; public static void main(String[] args) { // All code that accesses the simple GUI must run in the AWT event // handling thread. // A simple way to achieve this is to run the entire application in the // AWT event handling thread. // This is done by simply wrapping the body of the main method in a call // of EventQueue.invokeLater. java.awt.EventQueue.invokeLater(new Runnable() { Image playerRed; Image playerBlue; Image cell; Image pf; Image finishRed; Image finishBlue; Image lightGrenade; Image wall; Image lightTrailRed; Image identityDisk; Image chargedIdentityDisk; Image teleporter; Image lightTrailBlue; Image redIndicator; Image blueIndicator; Image marker; GameController game; long startTime; long endTime; int width; int height; boolean quit; // Initial message. String systemMessage; // Game User Interface. SimpleGUI gui; Button pickUp; Button use; Button endTurn; Button startNewGame; Button e; Button w; Button n; Button nE; Button nW; Button s; Button sE; Button sW; Button[] buttons; /** * Disable all buttons except start new game */ public void disableButtons(){ for(Button b: buttons){ b.setEnabled(false); } startNewGame.setEnabled(true); } /** * Enable all buttons */ public void enableButtons(){ for(Button b: buttons){ b.setEnabled(true); } } public String getStats(){ String time = "Time: "+((endTime - startTime)/1000.0) + " s"; return time; } /** * Test if the game has ended and show the winner if that's the case. */ public void checkEnd(){ if(game.isGameEnded()){ endTime = System.currentTimeMillis(); String winMessage = game.getWinnerColour()+" has won the game!"; String stats = getStats(); JOptionPane.showMessageDialog(null, winMessage+"\n"+stats); systemMessage = "The game has ended. "+winMessage; disableButtons(); gui.repaint(); } } /** * Move the current player in the given direction * @param direction The direction to move */ public void move(Direction direction){ try { systemMessage = "You moved "+direction.toString()+". 
"; systemMessage += getStepOnMessage(direction); if(game.getCurrentActionsLeft() == 1){ systemMessage +="Turn switched."; } game.getMoveController().move(direction); // systemMessage += "You have " + gameFacade.getActionsLeft()+ " actions left."; } catch (InvalidMoveException e) { systemMessage = "Move forbidden!"; } gui.repaint(); checkEnd(); } private String getStepOnMessage(Direction dir) { String message = ""; Square stepOnSquare; try { stepOnSquare = game.getMoveController().getCurrentPlayerLocation().getNeighbour(dir); } catch (OutsideTheGridException e) { return message; } Item[] items = stepOnSquare.getItems(); if(!stepOnSquare.hasPowerFailure()){ if(items.length > 0){ for(Item i: stepOnSquare.getItems()){ if(i instanceof LightGrenade){ LightGrenade lg = (LightGrenade) i; LightGrenadeState lgState = lg.getState(); if(lgState instanceof ActiveLightGrenade){ message+= "You stepped on a light grenade," + "losing "+ LightGrenadeEffect.DAMAGE +" actions. "; } } else if(i instanceof Teleporter) { message += "You stepped on a teleporter and were teleported."; } else if(i instanceof IdentityDisk) { for(Effect e: stepOnSquare.getEffects()) { // There is an identity disk effect on the square if(e instanceof IdentityDiskEffect) { message += "You were shot by an identity disk, losing " + IdentityDiskEffect.DAMAGE + " actions. "; } } } else { throw new UnsupportedOperationException("Not yet implemented."); } } } return message; } if(stepOnSquare.hasPowerFailure()){ message += "You stepped on a square with a power failure. "; if(items.length == 0){ message += "The turn goes to the next player. "; } else{ for(Item i: stepOnSquare.getItems()){ message+= "You stepped on a "+i.toString()+". "; if(i instanceof LightGrenade){ LightGrenade lg = (LightGrenade) i; LightGrenadeState lgState =lg.getState(); if(lgState instanceof ActiveLightGrenade){ message+= "You lose "+PowerFailureLightGrenadeEffect.DAMAGE+" actions. "; } else{ message += "The turn goes to the next player."; } } } } } return message; } public void showMainMenu(){ //show the main menu Object[] options = {"Start game with default dimensions","Start custom game", "Quit"}; int answer = JOptionPane.showOptionDialog(null, "Do you want to play?", "Objectron", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE, null, //do not use a custom Icon options, //the titles of buttons null); //default button title //default width and height width = DEFAULT_WIDTH_HEIGHT; height = DEFAULT_WIDTH_HEIGHT; //ask for the width and height of the grid if(answer == 0 || answer == 1){ if(answer == 1){ while (true) { String message; message = "Choose the width of the grid."; while (true) { try { width = Integer.parseInt(JOptionPane .showInputDialog(message)); break; } catch (NumberFormatException e1) { message += "\nPlease input an integer."; continue; } } message = "Choose the height of the grid."; while (true) { try { height = Integer.parseInt(JOptionPane .showInputDialog(message)); break; } catch (NumberFormatException e1) { message += "\nPlease input an integer."; continue; } } try { game.startNewGame(width,height); systemMessage = "Welcome! Objectron starts when "+game.getCurrentPlayerColour()+" performs his first action."; if(gui!=null){ gui.resize( 40 * width, 40 * (height + 4)); enableButtons(); } break; } catch (InvalidDimensionException e1) { JOptionPane.showMessageDialog(null, "The given dimensions are invalid."); continue; } } } else if(answer == 0){ try { game.startNewGame(width,height); systemMessage = "Welcome! 
Objectron starts when "+game.getCurrentPlayerColour()+" performs his first action."; if(gui!=null){ gui.resize( 40 * width, 40 * (height + 4)); enableButtons(); } } catch (InvalidDimensionException e1) { // Cannot happen e1.printStackTrace(); } } } else{ if(gui!=null) gui.quit(); quit = true; } } public void run() { game = new GameController(); showMainMenu(); if(quit){ return; } gui = new SimpleGUI("Objectron", 40 * width, 40 * (height + 4)) { @Override public void paint(Graphics2D graphics) { //activate anti aliasing graphics.setRenderingHint (RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON); // Draw the squares. for (int i = 1; i < width + 1; i++) { for (int j = 1; j < height + 1; j++) { try { Square square = game.getSquareAtCoordinate(i, j); if(game.isStartingSquare(square)){ // Draw finish square. for(Player p:game.getPlayers()){ if(square.equals(p.getStartingPosition())){ if(p.getPlayerColour() == PlayerColour.RED){ graphics.drawImage(finishBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(finishRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } } else if (square.hasPowerFailure()) { graphics.drawImage(pf, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ // Draw normal square. graphics.drawImage(cell, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } if (square.marker) { graphics.drawImage(marker, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } // Set the obstacles. if (square.hasObstacle()) { Obstacle obstacle = square.getObstacle(); // Walls if (obstacle instanceof Wall) { graphics.drawImage(wall, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else if (square.getObstacle() instanceof LightTrail){ LightTrail lightTrail = (LightTrail) square.getObstacle(); if(lightTrail.getPlayer().getPlayerColour() == PlayerColour.RED){ graphics.drawImage(lightTrailRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(lightTrailBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } // Set the items. if (square.getNbItems()>0) { for (Item item: square.getItems()){ if (item instanceof LightGrenade) { if (((LightGrenade) item).getState().isVisible()) { graphics.drawImage(lightGrenade, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } else if (item instanceof IdentityDisk){ if(item instanceof ChargedIdentityDisk){ graphics.drawImage(chargedIdentityDisk, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(identityDisk, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } else if (item instanceof Teleporter){ graphics.drawImage(teleporter, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } // Set the players. if (square.hasPlayer()) { Player player = square.getPlayer(); if (player.getPlayerColour() == PlayerColour.RED) { // Draw the player. graphics.drawImage(playerRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } if (player.getPlayerColour() == PlayerColour.BLUE) { // Draw the player. graphics.drawImage(playerBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } catch (OutsideTheGridException e) { // Can not occur because of bounded i and j. 
} } } // draw the colour of the current player if (game.getCurrentPlayerColour() == PlayerColour.RED){ graphics.drawImage(redIndicator, 40, 40, 40, 40, null); } else{ graphics.drawImage(blueIndicator, 40, 40, 40, 40, null); } graphics.drawString(systemMessage, 5, 145); Font font1 = new Font("SansSerif", Font.BOLD, 30); graphics.setFont(font1); graphics.setColor(Color.WHITE); //draw the number of actions left graphics.drawString(""+game.getCurrentActionsLeft(), 53, 71); } }; cell = gui.loadImage("simplegui/objectron/cell.png", 40, 40); pf = gui.loadImage("simplegui/objectron/cellpf.png", 40, 40); wall = gui.loadImage("simplegui/objectron/wall.png", 40, 40); lightGrenade = gui.loadImage("simplegui/objectron/lightgrenade.png", 40, 40); playerRed = gui.loadImage("simplegui/objectron/player_red.png",40, 40); playerBlue = gui.loadImage("simplegui/objectron/player_blue.png", 40, 40); finishBlue = gui.loadImage("simplegui/objectron/cell_finish_blue.png", 40, 40); finishRed = gui.loadImage("simplegui/objectron/cell_finish_red.png", 40, 40); identityDisk = gui.loadImage("simplegui/objectron/idDisk.png", 40, 40); - chargedIdentityDisk = gui.loadImage("simplegui/objectron/chargedIdDisk.png", 40, 40); + chargedIdentityDisk = gui.loadImage("simplegui/objectron/chargedidDisk.png", 40, 40); teleporter = gui.loadImage("simplegui/objectron/teleporter.png", 40, 40); lightTrailBlue = gui.loadImage("simplegui/objectron/cell_lighttrail_blue.png", 40, 40); lightTrailRed = gui.loadImage("simplegui/objectron/cell_lighttrail_red.png", 40, 40); redIndicator = gui.loadImage("simplegui/objectron/red.png", 40, 40); blueIndicator = gui.loadImage("simplegui/objectron/blue.png", 40, 40); marker = gui.loadImage("simplegui/objectron/marker.png", 40, 40); /** * Button to pick up an item. */ pickUp = gui.createButton(120, 0, 120, 40, new Runnable() { public void run() { try { PortableItem[] items = game.getPickUpItemController().getVisiblePortableItemsAtCurrentLocation(); if(items.length == 0) throw new NoItemException(); PortableItem item = (PortableItem)JOptionPane.showInputDialog( null, "Choose an item:", null, JOptionPane.PLAIN_MESSAGE, null, items, null); game.getPickUpItemController().pickUpItem(item); systemMessage = "You picked up a "+item.toString() + "."; } catch (NoItemException e) { systemMessage = "There are no items on this square! Please perform another action."; } catch (OverCapacityException e){ systemMessage = "There is no place left in your inventory. Please perform another action."; } gui.repaint(); checkEnd(); } }); pickUp.setText("Pick Up"); /** * Button to use an item. */ use = gui.createButton(120, 40, 120, 40, new Runnable() { public void run() { try { PortableItem[] items = game.getUseItemController().getItemsInInventory(); if(items.length == 0) throw new NoItemException(); PortableItem item = (PortableItem)JOptionPane.showInputDialog( null, "Choose an item:", null, JOptionPane.PLAIN_MESSAGE, null, items, null); if(!(item instanceof IdentityDisk)) game.getUseItemController().useItem(item); else{ Direction direction = (Direction)JOptionPane.showInputDialog( null, "Choose a direction:", null, JOptionPane.PLAIN_MESSAGE, null, IdentityDisk.getPossibleDirections(), null); game.getUseItemController().useItem((IdentityDisk)item,direction); } systemMessage = "You used a "+item.toString() + ". "; } catch (NoItemException e) { systemMessage = "You have no items in your inventory! 
Please perform another action."; } catch(InvalidDirectionException e){ systemMessage = "This is not a valid direction."; } gui.repaint(); checkEnd(); } }); use.setText("Use Item"); /** * Button to end the turn. */ endTurn = gui.createButton(120, 80, 120, 40, new Runnable() { public void run() { if(!game.getEndTurnController().hasMoved()){ int answer = JOptionPane.showConfirmDialog(null, "You will lose if you did not yet move. Continue?", null, JOptionPane.YES_NO_OPTION); if(answer==0){ game.getEndTurnController().endTurn(); } } else{ int answer = JOptionPane.showConfirmDialog(null, "Are you sure you want to end this turn?", null, JOptionPane.YES_NO_OPTION); if(answer==0){ game.getEndTurnController().endTurn(); systemMessage = game.getCurrentPlayerColour() +" ended his turn." + " Turn switched."; } } gui.repaint(); checkEnd(); } }); endTurn.setText("End Turn"); /** * Start new game. */ startNewGame = gui.createButton(240, 0, 120, 40, new Runnable() { public void run() { showMainMenu(); } }); startNewGame.setText("New game"); /** * Moving East. */ e = gui.createButton(80, 40, 40, 40, new Runnable() { public void run() { move(Direction.EAST); } }); e.setImage(gui.loadImage("simplegui/objectron/arrow_E.png", 40, 40)); /** * Moving West. */ w = gui.createButton(0, 40, 40, 40, new Runnable() { public void run() { move(Direction.WEST); } }); w.setImage(gui.loadImage("simplegui/objectron/arrow_W.png", 40, 40)); /** * Moving South. */ s = gui.createButton(40, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTH); } }); s.setImage(gui.loadImage("simplegui/objectron/arrow_S.png", 40, 40)); /** * Moving South West. */ sW = gui.createButton(0, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTHWEST); } }); sW.setImage(gui.loadImage("simplegui/objectron/arrow_SW.png", 40, 40)); /** * Moving South East. */ sE = gui.createButton(80, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTHEAST); } }); sE.setImage(gui.loadImage("simplegui/objectron/arrow_SE.png", 40, 40)); /** * Moving North. */ n = gui.createButton(40, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTH); } }); n.setImage(gui.loadImage("simplegui/objectron/arrow_N.png", 40, 40)); /** * Moving North East. */ nE = gui.createButton(80, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTHEAST); } }); nE.setImage(gui.loadImage("simplegui/objectron/arrow_NE.png", 40, 40)); /** * Moving North West. */ nW = gui.createButton(0, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTHWEST); } }); nW.setImage(gui.loadImage("simplegui/objectron/arrow_NW.png", 40, 40)); ArrayList<Button> buttonList = new ArrayList<Button>(); buttonList.add(pickUp); buttonList.add(use); buttonList.add(endTurn); buttonList.add(startNewGame); buttonList.add(e); buttonList.add(w); buttonList.add(n); buttonList.add(s); buttonList.add(nE); buttonList.add(nW); buttonList.add(sE); buttonList.add(sW); buttons = buttonList.toArray(new Button[buttonList.size()]); startTime = System.currentTimeMillis(); } }); } }
true
true
public static void main(String[] args) { // All code that accesses the simple GUI must run in the AWT event // handling thread. // A simple way to achieve this is to run the entire application in the // AWT event handling thread. // This is done by simply wrapping the body of the main method in a call // of EventQueue.invokeLater. java.awt.EventQueue.invokeLater(new Runnable() { Image playerRed; Image playerBlue; Image cell; Image pf; Image finishRed; Image finishBlue; Image lightGrenade; Image wall; Image lightTrailRed; Image identityDisk; Image chargedIdentityDisk; Image teleporter; Image lightTrailBlue; Image redIndicator; Image blueIndicator; Image marker; GameController game; long startTime; long endTime; int width; int height; boolean quit; // Initial message. String systemMessage; // Game User Interface. SimpleGUI gui; Button pickUp; Button use; Button endTurn; Button startNewGame; Button e; Button w; Button n; Button nE; Button nW; Button s; Button sE; Button sW; Button[] buttons; /** * Disable all buttons except start new game */ public void disableButtons(){ for(Button b: buttons){ b.setEnabled(false); } startNewGame.setEnabled(true); } /** * Enable all buttons */ public void enableButtons(){ for(Button b: buttons){ b.setEnabled(true); } } public String getStats(){ String time = "Time: "+((endTime - startTime)/1000.0) + " s"; return time; } /** * Test if the game has ended and show the winner if that's the case. */ public void checkEnd(){ if(game.isGameEnded()){ endTime = System.currentTimeMillis(); String winMessage = game.getWinnerColour()+" has won the game!"; String stats = getStats(); JOptionPane.showMessageDialog(null, winMessage+"\n"+stats); systemMessage = "The game has ended. "+winMessage; disableButtons(); gui.repaint(); } } /** * Move the current player in the given direction * @param direction The direction to move */ public void move(Direction direction){ try { systemMessage = "You moved "+direction.toString()+". "; systemMessage += getStepOnMessage(direction); if(game.getCurrentActionsLeft() == 1){ systemMessage +="Turn switched."; } game.getMoveController().move(direction); // systemMessage += "You have " + gameFacade.getActionsLeft()+ " actions left."; } catch (InvalidMoveException e) { systemMessage = "Move forbidden!"; } gui.repaint(); checkEnd(); } private String getStepOnMessage(Direction dir) { String message = ""; Square stepOnSquare; try { stepOnSquare = game.getMoveController().getCurrentPlayerLocation().getNeighbour(dir); } catch (OutsideTheGridException e) { return message; } Item[] items = stepOnSquare.getItems(); if(!stepOnSquare.hasPowerFailure()){ if(items.length > 0){ for(Item i: stepOnSquare.getItems()){ if(i instanceof LightGrenade){ LightGrenade lg = (LightGrenade) i; LightGrenadeState lgState = lg.getState(); if(lgState instanceof ActiveLightGrenade){ message+= "You stepped on a light grenade," + "losing "+ LightGrenadeEffect.DAMAGE +" actions. "; } } else if(i instanceof Teleporter) { message += "You stepped on a teleporter and were teleported."; } else if(i instanceof IdentityDisk) { for(Effect e: stepOnSquare.getEffects()) { // There is an identity disk effect on the square if(e instanceof IdentityDiskEffect) { message += "You were shot by an identity disk, losing " + IdentityDiskEffect.DAMAGE + " actions. "; } } } else { throw new UnsupportedOperationException("Not yet implemented."); } } } return message; } if(stepOnSquare.hasPowerFailure()){ message += "You stepped on a square with a power failure. 
"; if(items.length == 0){ message += "The turn goes to the next player. "; } else{ for(Item i: stepOnSquare.getItems()){ message+= "You stepped on a "+i.toString()+". "; if(i instanceof LightGrenade){ LightGrenade lg = (LightGrenade) i; LightGrenadeState lgState =lg.getState(); if(lgState instanceof ActiveLightGrenade){ message+= "You lose "+PowerFailureLightGrenadeEffect.DAMAGE+" actions. "; } else{ message += "The turn goes to the next player."; } } } } } return message; } public void showMainMenu(){ //show the main menu Object[] options = {"Start game with default dimensions","Start custom game", "Quit"}; int answer = JOptionPane.showOptionDialog(null, "Do you want to play?", "Objectron", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE, null, //do not use a custom Icon options, //the titles of buttons null); //default button title //default width and height width = DEFAULT_WIDTH_HEIGHT; height = DEFAULT_WIDTH_HEIGHT; //ask for the width and height of the grid if(answer == 0 || answer == 1){ if(answer == 1){ while (true) { String message; message = "Choose the width of the grid."; while (true) { try { width = Integer.parseInt(JOptionPane .showInputDialog(message)); break; } catch (NumberFormatException e1) { message += "\nPlease input an integer."; continue; } } message = "Choose the height of the grid."; while (true) { try { height = Integer.parseInt(JOptionPane .showInputDialog(message)); break; } catch (NumberFormatException e1) { message += "\nPlease input an integer."; continue; } } try { game.startNewGame(width,height); systemMessage = "Welcome! Objectron starts when "+game.getCurrentPlayerColour()+" performs his first action."; if(gui!=null){ gui.resize( 40 * width, 40 * (height + 4)); enableButtons(); } break; } catch (InvalidDimensionException e1) { JOptionPane.showMessageDialog(null, "The given dimensions are invalid."); continue; } } } else if(answer == 0){ try { game.startNewGame(width,height); systemMessage = "Welcome! Objectron starts when "+game.getCurrentPlayerColour()+" performs his first action."; if(gui!=null){ gui.resize( 40 * width, 40 * (height + 4)); enableButtons(); } } catch (InvalidDimensionException e1) { // Cannot happen e1.printStackTrace(); } } } else{ if(gui!=null) gui.quit(); quit = true; } } public void run() { game = new GameController(); showMainMenu(); if(quit){ return; } gui = new SimpleGUI("Objectron", 40 * width, 40 * (height + 4)) { @Override public void paint(Graphics2D graphics) { //activate anti aliasing graphics.setRenderingHint (RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON); // Draw the squares. for (int i = 1; i < width + 1; i++) { for (int j = 1; j < height + 1; j++) { try { Square square = game.getSquareAtCoordinate(i, j); if(game.isStartingSquare(square)){ // Draw finish square. for(Player p:game.getPlayers()){ if(square.equals(p.getStartingPosition())){ if(p.getPlayerColour() == PlayerColour.RED){ graphics.drawImage(finishBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(finishRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } } else if (square.hasPowerFailure()) { graphics.drawImage(pf, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ // Draw normal square. graphics.drawImage(cell, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } if (square.marker) { graphics.drawImage(marker, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } // Set the obstacles. 
if (square.hasObstacle()) { Obstacle obstacle = square.getObstacle(); // Walls if (obstacle instanceof Wall) { graphics.drawImage(wall, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else if (square.getObstacle() instanceof LightTrail){ LightTrail lightTrail = (LightTrail) square.getObstacle(); if(lightTrail.getPlayer().getPlayerColour() == PlayerColour.RED){ graphics.drawImage(lightTrailRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(lightTrailBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } // Set the items. if (square.getNbItems()>0) { for (Item item: square.getItems()){ if (item instanceof LightGrenade) { if (((LightGrenade) item).getState().isVisible()) { graphics.drawImage(lightGrenade, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } else if (item instanceof IdentityDisk){ if(item instanceof ChargedIdentityDisk){ graphics.drawImage(chargedIdentityDisk, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(identityDisk, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } else if (item instanceof Teleporter){ graphics.drawImage(teleporter, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } // Set the players. if (square.hasPlayer()) { Player player = square.getPlayer(); if (player.getPlayerColour() == PlayerColour.RED) { // Draw the player. graphics.drawImage(playerRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } if (player.getPlayerColour() == PlayerColour.BLUE) { // Draw the player. graphics.drawImage(playerBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } catch (OutsideTheGridException e) { // Can not occur because of bounded i and j. } } } // draw the colour of the current player if (game.getCurrentPlayerColour() == PlayerColour.RED){ graphics.drawImage(redIndicator, 40, 40, 40, 40, null); } else{ graphics.drawImage(blueIndicator, 40, 40, 40, 40, null); } graphics.drawString(systemMessage, 5, 145); Font font1 = new Font("SansSerif", Font.BOLD, 30); graphics.setFont(font1); graphics.setColor(Color.WHITE); //draw the number of actions left graphics.drawString(""+game.getCurrentActionsLeft(), 53, 71); } }; cell = gui.loadImage("simplegui/objectron/cell.png", 40, 40); pf = gui.loadImage("simplegui/objectron/cellpf.png", 40, 40); wall = gui.loadImage("simplegui/objectron/wall.png", 40, 40); lightGrenade = gui.loadImage("simplegui/objectron/lightgrenade.png", 40, 40); playerRed = gui.loadImage("simplegui/objectron/player_red.png",40, 40); playerBlue = gui.loadImage("simplegui/objectron/player_blue.png", 40, 40); finishBlue = gui.loadImage("simplegui/objectron/cell_finish_blue.png", 40, 40); finishRed = gui.loadImage("simplegui/objectron/cell_finish_red.png", 40, 40); identityDisk = gui.loadImage("simplegui/objectron/idDisk.png", 40, 40); chargedIdentityDisk = gui.loadImage("simplegui/objectron/chargedIdDisk.png", 40, 40); teleporter = gui.loadImage("simplegui/objectron/teleporter.png", 40, 40); lightTrailBlue = gui.loadImage("simplegui/objectron/cell_lighttrail_blue.png", 40, 40); lightTrailRed = gui.loadImage("simplegui/objectron/cell_lighttrail_red.png", 40, 40); redIndicator = gui.loadImage("simplegui/objectron/red.png", 40, 40); blueIndicator = gui.loadImage("simplegui/objectron/blue.png", 40, 40); marker = gui.loadImage("simplegui/objectron/marker.png", 40, 40); /** * Button to pick up an item. 
*/ pickUp = gui.createButton(120, 0, 120, 40, new Runnable() { public void run() { try { PortableItem[] items = game.getPickUpItemController().getVisiblePortableItemsAtCurrentLocation(); if(items.length == 0) throw new NoItemException(); PortableItem item = (PortableItem)JOptionPane.showInputDialog( null, "Choose an item:", null, JOptionPane.PLAIN_MESSAGE, null, items, null); game.getPickUpItemController().pickUpItem(item); systemMessage = "You picked up a "+item.toString() + "."; } catch (NoItemException e) { systemMessage = "There are no items on this square! Please perform another action."; } catch (OverCapacityException e){ systemMessage = "There is no place left in your inventory. Please perform another action."; } gui.repaint(); checkEnd(); } }); pickUp.setText("Pick Up"); /** * Button to use an item. */ use = gui.createButton(120, 40, 120, 40, new Runnable() { public void run() { try { PortableItem[] items = game.getUseItemController().getItemsInInventory(); if(items.length == 0) throw new NoItemException(); PortableItem item = (PortableItem)JOptionPane.showInputDialog( null, "Choose an item:", null, JOptionPane.PLAIN_MESSAGE, null, items, null); if(!(item instanceof IdentityDisk)) game.getUseItemController().useItem(item); else{ Direction direction = (Direction)JOptionPane.showInputDialog( null, "Choose a direction:", null, JOptionPane.PLAIN_MESSAGE, null, IdentityDisk.getPossibleDirections(), null); game.getUseItemController().useItem((IdentityDisk)item,direction); } systemMessage = "You used a "+item.toString() + ". "; } catch (NoItemException e) { systemMessage = "You have no items in your inventory! Please perform another action."; } catch(InvalidDirectionException e){ systemMessage = "This is not a valid direction."; } gui.repaint(); checkEnd(); } }); use.setText("Use Item"); /** * Button to end the turn. */ endTurn = gui.createButton(120, 80, 120, 40, new Runnable() { public void run() { if(!game.getEndTurnController().hasMoved()){ int answer = JOptionPane.showConfirmDialog(null, "You will lose if you did not yet move. Continue?", null, JOptionPane.YES_NO_OPTION); if(answer==0){ game.getEndTurnController().endTurn(); } } else{ int answer = JOptionPane.showConfirmDialog(null, "Are you sure you want to end this turn?", null, JOptionPane.YES_NO_OPTION); if(answer==0){ game.getEndTurnController().endTurn(); systemMessage = game.getCurrentPlayerColour() +" ended his turn." + " Turn switched."; } } gui.repaint(); checkEnd(); } }); endTurn.setText("End Turn"); /** * Start new game. */ startNewGame = gui.createButton(240, 0, 120, 40, new Runnable() { public void run() { showMainMenu(); } }); startNewGame.setText("New game"); /** * Moving East. */ e = gui.createButton(80, 40, 40, 40, new Runnable() { public void run() { move(Direction.EAST); } }); e.setImage(gui.loadImage("simplegui/objectron/arrow_E.png", 40, 40)); /** * Moving West. */ w = gui.createButton(0, 40, 40, 40, new Runnable() { public void run() { move(Direction.WEST); } }); w.setImage(gui.loadImage("simplegui/objectron/arrow_W.png", 40, 40)); /** * Moving South. */ s = gui.createButton(40, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTH); } }); s.setImage(gui.loadImage("simplegui/objectron/arrow_S.png", 40, 40)); /** * Moving South West. */ sW = gui.createButton(0, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTHWEST); } }); sW.setImage(gui.loadImage("simplegui/objectron/arrow_SW.png", 40, 40)); /** * Moving South East. 
*/ sE = gui.createButton(80, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTHEAST); } }); sE.setImage(gui.loadImage("simplegui/objectron/arrow_SE.png", 40, 40)); /** * Moving North. */ n = gui.createButton(40, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTH); } }); n.setImage(gui.loadImage("simplegui/objectron/arrow_N.png", 40, 40)); /** * Moving North East. */ nE = gui.createButton(80, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTHEAST); } }); nE.setImage(gui.loadImage("simplegui/objectron/arrow_NE.png", 40, 40)); /** * Moving North West. */ nW = gui.createButton(0, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTHWEST); } }); nW.setImage(gui.loadImage("simplegui/objectron/arrow_NW.png", 40, 40)); ArrayList<Button> buttonList = new ArrayList<Button>(); buttonList.add(pickUp); buttonList.add(use); buttonList.add(endTurn); buttonList.add(startNewGame); buttonList.add(e); buttonList.add(w); buttonList.add(n); buttonList.add(s); buttonList.add(nE); buttonList.add(nW); buttonList.add(sE); buttonList.add(sW); buttons = buttonList.toArray(new Button[buttonList.size()]); startTime = System.currentTimeMillis(); } }); }
public static void main(String[] args) { // All code that accesses the simple GUI must run in the AWT event // handling thread. // A simple way to achieve this is to run the entire application in the // AWT event handling thread. // This is done by simply wrapping the body of the main method in a call // of EventQueue.invokeLater. java.awt.EventQueue.invokeLater(new Runnable() { Image playerRed; Image playerBlue; Image cell; Image pf; Image finishRed; Image finishBlue; Image lightGrenade; Image wall; Image lightTrailRed; Image identityDisk; Image chargedIdentityDisk; Image teleporter; Image lightTrailBlue; Image redIndicator; Image blueIndicator; Image marker; GameController game; long startTime; long endTime; int width; int height; boolean quit; // Initial message. String systemMessage; // Game User Interface. SimpleGUI gui; Button pickUp; Button use; Button endTurn; Button startNewGame; Button e; Button w; Button n; Button nE; Button nW; Button s; Button sE; Button sW; Button[] buttons; /** * Disable all buttons except start new game */ public void disableButtons(){ for(Button b: buttons){ b.setEnabled(false); } startNewGame.setEnabled(true); } /** * Enable all buttons */ public void enableButtons(){ for(Button b: buttons){ b.setEnabled(true); } } public String getStats(){ String time = "Time: "+((endTime - startTime)/1000.0) + " s"; return time; } /** * Test if the game has ended and show the winner if that's the case. */ public void checkEnd(){ if(game.isGameEnded()){ endTime = System.currentTimeMillis(); String winMessage = game.getWinnerColour()+" has won the game!"; String stats = getStats(); JOptionPane.showMessageDialog(null, winMessage+"\n"+stats); systemMessage = "The game has ended. "+winMessage; disableButtons(); gui.repaint(); } } /** * Move the current player in the given direction * @param direction The direction to move */ public void move(Direction direction){ try { systemMessage = "You moved "+direction.toString()+". "; systemMessage += getStepOnMessage(direction); if(game.getCurrentActionsLeft() == 1){ systemMessage +="Turn switched."; } game.getMoveController().move(direction); // systemMessage += "You have " + gameFacade.getActionsLeft()+ " actions left."; } catch (InvalidMoveException e) { systemMessage = "Move forbidden!"; } gui.repaint(); checkEnd(); } private String getStepOnMessage(Direction dir) { String message = ""; Square stepOnSquare; try { stepOnSquare = game.getMoveController().getCurrentPlayerLocation().getNeighbour(dir); } catch (OutsideTheGridException e) { return message; } Item[] items = stepOnSquare.getItems(); if(!stepOnSquare.hasPowerFailure()){ if(items.length > 0){ for(Item i: stepOnSquare.getItems()){ if(i instanceof LightGrenade){ LightGrenade lg = (LightGrenade) i; LightGrenadeState lgState = lg.getState(); if(lgState instanceof ActiveLightGrenade){ message+= "You stepped on a light grenade," + "losing "+ LightGrenadeEffect.DAMAGE +" actions. "; } } else if(i instanceof Teleporter) { message += "You stepped on a teleporter and were teleported."; } else if(i instanceof IdentityDisk) { for(Effect e: stepOnSquare.getEffects()) { // There is an identity disk effect on the square if(e instanceof IdentityDiskEffect) { message += "You were shot by an identity disk, losing " + IdentityDiskEffect.DAMAGE + " actions. "; } } } else { throw new UnsupportedOperationException("Not yet implemented."); } } } return message; } if(stepOnSquare.hasPowerFailure()){ message += "You stepped on a square with a power failure. 
"; if(items.length == 0){ message += "The turn goes to the next player. "; } else{ for(Item i: stepOnSquare.getItems()){ message+= "You stepped on a "+i.toString()+". "; if(i instanceof LightGrenade){ LightGrenade lg = (LightGrenade) i; LightGrenadeState lgState =lg.getState(); if(lgState instanceof ActiveLightGrenade){ message+= "You lose "+PowerFailureLightGrenadeEffect.DAMAGE+" actions. "; } else{ message += "The turn goes to the next player."; } } } } } return message; } public void showMainMenu(){ //show the main menu Object[] options = {"Start game with default dimensions","Start custom game", "Quit"}; int answer = JOptionPane.showOptionDialog(null, "Do you want to play?", "Objectron", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE, null, //do not use a custom Icon options, //the titles of buttons null); //default button title //default width and height width = DEFAULT_WIDTH_HEIGHT; height = DEFAULT_WIDTH_HEIGHT; //ask for the width and height of the grid if(answer == 0 || answer == 1){ if(answer == 1){ while (true) { String message; message = "Choose the width of the grid."; while (true) { try { width = Integer.parseInt(JOptionPane .showInputDialog(message)); break; } catch (NumberFormatException e1) { message += "\nPlease input an integer."; continue; } } message = "Choose the height of the grid."; while (true) { try { height = Integer.parseInt(JOptionPane .showInputDialog(message)); break; } catch (NumberFormatException e1) { message += "\nPlease input an integer."; continue; } } try { game.startNewGame(width,height); systemMessage = "Welcome! Objectron starts when "+game.getCurrentPlayerColour()+" performs his first action."; if(gui!=null){ gui.resize( 40 * width, 40 * (height + 4)); enableButtons(); } break; } catch (InvalidDimensionException e1) { JOptionPane.showMessageDialog(null, "The given dimensions are invalid."); continue; } } } else if(answer == 0){ try { game.startNewGame(width,height); systemMessage = "Welcome! Objectron starts when "+game.getCurrentPlayerColour()+" performs his first action."; if(gui!=null){ gui.resize( 40 * width, 40 * (height + 4)); enableButtons(); } } catch (InvalidDimensionException e1) { // Cannot happen e1.printStackTrace(); } } } else{ if(gui!=null) gui.quit(); quit = true; } } public void run() { game = new GameController(); showMainMenu(); if(quit){ return; } gui = new SimpleGUI("Objectron", 40 * width, 40 * (height + 4)) { @Override public void paint(Graphics2D graphics) { //activate anti aliasing graphics.setRenderingHint (RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON); // Draw the squares. for (int i = 1; i < width + 1; i++) { for (int j = 1; j < height + 1; j++) { try { Square square = game.getSquareAtCoordinate(i, j); if(game.isStartingSquare(square)){ // Draw finish square. for(Player p:game.getPlayers()){ if(square.equals(p.getStartingPosition())){ if(p.getPlayerColour() == PlayerColour.RED){ graphics.drawImage(finishBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(finishRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } } else if (square.hasPowerFailure()) { graphics.drawImage(pf, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ // Draw normal square. graphics.drawImage(cell, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } if (square.marker) { graphics.drawImage(marker, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } // Set the obstacles. 
if (square.hasObstacle()) { Obstacle obstacle = square.getObstacle(); // Walls if (obstacle instanceof Wall) { graphics.drawImage(wall, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else if (square.getObstacle() instanceof LightTrail){ LightTrail lightTrail = (LightTrail) square.getObstacle(); if(lightTrail.getPlayer().getPlayerColour() == PlayerColour.RED){ graphics.drawImage(lightTrailRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(lightTrailBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } // Set the items. if (square.getNbItems()>0) { for (Item item: square.getItems()){ if (item instanceof LightGrenade) { if (((LightGrenade) item).getState().isVisible()) { graphics.drawImage(lightGrenade, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } else if (item instanceof IdentityDisk){ if(item instanceof ChargedIdentityDisk){ graphics.drawImage(chargedIdentityDisk, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } else{ graphics.drawImage(identityDisk, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } else if (item instanceof Teleporter){ graphics.drawImage(teleporter, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } // Set the players. if (square.hasPlayer()) { Player player = square.getPlayer(); if (player.getPlayerColour() == PlayerColour.RED) { // Draw the player. graphics.drawImage(playerRed, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } if (player.getPlayerColour() == PlayerColour.BLUE) { // Draw the player. graphics.drawImage(playerBlue, (i - 1) * 40, (height - j + 4) * 40, 39, 39, null); } } } catch (OutsideTheGridException e) { // Can not occur because of bounded i and j. } } } // draw the colour of the current player if (game.getCurrentPlayerColour() == PlayerColour.RED){ graphics.drawImage(redIndicator, 40, 40, 40, 40, null); } else{ graphics.drawImage(blueIndicator, 40, 40, 40, 40, null); } graphics.drawString(systemMessage, 5, 145); Font font1 = new Font("SansSerif", Font.BOLD, 30); graphics.setFont(font1); graphics.setColor(Color.WHITE); //draw the number of actions left graphics.drawString(""+game.getCurrentActionsLeft(), 53, 71); } }; cell = gui.loadImage("simplegui/objectron/cell.png", 40, 40); pf = gui.loadImage("simplegui/objectron/cellpf.png", 40, 40); wall = gui.loadImage("simplegui/objectron/wall.png", 40, 40); lightGrenade = gui.loadImage("simplegui/objectron/lightgrenade.png", 40, 40); playerRed = gui.loadImage("simplegui/objectron/player_red.png",40, 40); playerBlue = gui.loadImage("simplegui/objectron/player_blue.png", 40, 40); finishBlue = gui.loadImage("simplegui/objectron/cell_finish_blue.png", 40, 40); finishRed = gui.loadImage("simplegui/objectron/cell_finish_red.png", 40, 40); identityDisk = gui.loadImage("simplegui/objectron/idDisk.png", 40, 40); chargedIdentityDisk = gui.loadImage("simplegui/objectron/chargedidDisk.png", 40, 40); teleporter = gui.loadImage("simplegui/objectron/teleporter.png", 40, 40); lightTrailBlue = gui.loadImage("simplegui/objectron/cell_lighttrail_blue.png", 40, 40); lightTrailRed = gui.loadImage("simplegui/objectron/cell_lighttrail_red.png", 40, 40); redIndicator = gui.loadImage("simplegui/objectron/red.png", 40, 40); blueIndicator = gui.loadImage("simplegui/objectron/blue.png", 40, 40); marker = gui.loadImage("simplegui/objectron/marker.png", 40, 40); /** * Button to pick up an item. 
*/ pickUp = gui.createButton(120, 0, 120, 40, new Runnable() { public void run() { try { PortableItem[] items = game.getPickUpItemController().getVisiblePortableItemsAtCurrentLocation(); if(items.length == 0) throw new NoItemException(); PortableItem item = (PortableItem)JOptionPane.showInputDialog( null, "Choose an item:", null, JOptionPane.PLAIN_MESSAGE, null, items, null); game.getPickUpItemController().pickUpItem(item); systemMessage = "You picked up a "+item.toString() + "."; } catch (NoItemException e) { systemMessage = "There are no items on this square! Please perform another action."; } catch (OverCapacityException e){ systemMessage = "There is no place left in your inventory. Please perform another action."; } gui.repaint(); checkEnd(); } }); pickUp.setText("Pick Up"); /** * Button to use an item. */ use = gui.createButton(120, 40, 120, 40, new Runnable() { public void run() { try { PortableItem[] items = game.getUseItemController().getItemsInInventory(); if(items.length == 0) throw new NoItemException(); PortableItem item = (PortableItem)JOptionPane.showInputDialog( null, "Choose an item:", null, JOptionPane.PLAIN_MESSAGE, null, items, null); if(!(item instanceof IdentityDisk)) game.getUseItemController().useItem(item); else{ Direction direction = (Direction)JOptionPane.showInputDialog( null, "Choose a direction:", null, JOptionPane.PLAIN_MESSAGE, null, IdentityDisk.getPossibleDirections(), null); game.getUseItemController().useItem((IdentityDisk)item,direction); } systemMessage = "You used a "+item.toString() + ". "; } catch (NoItemException e) { systemMessage = "You have no items in your inventory! Please perform another action."; } catch(InvalidDirectionException e){ systemMessage = "This is not a valid direction."; } gui.repaint(); checkEnd(); } }); use.setText("Use Item"); /** * Button to end the turn. */ endTurn = gui.createButton(120, 80, 120, 40, new Runnable() { public void run() { if(!game.getEndTurnController().hasMoved()){ int answer = JOptionPane.showConfirmDialog(null, "You will lose if you did not yet move. Continue?", null, JOptionPane.YES_NO_OPTION); if(answer==0){ game.getEndTurnController().endTurn(); } } else{ int answer = JOptionPane.showConfirmDialog(null, "Are you sure you want to end this turn?", null, JOptionPane.YES_NO_OPTION); if(answer==0){ game.getEndTurnController().endTurn(); systemMessage = game.getCurrentPlayerColour() +" ended his turn." + " Turn switched."; } } gui.repaint(); checkEnd(); } }); endTurn.setText("End Turn"); /** * Start new game. */ startNewGame = gui.createButton(240, 0, 120, 40, new Runnable() { public void run() { showMainMenu(); } }); startNewGame.setText("New game"); /** * Moving East. */ e = gui.createButton(80, 40, 40, 40, new Runnable() { public void run() { move(Direction.EAST); } }); e.setImage(gui.loadImage("simplegui/objectron/arrow_E.png", 40, 40)); /** * Moving West. */ w = gui.createButton(0, 40, 40, 40, new Runnable() { public void run() { move(Direction.WEST); } }); w.setImage(gui.loadImage("simplegui/objectron/arrow_W.png", 40, 40)); /** * Moving South. */ s = gui.createButton(40, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTH); } }); s.setImage(gui.loadImage("simplegui/objectron/arrow_S.png", 40, 40)); /** * Moving South West. */ sW = gui.createButton(0, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTHWEST); } }); sW.setImage(gui.loadImage("simplegui/objectron/arrow_SW.png", 40, 40)); /** * Moving South East. 
*/ sE = gui.createButton(80, 80, 40, 40, new Runnable() { public void run() { move(Direction.SOUTHEAST); } }); sE.setImage(gui.loadImage("simplegui/objectron/arrow_SE.png", 40, 40)); /** * Moving North. */ n = gui.createButton(40, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTH); } }); n.setImage(gui.loadImage("simplegui/objectron/arrow_N.png", 40, 40)); /** * Moving North East. */ nE = gui.createButton(80, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTHEAST); } }); nE.setImage(gui.loadImage("simplegui/objectron/arrow_NE.png", 40, 40)); /** * Moving North West. */ nW = gui.createButton(0, 0, 40, 40, new Runnable() { public void run() { move(Direction.NORTHWEST); } }); nW.setImage(gui.loadImage("simplegui/objectron/arrow_NW.png", 40, 40)); ArrayList<Button> buttonList = new ArrayList<Button>(); buttonList.add(pickUp); buttonList.add(use); buttonList.add(endTurn); buttonList.add(startNewGame); buttonList.add(e); buttonList.add(w); buttonList.add(n); buttonList.add(s); buttonList.add(nE); buttonList.add(nW); buttonList.add(sE); buttonList.add(sW); buttons = buttonList.toArray(new Button[buttonList.size()]); startTime = System.currentTimeMillis(); } }); }
diff --git a/junit/org/lttng/flightbox/junit/model/TestDependencyAnalysis.java b/junit/org/lttng/flightbox/junit/model/TestDependencyAnalysis.java index 47a473d..95f4aba 100644 --- a/junit/org/lttng/flightbox/junit/model/TestDependencyAnalysis.java +++ b/junit/org/lttng/flightbox/junit/model/TestDependencyAnalysis.java @@ -1,92 +1,92 @@ package org.lttng.flightbox.junit.model; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.File; import java.util.SortedSet; import org.eclipse.linuxtools.lttng.jni.exception.JniException; import org.junit.Test; import org.lttng.flightbox.dep.BlockingModel; import org.lttng.flightbox.dep.BlockingTaskListener; import org.lttng.flightbox.dep.BlockingItem; import org.lttng.flightbox.io.ModelBuilder; import org.lttng.flightbox.junit.Path; import org.lttng.flightbox.model.SystemModel; import org.lttng.flightbox.model.Task; public class TestDependencyAnalysis { @Test public void testNanosleep() throws JniException { String tracePath = new File(Path.getTraceDir(), "sleep-1x-1sec").getPath(); SystemModel model = new SystemModel(); BlockingTaskListener listener = new BlockingTaskListener(); listener.setModel(model); model.addTaskListener(listener); ModelBuilder.buildFromTrace(tracePath, model); BlockingModel bm = model.getBlockingModel(); Task foundTask = model.getLatestTaskByCmdBasename("sleep"); SortedSet<BlockingItem> taskItems = bm.getBlockingItemsForTask(foundTask); assertEquals(1, taskItems.size()); BlockingItem info = taskItems.first(); double duration = info.getEndTime() - info.getStartTime(); assertEquals(1000000000.0, duration, 10000000.0); } @Test public void testInception() throws JniException { String trace = "inception-3x-100ms"; File file = new File(Path.getTraceDir(), trace); // make sure we have this trace assertTrue("Missing trace " + trace, file.isDirectory()); String tracePath = file.getPath(); SystemModel model = new SystemModel(); BlockingTaskListener listener = new BlockingTaskListener(); listener.setModel(model); model.addTaskListener(listener); ModelBuilder.buildFromTrace(tracePath, model); BlockingModel bm = model.getBlockingModel(); // get the last spawned child Task foundTask = model.getLatestTaskByCmdBasename("inception"); SortedSet<BlockingItem> taskItems = bm.getBlockingItemsForTask(foundTask); // 100ms + 200ms + 400ms = 700ms assertEquals(1, taskItems.size()); BlockingItem info = taskItems.first(); double duration = info.getEndTime() - info.getStartTime(); assertEquals(400000000.0, duration, 10000000.0); // verify recovered blocking information Task master = foundTask.getParentProcess().getParentProcess(); SortedSet<BlockingItem> masterItems = bm.getBlockingItemsForTask(master); BlockingItem nanoSleep = null, waitPid = null; - int SYS_NANOSLEEP = 35; - int SYS_WAITPID = 61; + int SYS_NANOSLEEP = 162; + int SYS_WAITPID = 7; for (BlockingItem item: masterItems) { assertNotNull(item.getWakeUp()); if (item.getWaitingSyscall().getSyscallId() == SYS_NANOSLEEP) { nanoSleep = item; } else if (item.getWaitingSyscall().getSyscallId() == SYS_WAITPID) { waitPid = item; } } assertNotNull(nanoSleep); assertNotNull(waitPid); double p = 10000000; assertEquals(nanoSleep.getDuration(), 100000000, p); assertEquals(waitPid.getDuration(), 600000000, p); } }
true
true
public void testInception() throws JniException {
    String trace = "inception-3x-100ms";
    File file = new File(Path.getTraceDir(), trace);

    // make sure we have this trace
    assertTrue("Missing trace " + trace, file.isDirectory());

    String tracePath = file.getPath();
    SystemModel model = new SystemModel();
    BlockingTaskListener listener = new BlockingTaskListener();
    listener.setModel(model);
    model.addTaskListener(listener);

    ModelBuilder.buildFromTrace(tracePath, model);

    BlockingModel bm = model.getBlockingModel();

    // get the last spawned child
    Task foundTask = model.getLatestTaskByCmdBasename("inception");
    SortedSet<BlockingItem> taskItems = bm.getBlockingItemsForTask(foundTask);

    // 100ms + 200ms + 400ms = 700ms
    assertEquals(1, taskItems.size());
    BlockingItem info = taskItems.first();
    double duration = info.getEndTime() - info.getStartTime();
    assertEquals(400000000.0, duration, 10000000.0);

    // verify recovered blocking information
    Task master = foundTask.getParentProcess().getParentProcess();
    SortedSet<BlockingItem> masterItems = bm.getBlockingItemsForTask(master);

    BlockingItem nanoSleep = null, waitPid = null;
    int SYS_NANOSLEEP = 35;
    int SYS_WAITPID = 61;
    for (BlockingItem item: masterItems) {
        assertNotNull(item.getWakeUp());
        if (item.getWaitingSyscall().getSyscallId() == SYS_NANOSLEEP) {
            nanoSleep = item;
        } else if (item.getWaitingSyscall().getSyscallId() == SYS_WAITPID) {
            waitPid = item;
        }
    }
    assertNotNull(nanoSleep);
    assertNotNull(waitPid);

    double p = 10000000;
    assertEquals(nanoSleep.getDuration(), 100000000, p);
    assertEquals(waitPid.getDuration(), 600000000, p);
}
public void testInception() throws JniException {
    String trace = "inception-3x-100ms";
    File file = new File(Path.getTraceDir(), trace);

    // make sure we have this trace
    assertTrue("Missing trace " + trace, file.isDirectory());

    String tracePath = file.getPath();
    SystemModel model = new SystemModel();
    BlockingTaskListener listener = new BlockingTaskListener();
    listener.setModel(model);
    model.addTaskListener(listener);

    ModelBuilder.buildFromTrace(tracePath, model);

    BlockingModel bm = model.getBlockingModel();

    // get the last spawned child
    Task foundTask = model.getLatestTaskByCmdBasename("inception");
    SortedSet<BlockingItem> taskItems = bm.getBlockingItemsForTask(foundTask);

    // 100ms + 200ms + 400ms = 700ms
    assertEquals(1, taskItems.size());
    BlockingItem info = taskItems.first();
    double duration = info.getEndTime() - info.getStartTime();
    assertEquals(400000000.0, duration, 10000000.0);

    // verify recovered blocking information
    Task master = foundTask.getParentProcess().getParentProcess();
    SortedSet<BlockingItem> masterItems = bm.getBlockingItemsForTask(master);

    BlockingItem nanoSleep = null, waitPid = null;
    int SYS_NANOSLEEP = 162;
    int SYS_WAITPID = 7;
    for (BlockingItem item: masterItems) {
        assertNotNull(item.getWakeUp());
        if (item.getWaitingSyscall().getSyscallId() == SYS_NANOSLEEP) {
            nanoSleep = item;
        } else if (item.getWaitingSyscall().getSyscallId() == SYS_WAITPID) {
            waitPid = item;
        }
    }
    assertNotNull(nanoSleep);
    assertNotNull(waitPid);

    double p = 10000000;
    assertEquals(nanoSleep.getDuration(), 100000000, p);
    assertEquals(waitPid.getDuration(), 600000000, p);
}
diff --git a/src/org/omegat/gui/dialogs/LicenseDialog.java b/src/org/omegat/gui/dialogs/LicenseDialog.java index 309b062b..75bb4439 100644 --- a/src/org/omegat/gui/dialogs/LicenseDialog.java +++ b/src/org/omegat/gui/dialogs/LicenseDialog.java @@ -1,156 +1,151 @@ /************************************************************************** OmegaT - Java based Computer Assisted Translation (CAT) tool Copyright (C) 2002-2005 Keith Godfrey et al [email protected] 907.223.2039 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **************************************************************************/ package org.omegat.gui.dialogs; import java.awt.event.ActionEvent; // HP import java.awt.event.KeyEvent; // HP import javax.swing.AbstractAction; // HP import javax.swing.Action; // HP import javax.swing.JComponent; // HP import javax.swing.KeyStroke; // HP import org.omegat.util.OStrings; import org.openide.awt.Mnemonics; /** * Dialog showing GNU Public License. * * @author Maxym Mykhalchuk */ public class LicenseDialog extends javax.swing.JDialog { /** A return status code - returned if Cancel button has been pressed */ public static final int RET_CANCEL = 0; /** A return status code - returned if OK button has been pressed */ public static final int RET_OK = 1; /** Creates new form LicenseDialog */ public LicenseDialog(java.awt.Dialog parent) { super(parent, true); initComponents(); licenseTextPane.setCaretPosition(0); } /** * @return the return status of this dialog - one of RET_OK or RET_CANCEL */ public int getReturnStatus() { return returnStatus; } /** * This method is called from within the constructor to * initialize the form. */ private void initComponents() { buttonPanel = new javax.swing.JPanel(); okButton = new javax.swing.JButton(); scroll = new javax.swing.JScrollPane(); licenseTextPane = new javax.swing.JTextPane(); setTitle(OStrings.getString("LICENSEDIALOG_TITLE")); setResizable(false); addWindowListener(new java.awt.event.WindowAdapter() { public void windowClosing(java.awt.event.WindowEvent evt) { closeDialog(evt); } }); // HP // Handle escape key to close the window KeyStroke escape = KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0, false); Action escapeAction = new AbstractAction() { public void actionPerformed(ActionEvent e) { dispose(); } }; getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW). 
put(escape, "ESCAPE"); // NOI18N getRootPane().getActionMap().put("ESCAPE", escapeAction); // NOI18N // END HP buttonPanel.setLayout(new java.awt.FlowLayout(java.awt.FlowLayout.RIGHT)); Mnemonics.setLocalizedText(okButton, OStrings.getString("BUTTON_OK")); okButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { okButtonActionPerformed(evt); } }); buttonPanel.add(okButton); getContentPane().add(buttonPanel, java.awt.BorderLayout.SOUTH); licenseTextPane.setEditable(false); licenseTextPane.setText( "===================================================\n\n" + // NOI18N OStrings.getString("LICENSEDIALOG_PREFACE") + "\n\n===================================================\n\n" + // NOI18N " GNU GENERAL PUBLIC LICENSE\n Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.\n 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The licenses for most software are designed to take away your\nfreedom to share and change it. By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users. This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it. (Some other Free Software Foundation software is covered by\nthe GNU Library General Public License instead.) You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice. Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have. You must make sure that they, too, receive or can get the\nsource code. And you must show them these terms so they know their\nrights.\n\n We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware. If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n Finally, any free program is threatened constantly by software\npatents. We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary. 
To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n GNU GENERAL PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License. The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage. (Hereinafter, translation is included without limitation in\nthe term \"modification\".) Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope. The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n 1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n 2. You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n a) You must cause the modified files to carry prominent notices\n stating that you changed the files and the date of any change.\n\n b) You must cause any work that you distribute or publish, that in\n whole or in part contains or is derived from the Program or any\n part thereof, to be licensed as a whole at no charge to all third\n parties under the terms of this License.\n\n c) If the modified program normally reads commands interactively\n when run, you must cause it, when started running for such\n interactive use in the most ordinary way, to print or display an\n announcement including an appropriate copyright notice and a\n notice that there is no warranty (or else, saying that you provide\n a warranty) and that users may redistribute the program under\n these conditions, and telling the user how to view a copy of this\n License. (Exception: if the Program itself is interactive but\n does not normally print such an announcement, your work based on\n the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole. If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works. 
But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n 3. You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n a) Accompany it with the complete corresponding machine-readable\n source code, which must be distributed under the terms of Sections\n 1 and 2 above on a medium customarily used for software interchange; or,\n\n b) Accompany it with a written offer, valid for at least three\n years, to give any third party, for a charge no more than your\n cost of physically performing source distribution, a complete\n machine-readable copy of the corresponding source code, to be\n distributed under the terms of Sections 1 and 2 above on a medium\n customarily used for software interchange; or,\n\n c) Accompany it with the information you received as to the offer\n to distribute corresponding source code. (This alternative is\n allowed only for noncommercial distribution and only if you\n received the program in object code or executable form with such\n an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it. For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable. However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n 4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License. Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n 5. You are not required to accept this License, since you have not\nsigned it. However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works. 
These actions are\nprohibited by law if you do not accept this License. Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n 6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions. You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n 7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all. For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices. Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n 8. If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded. In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n 9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number. 
If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation. If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n 10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission. For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this. Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n NO WARRANTY\n\n 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n Gnomovision version 69, Copyright (C) year name of author\n Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary. Here is a sample; alter the names:\n\n Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n <signature of Ty Coon>, 1 April 1989\n Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs. If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary. If this is what you want to do, use the GNU Library General\nPublic License instead of this License.\n"); // NOI18N scroll.setViewportView(licenseTextPane); getContentPane().add(scroll, java.awt.BorderLayout.CENTER); java.awt.Dimension screenSize = java.awt.Toolkit.getDefaultToolkit().getScreenSize(); - int width = 400; - setBounds((screenSize.width-width)/2, (screenSize.height-400)/2, width, 400); - javax.swing.JScrollBar hsb = scroll.getHorizontalScrollBar(); - hsb.setValue(hsb.getMaximum()); - width=Math.min(screenSize.width-100, Math.max(width, hsb.getMaximum())+20); - setBounds((screenSize.width-width)/2, (screenSize.height-400)/2, width, 400); + setBounds((screenSize.width-600)/2, (screenSize.height-400)/2, 600, 400); } private void okButtonActionPerformed(java.awt.event.ActionEvent evt) { doClose(RET_OK); } /** Closes the dialog */ private void closeDialog(java.awt.event.WindowEvent evt) { doClose(RET_CANCEL); } private void doClose(int retStatus) { returnStatus = retStatus; setVisible(false); dispose(); } private javax.swing.JPanel buttonPanel; private javax.swing.JScrollPane scroll; private javax.swing.JTextPane licenseTextPane; private javax.swing.JButton okButton; private int returnStatus = RET_CANCEL; }
true
true
private void initComponents() { buttonPanel = new javax.swing.JPanel(); okButton = new javax.swing.JButton(); scroll = new javax.swing.JScrollPane(); licenseTextPane = new javax.swing.JTextPane(); setTitle(OStrings.getString("LICENSEDIALOG_TITLE")); setResizable(false); addWindowListener(new java.awt.event.WindowAdapter() { public void windowClosing(java.awt.event.WindowEvent evt) { closeDialog(evt); } }); // HP // Handle escape key to close the window KeyStroke escape = KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0, false); Action escapeAction = new AbstractAction() { public void actionPerformed(ActionEvent e) { dispose(); } }; getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW). put(escape, "ESCAPE"); // NOI18N getRootPane().getActionMap().put("ESCAPE", escapeAction); // NOI18N // END HP buttonPanel.setLayout(new java.awt.FlowLayout(java.awt.FlowLayout.RIGHT)); Mnemonics.setLocalizedText(okButton, OStrings.getString("BUTTON_OK")); okButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { okButtonActionPerformed(evt); } }); buttonPanel.add(okButton); getContentPane().add(buttonPanel, java.awt.BorderLayout.SOUTH); licenseTextPane.setEditable(false); licenseTextPane.setText( "===================================================\n\n" + // NOI18N OStrings.getString("LICENSEDIALOG_PREFACE") + "\n\n===================================================\n\n" + // NOI18N " GNU GENERAL PUBLIC LICENSE\n Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.\n 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The licenses for most software are designed to take away your\nfreedom to share and change it. By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users. This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it. (Some other Free Software Foundation software is covered by\nthe GNU Library General Public License instead.) You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice. Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have. You must make sure that they, too, receive or can get the\nsource code. 
And you must show them these terms so they know their\nrights.\n\n We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware. If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n Finally, any free program is threatened constantly by software\npatents. We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary. To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n GNU GENERAL PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License. The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage. (Hereinafter, translation is included without limitation in\nthe term \"modification\".) Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope. The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n 1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n 2. 
You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n a) You must cause the modified files to carry prominent notices\n stating that you changed the files and the date of any change.\n\n b) You must cause any work that you distribute or publish, that in\n whole or in part contains or is derived from the Program or any\n part thereof, to be licensed as a whole at no charge to all third\n parties under the terms of this License.\n\n c) If the modified program normally reads commands interactively\n when run, you must cause it, when started running for such\n interactive use in the most ordinary way, to print or display an\n announcement including an appropriate copyright notice and a\n notice that there is no warranty (or else, saying that you provide\n a warranty) and that users may redistribute the program under\n these conditions, and telling the user how to view a copy of this\n License. (Exception: if the Program itself is interactive but\n does not normally print such an announcement, your work based on\n the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole. If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works. But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n 3. You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n a) Accompany it with the complete corresponding machine-readable\n source code, which must be distributed under the terms of Sections\n 1 and 2 above on a medium customarily used for software interchange; or,\n\n b) Accompany it with a written offer, valid for at least three\n years, to give any third party, for a charge no more than your\n cost of physically performing source distribution, a complete\n machine-readable copy of the corresponding source code, to be\n distributed under the terms of Sections 1 and 2 above on a medium\n customarily used for software interchange; or,\n\n c) Accompany it with the information you received as to the offer\n to distribute corresponding source code. 
(This alternative is\n allowed only for noncommercial distribution and only if you\n received the program in object code or executable form with such\n an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it. For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable. However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n 4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License. Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n 5. You are not required to accept this License, since you have not\nsigned it. However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works. These actions are\nprohibited by law if you do not accept this License. Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n 6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions. You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n 7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all. 
For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices. Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n 8. If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded. In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n 9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number. If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation. If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n 10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission. For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this. Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n NO WARRANTY\n\n 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n Gnomovision version 69, Copyright (C) year name of author\n Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary. Here is a sample; alter the names:\n\n Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n <signature of Ty Coon>, 1 April 1989\n Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs. If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary. 
If this is what you want to do, use the GNU Library General\nPublic License instead of this License.\n"); // NOI18N scroll.setViewportView(licenseTextPane); getContentPane().add(scroll, java.awt.BorderLayout.CENTER); java.awt.Dimension screenSize = java.awt.Toolkit.getDefaultToolkit().getScreenSize(); int width = 400; setBounds((screenSize.width-width)/2, (screenSize.height-400)/2, width, 400); javax.swing.JScrollBar hsb = scroll.getHorizontalScrollBar(); hsb.setValue(hsb.getMaximum()); width=Math.min(screenSize.width-100, Math.max(width, hsb.getMaximum())+20); setBounds((screenSize.width-width)/2, (screenSize.height-400)/2, width, 400); }
private void initComponents() { buttonPanel = new javax.swing.JPanel(); okButton = new javax.swing.JButton(); scroll = new javax.swing.JScrollPane(); licenseTextPane = new javax.swing.JTextPane(); setTitle(OStrings.getString("LICENSEDIALOG_TITLE")); setResizable(false); addWindowListener(new java.awt.event.WindowAdapter() { public void windowClosing(java.awt.event.WindowEvent evt) { closeDialog(evt); } }); // HP // Handle escape key to close the window KeyStroke escape = KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0, false); Action escapeAction = new AbstractAction() { public void actionPerformed(ActionEvent e) { dispose(); } }; getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW). put(escape, "ESCAPE"); // NOI18N getRootPane().getActionMap().put("ESCAPE", escapeAction); // NOI18N // END HP buttonPanel.setLayout(new java.awt.FlowLayout(java.awt.FlowLayout.RIGHT)); Mnemonics.setLocalizedText(okButton, OStrings.getString("BUTTON_OK")); okButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { okButtonActionPerformed(evt); } }); buttonPanel.add(okButton); getContentPane().add(buttonPanel, java.awt.BorderLayout.SOUTH); licenseTextPane.setEditable(false); licenseTextPane.setText( "===================================================\n\n" + // NOI18N OStrings.getString("LICENSEDIALOG_PREFACE") + "\n\n===================================================\n\n" + // NOI18N " GNU GENERAL PUBLIC LICENSE\n Version 2, June 1991\n\n Copyright (C) 1989, 1991 Free Software Foundation, Inc.\n 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The licenses for most software are designed to take away your\nfreedom to share and change it. By contrast, the GNU General Public\nLicense is intended to guarantee your freedom to share and change free\nsoftware--to make sure the software is free for all its users. This\nGeneral Public License applies to most of the Free Software\nFoundation's software and to any other program whose authors commit to\nusing it. (Some other Free Software Foundation software is covered by\nthe GNU Library General Public License instead.) You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice. Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthis service if you wish), that you receive source code or can get it\nif you want it, that you can change the software or use pieces of it\nin new free programs; and that you know you can do these things.\n\n To protect your rights, we need to make restrictions that forbid\nanyone to deny you these rights or to ask you to surrender the rights.\nThese restrictions translate to certain responsibilities for you if you\ndistribute copies of the software, or if you modify it.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must give the recipients all the rights that\nyou have. You must make sure that they, too, receive or can get the\nsource code. 
And you must show them these terms so they know their\nrights.\n\n We protect your rights with two steps: (1) copyright the software, and\n(2) offer you this license which gives you legal permission to copy,\ndistribute and/or modify the software.\n\n Also, for each author's protection and ours, we want to make certain\nthat everyone understands that there is no warranty for this free\nsoftware. If the software is modified by someone else and passed on, we\nwant its recipients to know that what they have is not the original, so\nthat any problems introduced by others will not reflect on the original\nauthors' reputations.\n\n Finally, any free program is threatened constantly by software\npatents. We wish to avoid the danger that redistributors of a free\nprogram will individually obtain patent licenses, in effect making the\nprogram proprietary. To prevent this, we have made it clear that any\npatent must be licensed for everyone's free use or not licensed at all.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n GNU GENERAL PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. This License applies to any program or other work which contains\na notice placed by the copyright holder saying it may be distributed\nunder the terms of this General Public License. The \"Program\", below,\nrefers to any such program or work, and a \"work based on the Program\"\nmeans either the Program or any derivative work under copyright law:\nthat is to say, a work containing the Program or a portion of it,\neither verbatim or with modifications and/or translated into another\nlanguage. (Hereinafter, translation is included without limitation in\nthe term \"modification\".) Each licensee is addressed as \"you\".\n\nActivities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope. The act of\nrunning the Program is not restricted, and the output from the Program\nis covered only if its contents constitute a work based on the\nProgram (independent of having been made by running the Program).\nWhether that is true depends on what the Program does.\n\n 1. You may copy and distribute verbatim copies of the Program's\nsource code as you receive it, in any medium, provided that you\nconspicuously and appropriately publish on each copy an appropriate\ncopyright notice and disclaimer of warranty; keep intact all the\nnotices that refer to this License and to the absence of any warranty;\nand give any other recipients of the Program a copy of this License\nalong with the Program.\n\nYou may charge a fee for the physical act of transferring a copy, and\nyou may at your option offer warranty protection in exchange for a fee.\n\n 2. 
You may modify your copy or copies of the Program or any portion\nof it, thus forming a work based on the Program, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n a) You must cause the modified files to carry prominent notices\n stating that you changed the files and the date of any change.\n\n b) You must cause any work that you distribute or publish, that in\n whole or in part contains or is derived from the Program or any\n part thereof, to be licensed as a whole at no charge to all third\n parties under the terms of this License.\n\n c) If the modified program normally reads commands interactively\n when run, you must cause it, when started running for such\n interactive use in the most ordinary way, to print or display an\n announcement including an appropriate copyright notice and a\n notice that there is no warranty (or else, saying that you provide\n a warranty) and that users may redistribute the program under\n these conditions, and telling the user how to view a copy of this\n License. (Exception: if the Program itself is interactive but\n does not normally print such an announcement, your work based on\n the Program is not required to print an announcement.)\n\nThese requirements apply to the modified work as a whole. If\nidentifiable sections of that work are not derived from the Program,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works. But when you\ndistribute the same sections as part of a whole which is a work based\non the Program, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote it.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Program.\n\nIn addition, mere aggregation of another work not based on the Program\nwith the Program (or with a work based on the Program) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n 3. You may copy and distribute the Program (or a work based on it,\nunder Section 2) in object code or executable form under the terms of\nSections 1 and 2 above provided that you also do one of the following:\n\n a) Accompany it with the complete corresponding machine-readable\n source code, which must be distributed under the terms of Sections\n 1 and 2 above on a medium customarily used for software interchange; or,\n\n b) Accompany it with a written offer, valid for at least three\n years, to give any third party, for a charge no more than your\n cost of physically performing source distribution, a complete\n machine-readable copy of the corresponding source code, to be\n distributed under the terms of Sections 1 and 2 above on a medium\n customarily used for software interchange; or,\n\n c) Accompany it with the information you received as to the offer\n to distribute corresponding source code. 
(This alternative is\n allowed only for noncommercial distribution and only if you\n received the program in object code or executable form with such\n an offer, in accord with Subsection b above.)\n\nThe source code for a work means the preferred form of the work for\nmaking modifications to it. For an executable work, complete source\ncode means all the source code for all modules it contains, plus any\nassociated interface definition files, plus the scripts used to\ncontrol compilation and installation of the executable. However, as a\nspecial exception, the source code distributed need not include\nanything that is normally distributed (in either source or binary\nform) with the major components (compiler, kernel, and so on) of the\noperating system on which the executable runs, unless that component\nitself accompanies the executable.\n\nIf distribution of executable or object code is made by offering\naccess to copy from a designated place, then offering equivalent\naccess to copy the source code from the same place counts as\ndistribution of the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n 4. You may not copy, modify, sublicense, or distribute the Program\nexcept as expressly provided under this License. Any attempt\notherwise to copy, modify, sublicense or distribute the Program is\nvoid, and will automatically terminate your rights under this License.\nHowever, parties who have received copies, or rights, from you under\nthis License will not have their licenses terminated so long as such\nparties remain in full compliance.\n\n 5. You are not required to accept this License, since you have not\nsigned it. However, nothing else grants you permission to modify or\ndistribute the Program or its derivative works. These actions are\nprohibited by law if you do not accept this License. Therefore, by\nmodifying or distributing the Program (or any work based on the\nProgram), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Program or works based on it.\n\n 6. Each time you redistribute the Program (or any work based on the\nProgram), the recipient automatically receives a license from the\noriginal licensor to copy, distribute or modify the Program subject to\nthese terms and conditions. You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties to\nthis License.\n\n 7. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Program at all. 
For example, if a patent\nlicense would not permit royalty-free redistribution of the Program by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Program.\n\nIf any portion of this section is held invalid or unenforceable under\nany particular circumstance, the balance of the section is intended to\napply and the section as a whole is intended to apply in other\ncircumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system, which is\nimplemented by public license practices. Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n 8. If the distribution and/or use of the Program is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Program under this License\nmay add an explicit geographical distribution limitation excluding\nthose countries, so that distribution is permitted only in or among\ncountries not thus excluded. In such case, this License incorporates\nthe limitation as if written in the body of this License.\n\n 9. The Free Software Foundation may publish revised and/or new versions\nof the General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\nEach version is given a distinguishing version number. If the Program\nspecifies a version number of this License which applies to it and \"any\nlater version\", you have the option of following the terms and conditions\neither of that version or of any later version published by the Free\nSoftware Foundation. If the Program does not specify a version number of\nthis License, you may choose any version ever published by the Free Software\nFoundation.\n\n 10. If you wish to incorporate parts of the Program into other free\nprograms whose distribution conditions are different, write to the author\nto ask for permission. For software which is copyrighted by the Free\nSoftware Foundation, write to the Free Software Foundation; we sometimes\nmake exceptions for this. Our decision will be guided by the two goals\nof preserving the free status of all derivatives of our free software and\nof promoting the sharing and reuse of software generally.\n\n NO WARRANTY\n\n 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\nFOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN\nOTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\nPROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\nOR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS\nTO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE\nPROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\nREPAIR OR CORRECTION.\n\n 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\nREDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\nINCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\nOUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\nTO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\nYOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\nPROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\nAlso add information on how to contact you by electronic and paper mail.\n\nIf the program is interactive, make it output a short notice like this\nwhen it starts in an interactive mode:\n\n Gnomovision version 69, Copyright (C) year name of author\n Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. Of course, the commands you use may\nbe called something other than `show w' and `show c'; they could even be\nmouse-clicks or menu items--whatever suits your program.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the program, if\nnecessary. Here is a sample; alter the names:\n\n Yoyodyne, Inc., hereby disclaims all copyright interest in the program\n `Gnomovision' (which makes passes at compilers) written by James Hacker.\n\n <signature of Ty Coon>, 1 April 1989\n Ty Coon, President of Vice\n\nThis General Public License does not permit incorporating your program into\nproprietary programs. If your program is a subroutine library, you may\nconsider it more useful to permit linking proprietary applications with the\nlibrary. 
If this is what you want to do, use the GNU Library General\nPublic License instead of this License.\n"); // NOI18N scroll.setViewportView(licenseTextPane); getContentPane().add(scroll, java.awt.BorderLayout.CENTER); java.awt.Dimension screenSize = java.awt.Toolkit.getDefaultToolkit().getScreenSize(); setBounds((screenSize.width-600)/2, (screenSize.height-400)/2, 600, 400); }
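A note on the fix shown above (not part of the dataset row itself): the patched initComponents() drops the dynamic width calculation, which queried the horizontal scroll bar's maximum before the dialog had been laid out and called setBounds twice, and instead simply centers a fixed 600x400 window on the screen. A minimal sketch of the resulting sizing logic, assuming a Swing window where setBounds is available; WIDTH and HEIGHT are illustrative names introduced here, not identifiers from the original code:

    // Center a fixed-size window on the screen (illustrative sketch only).
    java.awt.Dimension screen = java.awt.Toolkit.getDefaultToolkit().getScreenSize();
    final int WIDTH = 600, HEIGHT = 400;
    setBounds((screen.width - WIDTH) / 2, (screen.height - HEIGHT) / 2, WIDTH, HEIGHT);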
diff --git a/AL-Game/data/scripts/system/handlers/quest/eltnen/_1035RefreshingtheSprings.java b/AL-Game/data/scripts/system/handlers/quest/eltnen/_1035RefreshingtheSprings.java index 6beea54..373d9fc 100644 --- a/AL-Game/data/scripts/system/handlers/quest/eltnen/_1035RefreshingtheSprings.java +++ b/AL-Game/data/scripts/system/handlers/quest/eltnen/_1035RefreshingtheSprings.java @@ -1,309 +1,311 @@ /* * This file is part of aion-unique <aion-unique.org>. * * aion-unique is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * aion-unique is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with aion-unique. If not, see <http://www.gnu.org/licenses/>. */ package quest.eltnen; import java.util.Collections; import com.aionemu.gameserver.model.EmotionType; import com.aionemu.gameserver.model.gameobjects.Npc; import com.aionemu.gameserver.model.gameobjects.player.Player; import com.aionemu.gameserver.model.templates.quest.QuestItems; import com.aionemu.gameserver.network.aion.serverpackets.SM_DIALOG_WINDOW; import com.aionemu.gameserver.network.aion.serverpackets.SM_EMOTION; import com.aionemu.gameserver.network.aion.serverpackets.SM_USE_OBJECT; import com.aionemu.gameserver.questEngine.handlers.QuestHandler; import com.aionemu.gameserver.questEngine.model.QuestEnv; import com.aionemu.gameserver.questEngine.model.QuestState; import com.aionemu.gameserver.questEngine.model.QuestStatus; import com.aionemu.gameserver.services.ItemService; import com.aionemu.gameserver.services.QuestService; import com.aionemu.gameserver.utils.PacketSendUtility; import com.aionemu.gameserver.utils.ThreadPoolManager; /** * @author Rhys2002 -- TODO Timer and Fail dialog when implemented * */ public class _1035RefreshingtheSprings extends QuestHandler { private final static int questId = 1035; private final static int[] npc_ids = { 203917, 203992, 700158, 203965, 203968, 203987, 700160, 203934, 700159 }; public _1035RefreshingtheSprings() { super(questId); } @Override public void register() { qe.addQuestLvlUp(questId); for(int npc_id : npc_ids) qe.setNpcQuestData(npc_id).addOnTalkEvent(questId); } @Override public boolean onLvlUpEvent(QuestEnv env) { final Player player = env.getPlayer(); final QuestState qs = player.getQuestStateList().getQuestState(questId); boolean lvlCheck = QuestService.checkLevelRequirement(questId, player.getCommonData().getLevel()); if(qs == null || qs.getStatus() != QuestStatus.LOCKED || !lvlCheck) return false; qs.setStatus(QuestStatus.START); updateQuestStatus(player, qs); return true; } @Override public boolean onDialogEvent(QuestEnv env) { final Player player = env.getPlayer(); final QuestState qs = player.getQuestStateList().getQuestState(questId); if(qs == null) return false; int var = qs.getQuestVarById(0); int targetId = 0; if(env.getVisibleObject() instanceof Npc) targetId = ((Npc) env.getVisibleObject()).getNpcId(); if(qs.getStatus() == QuestStatus.REWARD) { if(targetId == 203917) return defaultQuestEndDialog(env); } else if(qs.getStatus() != QuestStatus.START) { return false; } if(targetId == 203917) { switch(env.getDialogId()) { case 25: if(var == 0) return 
sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1011); else if(var == 4) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1352); return false; case 10000: if(var == 0) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } case 10001: if(var == 4) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203992) { switch(env.getDialogId()) { case 25: if(var == 1) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1352); else if(var == 3) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1693); case 10001: if(var == 1) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } case 10002: if(var == 3) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 700158 && var == 2) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201014) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201014, 1); - qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); + qs.setQuestVarById(0, 3); updateQuestStatus(player, qs); } }, 3000); } return false; } else if(targetId == 203965) { switch(env.getDialogId()) { case 25: if(var == 4) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2034); case 10003: if(var == 4) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203968) { switch(env.getDialogId()) { case 25: if(var == 5) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2375); case 10004: if(var == 5) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203987) { switch(env.getDialogId()) { case 25: if(var == 6) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2716); else if(var == 8) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3057); case 10005: if(var == 6) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); ItemService.addItems(player, Collections.singletonList(new QuestItems(182201024, 1))); return true; } case 10006: if(var == 8) { 
qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); ItemService.addItems(player, Collections.singletonList(new QuestItems(182201025, 1))); return true; } return false; } } else if(targetId == 700160 && var == 7) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201024) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201024, 1); qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); updateQuestStatus(player, qs); } }, 3000); } return false; } else if(targetId == 203934) { switch(env.getDialogId()) { case 25: if(var == 9) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3398); else if(var == 11) - return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3739); + return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3739); + break; case 10007: if(var == 9) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); + return true; } else if(var == 11) { - PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); qs.setStatus(QuestStatus.REWARD); updateQuestStatus(player, qs); + PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 700159 && var == 10) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201025) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201025, 1); - qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); + qs.setQuestVarById(0, 11); updateQuestStatus(player, qs); } }, 3000); } return false; } return false; } }
false
true
public boolean onDialogEvent(QuestEnv env) { final Player player = env.getPlayer(); final QuestState qs = player.getQuestStateList().getQuestState(questId); if(qs == null) return false; int var = qs.getQuestVarById(0); int targetId = 0; if(env.getVisibleObject() instanceof Npc) targetId = ((Npc) env.getVisibleObject()).getNpcId(); if(qs.getStatus() == QuestStatus.REWARD) { if(targetId == 203917) return defaultQuestEndDialog(env); } else if(qs.getStatus() != QuestStatus.START) { return false; } if(targetId == 203917) { switch(env.getDialogId()) { case 25: if(var == 0) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1011); else if(var == 4) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1352); return false; case 10000: if(var == 0) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } case 10001: if(var == 4) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203992) { switch(env.getDialogId()) { case 25: if(var == 1) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1352); else if(var == 3) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1693); case 10001: if(var == 1) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } case 10002: if(var == 3) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 700158 && var == 2) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201014) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201014, 1); qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); updateQuestStatus(player, qs); } }, 3000); } return false; } else if(targetId == 203965) { switch(env.getDialogId()) { case 25: if(var == 4) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2034); case 10003: if(var == 4) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203968) { switch(env.getDialogId()) { case 25: if(var == 5) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2375); case 10004: if(var == 5) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 
203987) { switch(env.getDialogId()) { case 25: if(var == 6) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2716); else if(var == 8) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3057); case 10005: if(var == 6) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); ItemService.addItems(player, Collections.singletonList(new QuestItems(182201024, 1))); return true; } case 10006: if(var == 8) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); ItemService.addItems(player, Collections.singletonList(new QuestItems(182201025, 1))); return true; } return false; } } else if(targetId == 700160 && var == 7) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201024) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201024, 1); qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); updateQuestStatus(player, qs); } }, 3000); } return false; } else if(targetId == 203934) { switch(env.getDialogId()) { case 25: if(var == 9) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3398); else if(var == 11) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3739); case 10007: if(var == 9) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); } else if(var == 11) { PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); qs.setStatus(QuestStatus.REWARD); updateQuestStatus(player, qs); return true; } return false; } } else if(targetId == 700159 && var == 10) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201025) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201025, 1); qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); updateQuestStatus(player, qs); } }, 3000); } return false; } return false; }
public boolean onDialogEvent(QuestEnv env) { final Player player = env.getPlayer(); final QuestState qs = player.getQuestStateList().getQuestState(questId); if(qs == null) return false; int var = qs.getQuestVarById(0); int targetId = 0; if(env.getVisibleObject() instanceof Npc) targetId = ((Npc) env.getVisibleObject()).getNpcId(); if(qs.getStatus() == QuestStatus.REWARD) { if(targetId == 203917) return defaultQuestEndDialog(env); } else if(qs.getStatus() != QuestStatus.START) { return false; } if(targetId == 203917) { switch(env.getDialogId()) { case 25: if(var == 0) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1011); else if(var == 4) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1352); return false; case 10000: if(var == 0) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } case 10001: if(var == 4) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203992) { switch(env.getDialogId()) { case 25: if(var == 1) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1352); else if(var == 3) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 1693); case 10001: if(var == 1) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } case 10002: if(var == 3) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 700158 && var == 2) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201014) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201014, 1); qs.setQuestVarById(0, 3); updateQuestStatus(player, qs); } }, 3000); } return false; } else if(targetId == 203965) { switch(env.getDialogId()) { case 25: if(var == 4) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2034); case 10003: if(var == 4) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203968) { switch(env.getDialogId()) { case 25: if(var == 5) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2375); case 10004: if(var == 5) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 203987) { 
switch(env.getDialogId()) { case 25: if(var == 6) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 2716); else if(var == 8) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3057); case 10005: if(var == 6) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); ItemService.addItems(player, Collections.singletonList(new QuestItems(182201024, 1))); return true; } case 10006: if(var == 8) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); ItemService.addItems(player, Collections.singletonList(new QuestItems(182201025, 1))); return true; } return false; } } else if(targetId == 700160 && var == 7) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201024) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201024, 1); qs.setQuestVarById(0, qs.getQuestVarById(0) + 1); updateQuestStatus(player, qs); } }, 3000); } return false; } else if(targetId == 203934) { switch(env.getDialogId()) { case 25: if(var == 9) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3398); else if(var == 11) return sendQuestDialog(player, env.getVisibleObject().getObjectId(), 3739); break; case 10007: if(var == 9) { qs.setQuestVarById(0, var + 1); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } else if(var == 11) { qs.setStatus(QuestStatus.REWARD); updateQuestStatus(player, qs); PacketSendUtility.sendPacket(player, new SM_DIALOG_WINDOW(env.getVisibleObject().getObjectId(), 10)); return true; } return false; } } else if(targetId == 700159 && var == 10) { if (env.getDialogId() == -1 && player.getInventory().getItemCountByItemId(182201025) == 1) { final int targetObjectId = env.getVisibleObject().getObjectId(); PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 1)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.NEUTRALMODE2, 0, targetObjectId), true); ThreadPoolManager.getInstance().schedule(new Runnable(){ @Override public void run() { PacketSendUtility.sendPacket(player, new SM_USE_OBJECT(player.getObjectId(), targetObjectId, 3000, 0)); PacketSendUtility.broadcastPacket(player, new SM_EMOTION(player, EmotionType.START_LOOT, 0, targetObjectId), true); player.getInventory().removeFromBagByItemId(182201025, 1); qs.setQuestVarById(0, 11); updateQuestStatus(player, qs); } }, 3000); } return false; } return false; }
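One of the differences between the two handler versions above is the branch for NPC 203934: the corrected version adds a break; after case 25 and a return true; after the var == 9 update, so a dialog id of 25 can no longer fall through into the case 10007 logic. A minimal, self-contained sketch of that fall-through behaviour (hypothetical class name and values, not taken from the quest script) is:

// Minimal sketch (hypothetical names/values, not part of the quest script)
// showing why the missing "break;" matters: when the case 25 guards all fail,
// execution falls straight through into case 10007.
public class FallThroughSketch {

    static String handle(int dialogId, int questVar) {
        StringBuilder visited = new StringBuilder();
        switch (dialogId) {
            case 25:
                if (questVar == 9) return "show dialog 3398";
                else if (questVar == 11) return "show dialog 3739";
                visited.append("case 25 ");
                // no break here -> control continues into the next case
            case 10007:
                visited.append("case 10007");
                break;
        }
        return visited.toString();
    }

    public static void main(String[] args) {
        // dialog id 25 with a quest var that matches neither guard
        System.out.println(handle(25, 5)); // prints "case 25 case 10007"
    }
}

Adding break; after the case 25 block, as the fixed handler does, confines each dialog id to its own branch.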
diff --git a/eece210_assn1/src/org/mozilla/javascript/TokenStream.java b/eece210_assn1/src/org/mozilla/javascript/TokenStream.java index 703cb16..6058062 100755 --- a/eece210_assn1/src/org/mozilla/javascript/TokenStream.java +++ b/eece210_assn1/src/org/mozilla/javascript/TokenStream.java @@ -1,754 +1,754 @@ package org.mozilla.javascript; import java.io.*; /** * This class implements the JavaScript scanner. * * It is based on the C source files jsscan.c and jsscan.h * in the jsref package. * * @see org.mozilla.javascript.Parser * * @author Mike McCabe * @author Brendan Eich */ public class TokenStream { /* * For chars - because we need something out-of-range * to check. (And checking EOF by exception is annoying.) * Note distinction from EOF token type! */ private final static int EOF_CHAR = -1; private final static char BYTE_ORDER_MARK = '\uFEFF'; TokenStream(Reader sourceReader, String sourceString, int lineno) { this.lineno = lineno; if (sourceReader != null) { this.sourceReader = sourceReader; this.sourceBuffer = new char[512]; this.sourceEnd = 0; } else { this.sourceString = sourceString; this.sourceEnd = sourceString.length(); } this.sourceCursor = this.cursor = 0; } final String getSourceString() { return sourceString; } final int getLineno() { return lineno; } public final String getString() { return string; } final char getQuoteChar() { return (char) quoteChar; } final double getNumber() { return number; } final boolean isNumberOctal() { return isOctal; } final boolean eof() { return hitEOF; } public final int getToken() throws IOException { int c; retry: for (;;) { // Eat whitespace, possibly sensitive to newlines. for (;;) { c = getChar(); if (c == EOF_CHAR) { tokenBeg = cursor - 1; tokenEnd = cursor; return Token.EOF; } else if (c == '\n') { dirtyLine = false; tokenBeg = cursor - 1; tokenEnd = cursor; return Token.EOL; } else if (!isJSSpace(c)) { if (c != '-') { dirtyLine = true; } break; } } // Assume the token will be 1 char - fixed up below. tokenBeg = cursor - 1; tokenEnd = cursor; // identifier/keyword/instanceof? // watch out for starting with a <backslash> boolean identifierStart; boolean isUnicodeEscapeStart = false; if (c == '\\') { c = getChar(); if (c == 'u') { identifierStart = true; stringBufferTop = 0; } else { identifierStart = false; ungetChar(c); c = '\\'; } } else { identifierStart = Character.isJavaIdentifierStart((char)c); if (identifierStart) { stringBufferTop = 0; addToString(c); } } if (identifierStart) { boolean containsEscape = isUnicodeEscapeStart; for (;;) { if (isUnicodeEscapeStart) { // strictly speaking we should probably push-back // all the bad characters if the <backslash>uXXXX // sequence is malformed. But since there isn't a // correct context(is there?) for a bad Unicode // escape sequence in an identifier, we can report // an error here. int escapeVal = 0; for (int i = 0; i != 4; ++i) { c = getChar(); escapeVal = Kit.xDigitToInt(c, escapeVal); // Next check takes care about c < 0 and bad escape if (escapeVal < 0) { break; } } if (escapeVal < 0) { return Token.ERROR; } addToString(escapeVal); isUnicodeEscapeStart = false; } else { c = getChar(); if (c == '\\') { c = getChar(); if (c == 'u') { isUnicodeEscapeStart = true; containsEscape = true; } else { return Token.ERROR; } } else { if (c == EOF_CHAR || c == BYTE_ORDER_MARK || !Character.isJavaIdentifierPart((char)c)) { break; } addToString(c); } } } ungetChar(c); String str = getStringFromBuffer(); this.string = (String)allStrings.intern(str); return Token.NAME; } // is it a number? 
if (isDigit(c) || (c == '.' && isDigit(peekChar()))) { isOctal = false; stringBufferTop = 0; int base = 10; while ('0' <= c && c <= '9') { addToString(c); c = getChar(); } boolean isInteger = true; ungetChar(c); String numString = getStringFromBuffer(); this.string = numString; double dval; if (!isInteger) { try { // Use Java conversion to number from string... dval = Double.valueOf(numString).doubleValue(); } catch (NumberFormatException ex) { return Token.ERROR; } } else { dval = stringToNumber(numString, 0, base); } this.number = dval; return Token.NUMBER; } switch (c) { case ';': return Token.SEMI; case '[': return Token.LB; case ']': return Token.RB; case '{': return Token.LC; case '}': return Token.RC; case '(': return Token.LP; case ')': return Token.RP; case ',': return Token.COMMA; case '?': return Token.HOOK; case ':': if (matchChar(':')) { return Token.COLONCOLON; } else { return Token.COLON; } case '.': if (matchChar('.')) { return Token.DOTDOT; } else if (matchChar('(')) { return Token.DOTQUERY; } else { return Token.DOT; } case '|': if (matchChar('|')) { return Token.OR; } else if (matchChar('=')) { return Token.ASSIGN_BITOR; } else { return Token.BITOR; } case '^': if (matchChar('=')) { return Token.ASSIGN_BITXOR; } else { return Token.BITXOR; } case '&': if (matchChar('&')) { return Token.AND; } else if (matchChar('=')) { return Token.ASSIGN_BITAND; } else { return Token.BITAND; } case '=': if (matchChar('=')) { if (matchChar('=')) { return Token.SHEQ; } else { return Token.EQ; } } else { return Token.ASSIGN; } case '!': if (matchChar('=')) { if (matchChar('=')) { return Token.SHNE; } else { return Token.NE; } } else { return Token.NOT; } case '<': if (matchChar('<')) { if (matchChar('=')) { return Token.ASSIGN_LSH; } else { return Token.LSH; } } else { if (matchChar('=')) { return Token.LE; } else { return Token.LT; } } case '>': if (matchChar('>')) { if (matchChar('>')) { if (matchChar('=')) { return Token.ASSIGN_URSH; } else { return Token.URSH; } } else { if (matchChar('=')) { return Token.ASSIGN_RSH; } else { return Token.RSH; } } } else { if (matchChar('=')) { return Token.GE; } else { return Token.GT; } } case '*': if (matchChar('=')) { return Token.ASSIGN_MUL; } else { return Token.MUL; } case '/': if (matchChar('=')) { return Token.ASSIGN_DIV; } else { return Token.DIV; } case '%': if (matchChar('=')) { return Token.ASSIGN_MOD; } else { return Token.MOD; } case '~': return Token.BITNOT; case '+': if (matchChar('=')) { return Token.ASSIGN_ADD; } else if (matchChar('+')) { return Token.INC; } else { return Token.ADD; } case '-': if (matchChar('=')) { c = Token.ASSIGN_SUB; } else if (matchChar('-')) { c = Token.DEC; } else { - c = Token.ADD; + c = Token.SUB; } return c; default: System.out.println("Illegal character"); return Token.ERROR; } } } private static boolean isAlpha(int c) { // Use 'Z' < 'a' if (c <= 'Z') { return 'A' <= c; } else { return 'a' <= c && c <= 'z'; } } static boolean isDigit(int c) { return '0' <= c && c <= '9'; } /* As defined in ECMA. jsscan.c uses C isspace() (which allows * \v, I think.) note that code in getChar() implicitly accepts * '\r' == \u000D as well. 
*/ static boolean isJSSpace(int c) { if (c <= 127) { return c == 0x20 || c == 0x9 || c == 0xC || c == 0xB; } else { return c == 0xA0 || c == BYTE_ORDER_MARK || Character.getType((char)c) == Character.SPACE_SEPARATOR; } } private static boolean isJSFormatChar(int c) { return c > 127 && Character.getType((char)c) == Character.FORMAT; } private String getStringFromBuffer() { tokenEnd = cursor; return new String(stringBuffer, 0, stringBufferTop); } private void addToString(int c) { int N = stringBufferTop; if (N == stringBuffer.length) { char[] tmp = new char[stringBuffer.length * 2]; System.arraycopy(stringBuffer, 0, tmp, 0, N); stringBuffer = tmp; } stringBuffer[N] = (char)c; stringBufferTop = N + 1; } private boolean canUngetChar() { return ungetCursor == 0 || ungetBuffer[ungetCursor - 1] != '\n'; } private void ungetChar(int c) { // can not unread past across line boundary if (ungetCursor != 0 && ungetBuffer[ungetCursor - 1] == '\n') Kit.codeBug(); ungetBuffer[ungetCursor++] = c; cursor--; } private boolean matchChar(int test) throws IOException { int c = getCharIgnoreLineEnd(); if (c == test) { tokenEnd = cursor; return true; } else { ungetCharIgnoreLineEnd(c); return false; } } private int peekChar() throws IOException { int c = getChar(); ungetChar(c); return c; } private int getChar() throws IOException { if (ungetCursor != 0) { cursor++; return ungetBuffer[--ungetCursor]; } for(;;) { int c; if (sourceString != null) { if (sourceCursor == sourceEnd) { hitEOF = true; return EOF_CHAR; } cursor++; c = sourceString.charAt(sourceCursor++); } else { if (sourceCursor == sourceEnd) { if (!fillSourceBuffer()) { hitEOF = true; return EOF_CHAR; } } cursor++; c = sourceBuffer[sourceCursor++]; } if (lineEndChar >= 0) { if (lineEndChar == '\r' && c == '\n') { lineEndChar = '\n'; continue; } lineEndChar = -1; lineStart = sourceCursor - 1; lineno++; } if (c <= 127) { if (c == '\n' || c == '\r') { lineEndChar = c; c = '\n'; } } else { if (c == BYTE_ORDER_MARK) return c; // BOM is considered whitespace if (isJSFormatChar(c)) { continue; } if (isJSLineTerminator(c)) { lineEndChar = c; c = '\n'; } } return c; } } private int getCharIgnoreLineEnd() throws IOException { if (ungetCursor != 0) { cursor++; return ungetBuffer[--ungetCursor]; } for(;;) { int c; if (sourceString != null) { if (sourceCursor == sourceEnd) { hitEOF = true; return EOF_CHAR; } cursor++; c = sourceString.charAt(sourceCursor++); } else { if (sourceCursor == sourceEnd) { if (!fillSourceBuffer()) { hitEOF = true; return EOF_CHAR; } } cursor++; c = sourceBuffer[sourceCursor++]; } if (c <= 127) { if (c == '\n' || c == '\r') { lineEndChar = c; c = '\n'; } } else { if (c == BYTE_ORDER_MARK) return c; // BOM is considered whitespace if (isJSFormatChar(c)) { continue; } if (isJSLineTerminator(c)) { lineEndChar = c; c = '\n'; } } return c; } } private void ungetCharIgnoreLineEnd(int c) { ungetBuffer[ungetCursor++] = c; cursor--; } private void skipLine() throws IOException { // skip to end of line int c; while ((c = getChar()) != EOF_CHAR && c != '\n') { } ungetChar(c); tokenEnd = cursor; } /** * Returns the offset into the current line. 
*/ final int getOffset() { int n = sourceCursor - lineStart; if (lineEndChar >= 0) { --n; } return n; } final String getLine() { if (sourceString != null) { // String case int lineEnd = sourceCursor; if (lineEndChar >= 0) { --lineEnd; } else { for (; lineEnd != sourceEnd; ++lineEnd) { int c = sourceString.charAt(lineEnd); if (isJSLineTerminator(c)) { break; } } } return sourceString.substring(lineStart, lineEnd); } else { // Reader case int lineLength = sourceCursor - lineStart; if (lineEndChar >= 0) { --lineLength; } else { // Read until the end of line for (;; ++lineLength) { int i = lineStart + lineLength; if (i == sourceEnd) { try { if (!fillSourceBuffer()) { break; } } catch (IOException ioe) { // ignore it, we're already displaying an error... break; } // i recalculuation as fillSourceBuffer can move saved // line buffer and change lineStart i = lineStart + lineLength; } int c = sourceBuffer[i]; if (isJSLineTerminator(c)) { break; } } } return new String(sourceBuffer, lineStart, lineLength); } } private boolean fillSourceBuffer() throws IOException { if (sourceString != null) Kit.codeBug(); if (sourceEnd == sourceBuffer.length) { if (lineStart != 0) { System.arraycopy(sourceBuffer, lineStart, sourceBuffer, 0, sourceEnd - lineStart); sourceEnd -= lineStart; sourceCursor -= lineStart; lineStart = 0; } else { char[] tmp = new char[sourceBuffer.length * 2]; System.arraycopy(sourceBuffer, 0, tmp, 0, sourceEnd); sourceBuffer = tmp; } } int n = sourceReader.read(sourceBuffer, sourceEnd, sourceBuffer.length - sourceEnd); if (n < 0) { return false; } sourceEnd += n; return true; } /** * Return the current position of the scanner cursor. */ public int getCursor() { return cursor; } /** * Return the absolute source offset of the last scanned token. */ public int getTokenBeg() { return tokenBeg; } /** * Return the absolute source end-offset of the last scanned token. */ public int getTokenEnd() { return tokenEnd; } /** * Return tokenEnd - tokenBeg */ public int getTokenLength() { return tokenEnd - tokenBeg; } static double stringToNumber(String s, int start, int radix) { char digitMax = '9'; int len = s.length(); int end; double sum = 0.0; for (end=start; end < len; end++) { char c = s.charAt(end); int newDigit; if ('0' <= c && c < digitMax) newDigit = c - '0'; else break; sum = sum*radix + newDigit; } if (start == end) { return NaN; } if (sum >= 9007199254740992.0) { if (radix == 10) { /* If we're accumulating a decimal number and the number * is >= 2^53, then the result from the repeated multiply-add * above may be inaccurate. Call Java to get the correct * answer. */ try { return Double.valueOf(s.substring(start, end)).doubleValue(); } catch (NumberFormatException nfe) { return NaN; } } } return sum; } public static boolean isJSLineTerminator(int c) { // Optimization for faster check for eol character: // they do not have 0xDFD0 bits set if ((c & 0xDFD0) != 0) { return false; } return c == '\n' || c == '\r' || c == 0x2028 || c == 0x2029; } // stuff other than whitespace since start of line private boolean dirtyLine; // Set this to an initial non-null value so that the Parser has // something to retrieve even if an error has occurred and no // string is found. Fosters one class of error, but saves lots of // code. 
private String string = ""; private double number; private boolean isOctal; // delimiter for last string literal scanned private int quoteChar; private char[] stringBuffer = new char[128]; private int stringBufferTop; private ObjToIntMap allStrings = new ObjToIntMap(50); // Room to backtrace from to < on failed match of the last - in <!-- private final int[] ungetBuffer = new int[3]; private int ungetCursor; private boolean hitEOF = false; private int lineStart = 0; private int lineEndChar = -1; int lineno; private String sourceString; private Reader sourceReader; private char[] sourceBuffer; private int sourceEnd; // sourceCursor is an index into a small buffer that keeps a // sliding window of the source stream. int sourceCursor; // cursor is a monotonically increasing index into the original // source stream, tracking exactly how far scanning has progressed. // Its value is the index of the next character to be scanned. int cursor; // Record start and end positions of last scanned token. int tokenBeg; int tokenEnd; public static final double NaN = Double.longBitsToDouble(0x7ff8000000000000L); }
true
true
public final int getToken() throws IOException { int c; retry: for (;;) { // Eat whitespace, possibly sensitive to newlines. for (;;) { c = getChar(); if (c == EOF_CHAR) { tokenBeg = cursor - 1; tokenEnd = cursor; return Token.EOF; } else if (c == '\n') { dirtyLine = false; tokenBeg = cursor - 1; tokenEnd = cursor; return Token.EOL; } else if (!isJSSpace(c)) { if (c != '-') { dirtyLine = true; } break; } } // Assume the token will be 1 char - fixed up below. tokenBeg = cursor - 1; tokenEnd = cursor; // identifier/keyword/instanceof? // watch out for starting with a <backslash> boolean identifierStart; boolean isUnicodeEscapeStart = false; if (c == '\\') { c = getChar(); if (c == 'u') { identifierStart = true; stringBufferTop = 0; } else { identifierStart = false; ungetChar(c); c = '\\'; } } else { identifierStart = Character.isJavaIdentifierStart((char)c); if (identifierStart) { stringBufferTop = 0; addToString(c); } } if (identifierStart) { boolean containsEscape = isUnicodeEscapeStart; for (;;) { if (isUnicodeEscapeStart) { // strictly speaking we should probably push-back // all the bad characters if the <backslash>uXXXX // sequence is malformed. But since there isn't a // correct context(is there?) for a bad Unicode // escape sequence in an identifier, we can report // an error here. int escapeVal = 0; for (int i = 0; i != 4; ++i) { c = getChar(); escapeVal = Kit.xDigitToInt(c, escapeVal); // Next check takes care about c < 0 and bad escape if (escapeVal < 0) { break; } } if (escapeVal < 0) { return Token.ERROR; } addToString(escapeVal); isUnicodeEscapeStart = false; } else { c = getChar(); if (c == '\\') { c = getChar(); if (c == 'u') { isUnicodeEscapeStart = true; containsEscape = true; } else { return Token.ERROR; } } else { if (c == EOF_CHAR || c == BYTE_ORDER_MARK || !Character.isJavaIdentifierPart((char)c)) { break; } addToString(c); } } } ungetChar(c); String str = getStringFromBuffer(); this.string = (String)allStrings.intern(str); return Token.NAME; } // is it a number? if (isDigit(c) || (c == '.' && isDigit(peekChar()))) { isOctal = false; stringBufferTop = 0; int base = 10; while ('0' <= c && c <= '9') { addToString(c); c = getChar(); } boolean isInteger = true; ungetChar(c); String numString = getStringFromBuffer(); this.string = numString; double dval; if (!isInteger) { try { // Use Java conversion to number from string... 
dval = Double.valueOf(numString).doubleValue(); } catch (NumberFormatException ex) { return Token.ERROR; } } else { dval = stringToNumber(numString, 0, base); } this.number = dval; return Token.NUMBER; } switch (c) { case ';': return Token.SEMI; case '[': return Token.LB; case ']': return Token.RB; case '{': return Token.LC; case '}': return Token.RC; case '(': return Token.LP; case ')': return Token.RP; case ',': return Token.COMMA; case '?': return Token.HOOK; case ':': if (matchChar(':')) { return Token.COLONCOLON; } else { return Token.COLON; } case '.': if (matchChar('.')) { return Token.DOTDOT; } else if (matchChar('(')) { return Token.DOTQUERY; } else { return Token.DOT; } case '|': if (matchChar('|')) { return Token.OR; } else if (matchChar('=')) { return Token.ASSIGN_BITOR; } else { return Token.BITOR; } case '^': if (matchChar('=')) { return Token.ASSIGN_BITXOR; } else { return Token.BITXOR; } case '&': if (matchChar('&')) { return Token.AND; } else if (matchChar('=')) { return Token.ASSIGN_BITAND; } else { return Token.BITAND; } case '=': if (matchChar('=')) { if (matchChar('=')) { return Token.SHEQ; } else { return Token.EQ; } } else { return Token.ASSIGN; } case '!': if (matchChar('=')) { if (matchChar('=')) { return Token.SHNE; } else { return Token.NE; } } else { return Token.NOT; } case '<': if (matchChar('<')) { if (matchChar('=')) { return Token.ASSIGN_LSH; } else { return Token.LSH; } } else { if (matchChar('=')) { return Token.LE; } else { return Token.LT; } } case '>': if (matchChar('>')) { if (matchChar('>')) { if (matchChar('=')) { return Token.ASSIGN_URSH; } else { return Token.URSH; } } else { if (matchChar('=')) { return Token.ASSIGN_RSH; } else { return Token.RSH; } } } else { if (matchChar('=')) { return Token.GE; } else { return Token.GT; } } case '*': if (matchChar('=')) { return Token.ASSIGN_MUL; } else { return Token.MUL; } case '/': if (matchChar('=')) { return Token.ASSIGN_DIV; } else { return Token.DIV; } case '%': if (matchChar('=')) { return Token.ASSIGN_MOD; } else { return Token.MOD; } case '~': return Token.BITNOT; case '+': if (matchChar('=')) { return Token.ASSIGN_ADD; } else if (matchChar('+')) { return Token.INC; } else { return Token.ADD; } case '-': if (matchChar('=')) { c = Token.ASSIGN_SUB; } else if (matchChar('-')) { c = Token.DEC; } else { c = Token.ADD; } return c; default: System.out.println("Illegal character"); return Token.ERROR; } } }
public final int getToken() throws IOException { int c; retry: for (;;) { // Eat whitespace, possibly sensitive to newlines. for (;;) { c = getChar(); if (c == EOF_CHAR) { tokenBeg = cursor - 1; tokenEnd = cursor; return Token.EOF; } else if (c == '\n') { dirtyLine = false; tokenBeg = cursor - 1; tokenEnd = cursor; return Token.EOL; } else if (!isJSSpace(c)) { if (c != '-') { dirtyLine = true; } break; } } // Assume the token will be 1 char - fixed up below. tokenBeg = cursor - 1; tokenEnd = cursor; // identifier/keyword/instanceof? // watch out for starting with a <backslash> boolean identifierStart; boolean isUnicodeEscapeStart = false; if (c == '\\') { c = getChar(); if (c == 'u') { identifierStart = true; stringBufferTop = 0; } else { identifierStart = false; ungetChar(c); c = '\\'; } } else { identifierStart = Character.isJavaIdentifierStart((char)c); if (identifierStart) { stringBufferTop = 0; addToString(c); } } if (identifierStart) { boolean containsEscape = isUnicodeEscapeStart; for (;;) { if (isUnicodeEscapeStart) { // strictly speaking we should probably push-back // all the bad characters if the <backslash>uXXXX // sequence is malformed. But since there isn't a // correct context(is there?) for a bad Unicode // escape sequence in an identifier, we can report // an error here. int escapeVal = 0; for (int i = 0; i != 4; ++i) { c = getChar(); escapeVal = Kit.xDigitToInt(c, escapeVal); // Next check takes care about c < 0 and bad escape if (escapeVal < 0) { break; } } if (escapeVal < 0) { return Token.ERROR; } addToString(escapeVal); isUnicodeEscapeStart = false; } else { c = getChar(); if (c == '\\') { c = getChar(); if (c == 'u') { isUnicodeEscapeStart = true; containsEscape = true; } else { return Token.ERROR; } } else { if (c == EOF_CHAR || c == BYTE_ORDER_MARK || !Character.isJavaIdentifierPart((char)c)) { break; } addToString(c); } } } ungetChar(c); String str = getStringFromBuffer(); this.string = (String)allStrings.intern(str); return Token.NAME; } // is it a number? if (isDigit(c) || (c == '.' && isDigit(peekChar()))) { isOctal = false; stringBufferTop = 0; int base = 10; while ('0' <= c && c <= '9') { addToString(c); c = getChar(); } boolean isInteger = true; ungetChar(c); String numString = getStringFromBuffer(); this.string = numString; double dval; if (!isInteger) { try { // Use Java conversion to number from string... 
dval = Double.valueOf(numString).doubleValue(); } catch (NumberFormatException ex) { return Token.ERROR; } } else { dval = stringToNumber(numString, 0, base); } this.number = dval; return Token.NUMBER; } switch (c) { case ';': return Token.SEMI; case '[': return Token.LB; case ']': return Token.RB; case '{': return Token.LC; case '}': return Token.RC; case '(': return Token.LP; case ')': return Token.RP; case ',': return Token.COMMA; case '?': return Token.HOOK; case ':': if (matchChar(':')) { return Token.COLONCOLON; } else { return Token.COLON; } case '.': if (matchChar('.')) { return Token.DOTDOT; } else if (matchChar('(')) { return Token.DOTQUERY; } else { return Token.DOT; } case '|': if (matchChar('|')) { return Token.OR; } else if (matchChar('=')) { return Token.ASSIGN_BITOR; } else { return Token.BITOR; } case '^': if (matchChar('=')) { return Token.ASSIGN_BITXOR; } else { return Token.BITXOR; } case '&': if (matchChar('&')) { return Token.AND; } else if (matchChar('=')) { return Token.ASSIGN_BITAND; } else { return Token.BITAND; } case '=': if (matchChar('=')) { if (matchChar('=')) { return Token.SHEQ; } else { return Token.EQ; } } else { return Token.ASSIGN; } case '!': if (matchChar('=')) { if (matchChar('=')) { return Token.SHNE; } else { return Token.NE; } } else { return Token.NOT; } case '<': if (matchChar('<')) { if (matchChar('=')) { return Token.ASSIGN_LSH; } else { return Token.LSH; } } else { if (matchChar('=')) { return Token.LE; } else { return Token.LT; } } case '>': if (matchChar('>')) { if (matchChar('>')) { if (matchChar('=')) { return Token.ASSIGN_URSH; } else { return Token.URSH; } } else { if (matchChar('=')) { return Token.ASSIGN_RSH; } else { return Token.RSH; } } } else { if (matchChar('=')) { return Token.GE; } else { return Token.GT; } } case '*': if (matchChar('=')) { return Token.ASSIGN_MUL; } else { return Token.MUL; } case '/': if (matchChar('=')) { return Token.ASSIGN_DIV; } else { return Token.DIV; } case '%': if (matchChar('=')) { return Token.ASSIGN_MOD; } else { return Token.MOD; } case '~': return Token.BITNOT; case '+': if (matchChar('=')) { return Token.ASSIGN_ADD; } else if (matchChar('+')) { return Token.INC; } else { return Token.ADD; } case '-': if (matchChar('=')) { c = Token.ASSIGN_SUB; } else if (matchChar('-')) { c = Token.DEC; } else { c = Token.SUB; } return c; default: System.out.println("Illegal character"); return Token.ERROR; } } }
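The change in this row is confined to the last arm of the '-' case: the buggy scanner returns Token.ADD for a plain minus sign, while the fixed one returns Token.SUB. A small smoke check along the following lines could exercise it; this is a hypothetical helper, assuming the package-private TokenStream(Reader, String, int) constructor and public getToken() shown above, and that the Token constants (NAME, SUB) the scanner refers to are on the classpath:

package org.mozilla.javascript;

import java.io.IOException;

// Hypothetical smoke check; it lives in the same package because the
// TokenStream constructor shown above is package-private.
public class MinusTokenCheck {
    public static void main(String[] args) throws IOException {
        TokenStream ts = new TokenStream(null, "a - b", 1);
        int first = ts.getToken();   // expected: Token.NAME for "a"
        int second = ts.getToken();  // expected: Token.SUB once the fix is applied
        System.out.println("first token:  " + (first == Token.NAME ? "NAME" : first));
        System.out.println("second token: " + (second == Token.SUB ? "SUB" : "not SUB (" + second + ")"));
    }
}

With the buggy version the second call returns Token.ADD, so a plain binary minus is mis-tokenized as an addition at the token level.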
diff --git a/src/plugins/WebOfTrust/WebOfTrust.java b/src/plugins/WebOfTrust/WebOfTrust.java index b47479cf..7fc135e4 100644 --- a/src/plugins/WebOfTrust/WebOfTrust.java +++ b/src/plugins/WebOfTrust/WebOfTrust.java @@ -1,3242 +1,3244 @@ /* This code is part of WoT, a plugin for Freenet. It is distributed * under the GNU General Public License, version 2 (or at your option * any later version). See http://www.gnu.org/ for details of the GPL. */ package plugins.WebOfTrust; import java.io.File; import java.io.IOException; import java.lang.reflect.Field; import java.net.MalformedURLException; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.Random; import plugins.WebOfTrust.Identity.FetchState; import plugins.WebOfTrust.Identity.IdentityID; import plugins.WebOfTrust.Score.ScoreID; import plugins.WebOfTrust.Trust.TrustID; import plugins.WebOfTrust.exceptions.DuplicateIdentityException; import plugins.WebOfTrust.exceptions.DuplicateScoreException; import plugins.WebOfTrust.exceptions.DuplicateTrustException; import plugins.WebOfTrust.exceptions.InvalidParameterException; import plugins.WebOfTrust.exceptions.NotInTrustTreeException; import plugins.WebOfTrust.exceptions.NotTrustedException; import plugins.WebOfTrust.exceptions.UnknownIdentityException; import plugins.WebOfTrust.introduction.IntroductionClient; import plugins.WebOfTrust.introduction.IntroductionPuzzle; import plugins.WebOfTrust.introduction.IntroductionPuzzleStore; import plugins.WebOfTrust.introduction.IntroductionServer; import plugins.WebOfTrust.introduction.OwnIntroductionPuzzle; import plugins.WebOfTrust.ui.fcp.FCPInterface; import plugins.WebOfTrust.ui.web.WebInterface; import com.db4o.Db4o; import com.db4o.ObjectContainer; import com.db4o.ObjectSet; import com.db4o.defragment.Defragment; import com.db4o.defragment.DefragmentConfig; import com.db4o.ext.ExtObjectContainer; import com.db4o.query.Query; import com.db4o.reflect.jdk.JdkReflector; import freenet.keys.FreenetURI; import freenet.keys.USK; import freenet.l10n.BaseL10n; import freenet.l10n.BaseL10n.LANGUAGE; import freenet.l10n.PluginL10n; import freenet.node.RequestClient; import freenet.pluginmanager.FredPlugin; import freenet.pluginmanager.FredPluginBaseL10n; import freenet.pluginmanager.FredPluginFCP; import freenet.pluginmanager.FredPluginL10n; import freenet.pluginmanager.FredPluginRealVersioned; import freenet.pluginmanager.FredPluginThreadless; import freenet.pluginmanager.FredPluginVersioned; import freenet.pluginmanager.PluginReplySender; import freenet.pluginmanager.PluginRespirator; import freenet.support.CurrentTimeUTC; import freenet.support.Logger; import freenet.support.Logger.LogLevel; import freenet.support.SimpleFieldSet; import freenet.support.SizeUtil; import freenet.support.api.Bucket; import freenet.support.io.FileUtil; /** * A web of trust plugin based on Freenet. * * @author xor ([email protected]), Julien Cornuwel ([email protected]) */ public class WebOfTrust implements FredPlugin, FredPluginThreadless, FredPluginFCP, FredPluginVersioned, FredPluginRealVersioned, FredPluginL10n, FredPluginBaseL10n { /* Constants */ public static final boolean FAST_DEBUG_MODE = false; /** The relative path of the plugin on Freenet's web interface */ public static final String SELF_URI = "/WebOfTrust"; /** Package-private method to allow unit tests to bypass some assert()s */ /** * The "name" of this web of trust. It is included in the document name of identity URIs. 
For an example, see the SEED_IDENTITIES * constant below. The purpose of this constant is to allow anyone to create his own custom web of trust which is completely disconnected * from the "official" web of trust of the Freenet project. It is also used as the session cookie namespace. */ public static final String WOT_NAME = "WebOfTrust"; public static final String DATABASE_FILENAME = WOT_NAME + ".db4o"; public static final int DATABASE_FORMAT_VERSION = 2; /** * The official seed identities of the WoT plugin: If a newbie wants to download the whole offficial web of trust, he needs at least one * trust list from an identity which is well-connected to the web of trust. To prevent newbies from having to add this identity manually, * the Freenet development team provides a list of seed identities - each of them is one of the developers. */ private static final String[] SEED_IDENTITIES = new String[] { "USK@QeTBVWTwBldfI-lrF~xf0nqFVDdQoSUghT~PvhyJ1NE,OjEywGD063La2H-IihD7iYtZm3rC0BP6UTvvwyF5Zh4,AQACAAE/WebOfTrust/1344", // xor "USK@z9dv7wqsxIBCiFLW7VijMGXD9Gl-EXAqBAwzQ4aq26s,4Uvc~Fjw3i9toGeQuBkDARUV5mF7OTKoAhqOA9LpNdo,AQACAAE/WebOfTrust/1270", // Toad "USK@o2~q8EMoBkCNEgzLUL97hLPdddco9ix1oAnEa~VzZtg,X~vTpL2LSyKvwQoYBx~eleI2RF6QzYJpzuenfcKDKBM,AQACAAE/WebOfTrust/9379", // Bombe // "USK@cI~w2hrvvyUa1E6PhJ9j5cCoG1xmxSooi7Nez4V2Gd4,A3ArC3rrJBHgAJV~LlwY9kgxM8kUR2pVYXbhGFtid78,AQACAAE/WebOfTrust/19", // TheSeeker. Disabled because he is using LCWoT and it does not support identity introduction ATM. "USK@D3MrAR-AVMqKJRjXnpKW2guW9z1mw5GZ9BB15mYVkVc,xgddjFHx2S~5U6PeFkwqO5V~1gZngFLoM-xaoMKSBI8,AQACAAE/WebOfTrust/4959", // zidel }; /* References from the node */ /** The node's interface to connect the plugin with the node, needed for retrieval of all other interfaces */ private PluginRespirator mPR; private static PluginL10n l10n; /* References from the plugin itself */ /* Database & configuration of the plugin */ private ExtObjectContainer mDB; private Configuration mConfig; private IntroductionPuzzleStore mPuzzleStore; /** Used for exporting identities, identity introductions and introduction puzzles to XML and importing them from XML. */ private XMLTransformer mXMLTransformer; private RequestClient mRequestClient; /* Worker objects which actually run the plugin */ /** * Periodically wakes up and inserts any OwnIdentity which needs to be inserted. */ private IdentityInserter mInserter; /** * Fetches identities when it is told to do so by the plugin: * - At startup, all known identities are fetched * - When a new identity is received from a trust list it is fetched * - When a new identity is received by the IntrouductionServer it is fetched * - When an identity is manually added it is also fetched. * - ... */ private IdentityFetcher mFetcher; /** * Uploads captchas belonging to our own identities which others can solve to get on the trust list of them. Checks whether someone * uploaded solutions for them periodically and adds the new identities if a solution is received. */ private IntroductionServer mIntroductionServer; /** * Downloads captchas which the user can solve to announce his identities on other people's trust lists, provides the interface for * the UI to obtain the captchas and enter solutions. Uploads the solutions if the UI enters them. 
*/ private IntroductionClient mIntroductionClient; /* Actual data of the WoT */ private boolean mFullScoreComputationNeeded = false; private boolean mTrustListImportInProgress = false; /* User interfaces */ private WebInterface mWebInterface; private FCPInterface mFCPInterface; /* Statistics */ private int mFullScoreRecomputationCount = 0; private long mFullScoreRecomputationMilliseconds = 0; private int mIncrementalScoreRecomputationCount = 0; private long mIncrementalScoreRecomputationMilliseconds = 0; /* These booleans are used for preventing the construction of log-strings if logging is disabled (for saving some cpu cycles) */ private static transient volatile boolean logDEBUG = false; private static transient volatile boolean logMINOR = false; static { Logger.registerClass(WebOfTrust.class); } public void runPlugin(PluginRespirator myPR) { try { Logger.normal(this, "Web Of Trust plugin version " + Version.getMarketingVersion() + " starting up..."); /* Catpcha generation needs headless mode on linux */ System.setProperty("java.awt.headless", "true"); mPR = myPR; /* TODO: This can be used for clean copies of the database to get rid of corrupted internal db4o structures. /* We should provide an option on the web interface to run this once during next startup and switch to the cloned database */ // cloneDatabase(new File(getUserDataDirectory(), DATABASE_FILENAME), new File(getUserDataDirectory(), DATABASE_FILENAME + ".clone")); mDB = openDatabase(new File(getUserDataDirectory(), DATABASE_FILENAME)); mConfig = getOrCreateConfig(); if(mConfig.getDatabaseFormatVersion() > WebOfTrust.DATABASE_FORMAT_VERSION) throw new RuntimeException("The WoT plugin's database format is newer than the WoT plugin which is being used."); mPuzzleStore = new IntroductionPuzzleStore(this); upgradeDB(); // Please ensure that no threads are using the IntroductionPuzzleStore / IdentityFetcher while this is executing. mXMLTransformer = new XMLTransformer(this); mRequestClient = new RequestClient() { public boolean persistent() { return false; } public void removeFrom(ObjectContainer container) { throw new UnsupportedOperationException(); } public boolean realTimeFlag() { return false; } }; mInserter = new IdentityInserter(this); mFetcher = new IdentityFetcher(this, getPluginRespirator()); // We only do this if debug logging is enabled since the integrity verification cannot repair anything anyway, // if the user does not read his logs there is no need to check the integrity. // TODO: Do this once every few startups and notify the user in the web ui if errors are found. if(logDEBUG) verifyDatabaseIntegrity(); // TODO: Only do this once every few startups once we are certain that score computation does not have any serious bugs. verifyAndCorrectStoredScores(); // Database is up now, integrity is checked. We can start to actually do stuff // TODO: This can be used for doing backups. 
Implement auto backup, maybe once a week or month //backupDatabase(new File(getUserDataDirectory(), DATABASE_FILENAME + ".backup")); createSeedIdentities(); Logger.normal(this, "Starting fetches of all identities..."); synchronized(this) { synchronized(mFetcher) { for(Identity identity : getAllIdentities()) { if(shouldFetchIdentity(identity)) { try { mFetcher.fetch(identity.getID()); } catch(Exception e) { Logger.error(this, "Fetching identity failed!", e); } } } } } mInserter.start(); mIntroductionServer = new IntroductionServer(this, mFetcher); mIntroductionServer.start(); mIntroductionClient = new IntroductionClient(this); mIntroductionClient.start(); mWebInterface = new WebInterface(this, SELF_URI); mFCPInterface = new FCPInterface(this); Logger.normal(this, "Web Of Trust plugin starting up completed."); } catch(RuntimeException e){ Logger.error(this, "Error during startup", e); /* We call it so the database is properly closed */ terminate(); throw e; } } /** * Constructor for being used by the node and unit tests. Does not do anything. */ public WebOfTrust() { } /** * Constructor which does not generate an IdentityFetcher, IdentityInster, IntroductionPuzzleStore, user interface, etc. * For use by the unit tests to be able to run WoT without a node. * @param databaseFilename The filename of the database. */ public WebOfTrust(String databaseFilename) { mDB = openDatabase(new File(databaseFilename)); mConfig = getOrCreateConfig(); if(mConfig.getDatabaseFormatVersion() != WebOfTrust.DATABASE_FORMAT_VERSION) throw new RuntimeException("Database format version mismatch. Found: " + mConfig.getDatabaseFormatVersion() + "; expected: " + WebOfTrust.DATABASE_FORMAT_VERSION); mPuzzleStore = new IntroductionPuzzleStore(this); mFetcher = new IdentityFetcher(this, null); } private File getUserDataDirectory() { final File wotDirectory = new File(mPR.getNode().getUserDir(), WOT_NAME); if(!wotDirectory.exists() && !wotDirectory.mkdir()) throw new RuntimeException("Unable to create directory " + wotDirectory); return wotDirectory; } private com.db4o.config.Configuration getNewDatabaseConfiguration() { com.db4o.config.Configuration cfg = Db4o.newConfiguration(); // Required config options: cfg.reflectWith(new JdkReflector(getPluginClassLoader())); // TODO: Optimization: We do explicit activation everywhere. We could change this to 0 and test whether everything still works. // Ideally, we would benchmark both 0 and 1 and make it configurable. cfg.activationDepth(1); cfg.updateDepth(1); // This must not be changed: We only activate(this, 1) before store(this). Logger.normal(this, "Default activation depth: " + cfg.activationDepth()); cfg.exceptionsOnNotStorable(true); // The shutdown hook does auto-commit. We do NOT want auto-commit: if a transaction hasn't commit()ed, it's not safe to commit it. cfg.automaticShutDown(false); // Performance config options: cfg.callbacks(false); // We don't use callbacks yet. TODO: Investigate whether we might want to use them cfg.classActivationDepthConfigurable(false); // Registration of indices (also performance) // ATTENTION: Also update cloneDatabase() when adding new classes! @SuppressWarnings("unchecked") final Class<? 
extends Persistent>[] persistentClasses = new Class[] { Configuration.class, Identity.class, OwnIdentity.class, Trust.class, Score.class, IdentityFetcher.IdentityFetcherCommand.class, IdentityFetcher.AbortFetchCommand.class, IdentityFetcher.StartFetchCommand.class, IdentityFetcher.UpdateEditionHintCommand.class, IntroductionPuzzle.class, OwnIntroductionPuzzle.class }; for(Class<? extends Persistent> clazz : persistentClasses) { boolean classHasIndex = clazz.getAnnotation(Persistent.IndexedClass.class) != null; // TODO: We enable class indexes for all classes to make sure nothing breaks because it is the db4o default, check whether enabling // them only for the classes where we need them does not cause any harm. classHasIndex = true; if(logDEBUG) Logger.debug(this, "Persistent class: " + clazz.getCanonicalName() + "; hasIndex==" + classHasIndex); // TODO: Make very sure that it has no negative side effects if we disable class indices for some classes // Maybe benchmark in comparison to a database which has class indices enabled for ALL classes. cfg.objectClass(clazz).indexed(classHasIndex); // Check the class' fields for @IndexedField annotations for(Field field : clazz.getDeclaredFields()) { if(field.getAnnotation(Persistent.IndexedField.class) != null) { if(logDEBUG) Logger.debug(this, "Registering indexed field " + clazz.getCanonicalName() + '.' + field.getName()); cfg.objectClass(clazz).objectField(field.getName()).indexed(true); } } // Check whether the class itself has an @IndexedField annotation final Persistent.IndexedField annotation = clazz.getAnnotation(Persistent.IndexedField.class); if(annotation != null) { for(String fieldName : annotation.names()) { if(logDEBUG) Logger.debug(this, "Registering indexed field " + clazz.getCanonicalName() + '.' + fieldName); cfg.objectClass(clazz).objectField(fieldName).indexed(true); } } } // TODO: We should check whether db4o inherits the indexed attribute to child classes, for example for this one: // Unforunately, db4o does not provide any way to query the indexed() property of fields, you can only set it // We might figure out whether inheritance works by writing a benchmark. return cfg; } private synchronized void restoreDatabaseBackup(File databaseFile, File backupFile) throws IOException { Logger.warning(this, "Trying to restore database backup: " + backupFile.getAbsolutePath()); if(mDB != null) throw new RuntimeException("Database is opened already!"); if(backupFile.exists()) { try { FileUtil.secureDelete(databaseFile, mPR.getNode().fastWeakRandom); } catch(IOException e) { Logger.warning(this, "Deleting of the database failed: " + databaseFile.getAbsolutePath()); } if(backupFile.renameTo(databaseFile)) { Logger.warning(this, "Backup restored!"); } else { throw new IOException("Unable to rename backup file back to database file: " + databaseFile.getAbsolutePath()); } } else { throw new IOException("Cannot restore backup, it does not exist!"); } } private synchronized void defragmentDatabase(File databaseFile) throws IOException { Logger.normal(this, "Defragmenting database ..."); if(mDB != null) throw new RuntimeException("Database is opened already!"); if(mPR == null) { Logger.normal(this, "No PluginRespirator found, probably running as unit test, not defragmenting."); return; } final Random random = mPR.getNode().fastWeakRandom; // Open it first, because defrag will throw if it needs to upgrade the file. 
{ final ObjectContainer database = Db4o.openFile(getNewDatabaseConfiguration(), databaseFile.getAbsolutePath()); // Db4o will throw during defragmentation if new fields were added to classes and we didn't initialize their values on existing // objects before defragmenting. So we just don't defragment if the database format version has changed. final boolean canDefragment = peekDatabaseFormatVersion(this, database.ext()) == WebOfTrust.DATABASE_FORMAT_VERSION; while(!database.close()); if(!canDefragment) { Logger.normal(this, "Not defragmenting, database format version changed!"); return; } if(!databaseFile.exists()) { Logger.error(this, "Database file does not exist after openFile: " + databaseFile.getAbsolutePath()); return; } } final File backupFile = new File(databaseFile.getAbsolutePath() + ".backup"); if(backupFile.exists()) { Logger.error(this, "Not defragmenting database: Backup file exists, maybe the node was shot during defrag: " + backupFile.getAbsolutePath()); return; } final File tmpFile = new File(databaseFile.getAbsolutePath() + ".temp"); FileUtil.secureDelete(tmpFile, random); /* As opposed to the default, BTreeIDMapping uses an on-disk file instead of in-memory for mapping IDs. /* Reduces memory usage during defragmentation while being slower. /* However as of db4o 7.4.63.11890, it is bugged and prevents defragmentation from succeeding for my database, so we don't use it for now. */ final DefragmentConfig config = new DefragmentConfig(databaseFile.getAbsolutePath(), backupFile.getAbsolutePath() // ,new BTreeIDMapping(tmpFile.getAbsolutePath()) ); /* Delete classes which are not known to the classloader anymore - We do NOT do this because: /* - It is buggy and causes exceptions often as of db4o 7.4.63.11890 /* - WOT has always had proper database upgrade code (function upgradeDB()) and does not rely on automatic schema evolution. /* If we need to get rid of certain objects we should do it in the database upgrade code, */ // config.storedClassFilter(new AvailableClassFilter()); config.db4oConfig(getNewDatabaseConfiguration()); try { Defragment.defrag(config); } catch (Exception e) { Logger.error(this, "Defragment failed", e); try { restoreDatabaseBackup(databaseFile, backupFile); return; } catch(IOException e2) { Logger.error(this, "Unable to restore backup", e2); throw new IOException(e); } } final long oldSize = backupFile.length(); final long newSize = databaseFile.length(); if(newSize <= 0) { Logger.error(this, "Defrag produced an empty file! Trying to restore old database file..."); databaseFile.delete(); try { restoreDatabaseBackup(databaseFile, backupFile); } catch(IOException e2) { Logger.error(this, "Unable to restore backup", e2); throw new IOException(e2); } } else { final double change = 100.0 * (((double)(oldSize - newSize)) / ((double)oldSize)); FileUtil.secureDelete(tmpFile, random); FileUtil.secureDelete(backupFile, random); Logger.normal(this, "Defragment completed. "+SizeUtil.formatSize(oldSize)+" ("+oldSize+") -> " +SizeUtil.formatSize(newSize)+" ("+newSize+") ("+(int)change+"% shrink)"); } } /** * ATTENTION: This function is duplicated in the Freetalk plugin, please backport any changes. * * Initializes the plugin's db4o database. 
*/ private synchronized ExtObjectContainer openDatabase(File file) { Logger.normal(this, "Opening database using db4o " + Db4o.version()); if(mDB != null) throw new RuntimeException("Database is opened already!"); try { defragmentDatabase(file); } catch (IOException e) { throw new RuntimeException(e); } return Db4o.openFile(getNewDatabaseConfiguration(), file.getAbsolutePath()).ext(); } /** * ATTENTION: Please ensure that no threads are using the IntroductionPuzzleStore / IdentityFetcher while this is executing. * It doesn't synchronize on the IntroductionPuzzleStore and IdentityFetcher because it assumes that they are not being used yet. * (I didn't upgrade this function to do the locking because it would be much work to test the changes for little benefit) */ @SuppressWarnings("deprecation") private synchronized void upgradeDB() { int databaseVersion = mConfig.getDatabaseFormatVersion(); if(databaseVersion == WebOfTrust.DATABASE_FORMAT_VERSION) return; // Insert upgrade code here. See Freetalk.java for a skeleton. if(databaseVersion == 1) { Logger.normal(this, "Upgrading database version " + databaseVersion); //synchronized(this) { // Already done at function level //synchronized(mPuzzleStore) { // Normally would be needed for deleteWithoutCommit(Identity) but IntroductionClient/Server are not running yet //synchronized(mFetcher) { // Normally would be needed for deleteWithoutCommit(Identity) but the IdentityFetcher is not running yet synchronized(Persistent.transactionLock(mDB)) { try { Logger.normal(this, "Generating Score IDs..."); for(Score score : getAllScores()) { score.generateID(); score.storeWithoutCommit(); } Logger.normal(this, "Generating Trust IDs..."); for(Trust trust : getAllTrusts()) { trust.generateID(); trust.storeWithoutCommit(); } Logger.normal(this, "Searching for identities with mixed up insert/request URIs..."); for(Identity identity : getAllIdentities()) { try { USK.create(identity.getRequestURI()); } catch (MalformedURLException e) { if(identity instanceof OwnIdentity) { Logger.error(this, "Insert URI specified as request URI for OwnIdentity, not correcting the URIs as the insert URI" + "might have been published by solving captchas - the identity could be compromised: " + identity); } else { Logger.error(this, "Insert URI specified as request URI for non-own Identity, deleting: " + identity); deleteWithoutCommit(identity); } } } mConfig.setDatabaseFormatVersion(++databaseVersion); mConfig.storeAndCommit(); Logger.normal(this, "Upgraded database to version " + databaseVersion); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } //} } if(databaseVersion != WebOfTrust.DATABASE_FORMAT_VERSION) throw new RuntimeException("Your database is too outdated to be upgraded automatically, please create a new one by deleting " + DATABASE_FILENAME + ". Contact the developers if you really need your old data."); } /** * DO NOT USE THIS FUNCTION ON A DATABASE WHICH YOU WANT TO CONTINUE TO USE! * * Debug function for finding object leaks in the database. * * - Deletes all identities in the database - This should delete ALL objects in the database. * - Then it checks for whether any objects still exist - those are leaks. */ private synchronized void checkForDatabaseLeaks() { Logger.normal(this, "Checking for database leaks... 
This will delete all identities!"); { Logger.debug(this, "Checking FetchState leakage..."); final Query query = mDB.query(); query.constrain(FetchState.class); @SuppressWarnings("unchecked") ObjectSet<FetchState> result = (ObjectSet<FetchState>)query.execute(); for(FetchState state : result) { Logger.debug(this, "Checking " + state); final Query query2 = mDB.query(); query2.constrain(Identity.class); query.descend("mCurrentEditionFetchState").constrain(state).identity(); @SuppressWarnings("unchecked") ObjectSet<FetchState> result2 = (ObjectSet<FetchState>)query.execute(); switch(result2.size()) { case 0: Logger.error(this, "Found leaked FetchState!"); break; case 1: break; default: Logger.error(this, "Found re-used FetchState, count: " + result2.size()); break; } } Logger.debug(this, "Finished checking FetchState leakage, amount:" + result.size()); } Logger.normal(this, "Deleting ALL identities..."); synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { try { beginTrustListImport(); for(Identity identity : getAllIdentities()) { deleteWithoutCommit(identity); } finishTrustListImport(); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "Deleting ALL identities finished."); Query query = mDB.query(); query.constrain(Object.class); @SuppressWarnings("unchecked") ObjectSet<Object> result = query.execute(); for(Object leak : result) { Logger.error(this, "Found leaked object: " + leak); } Logger.warning(this, "Finished checking for database leaks. This database is empty now, delete it."); } private synchronized boolean verifyDatabaseIntegrity() { deleteDuplicateObjects(); deleteOrphanObjects(); Logger.debug(this, "Testing database integrity..."); final Query q = mDB.query(); q.constrain(Persistent.class); boolean result = true; for(final Persistent p : new Persistent.InitializingObjectSet<Persistent>(this, q)) { try { p.startupDatabaseIntegrityTest(); } catch(Exception e) { result = false; try { Logger.error(this, "Integrity test failed for " + p, e); } catch(Exception e2) { Logger.error(this, "Integrity test failed for Persistent of class " + p.getClass(), e); Logger.error(this, "Exception thrown by toString() was:", e2); } } } Logger.debug(this, "Database integrity test finished."); return result; } /** * Does not do proper synchronization! Only use it in single-thread-mode during startup. * * Does a backup of the database using db4o's backup mechanism. * * This will NOT fix corrupted internal structures of databases - use cloneDatabase if you need to fix your database. 
*/ private synchronized void backupDatabase(File newDatabase) { Logger.normal(this, "Backing up database to " + newDatabase.getAbsolutePath()); if(newDatabase.exists()) throw new RuntimeException("Target exists already: " + newDatabase.getAbsolutePath()); WebOfTrust backup = null; boolean success = false; try { mDB.backup(newDatabase.getAbsolutePath()); if(logDEBUG) { backup = new WebOfTrust(newDatabase.getAbsolutePath()); // We do not throw to make the clone mechanism more robust in case it is being used for creating backups Logger.debug(this, "Checking database integrity of clone..."); if(backup.verifyDatabaseIntegrity()) Logger.debug(this, "Checking database integrity of clone finished."); else Logger.error(this, "Database integrity check of clone failed!"); Logger.debug(this, "Checking this.equals(clone)..."); if(equals(backup)) Logger.normal(this, "Clone is equal!"); else Logger.error(this, "Clone is not equal!"); } success = true; } finally { if(backup != null) backup.terminate(); if(!success) newDatabase.delete(); } Logger.normal(this, "Backing up database finished."); } /** * Does not do proper synchronization! Only use it in single-thread-mode during startup. * * Creates a clone of the source database by reading all objects of it into memory and then writing them out to the target database. * Does NOT copy the Configuration, the IntroductionPuzzles or the IdentityFetcher command queue. * * The difference to backupDatabase is that it does NOT use db4o's backup mechanism, instead it creates the whole database from scratch. * This is useful because the backup mechanism of db4o does nothing but copying the raw file: * It wouldn't fix databases which cannot be defragmented anymore due to internal corruption. * - Databases which were cloned by this function CAN be defragmented even if the original database couldn't. * * HOWEVER this function uses lots of memory as the whole database is copied into memory. */ private synchronized void cloneDatabase(File sourceDatabase, File targetDatabase) { Logger.normal(this, "Cloning " + sourceDatabase.getAbsolutePath() + " to " + targetDatabase.getAbsolutePath()); if(targetDatabase.exists()) throw new RuntimeException("Target exists already: " + targetDatabase.getAbsolutePath()); WebOfTrust original = null; WebOfTrust clone = null; boolean success = false; try { original = new WebOfTrust(sourceDatabase.getAbsolutePath()); // We need to copy all objects into memory and then close & unload the source database before writing the objects to the target one. // - I tried implementing this function in a way where it directly takes the objects from the source database and stores them // in the target database while the source is still open. This did not work: Identity objects disappeared magically, resulting // in Trust objects .storeWithoutCommit throwing "Mandatory object not found" on their associated identities. 
final HashSet<Identity> allIdentities = new HashSet<Identity>(original.getAllIdentities()); final HashSet<Trust> allTrusts = new HashSet<Trust>(original.getAllTrusts()); final HashSet<Score> allScores = new HashSet<Score>(original.getAllScores()); for(Identity identity : allIdentities) { identity.checkedActivate(16); identity.mWebOfTrust = null; identity.mDB = null; } for(Trust trust : allTrusts) { trust.checkedActivate(16); trust.mWebOfTrust = null; trust.mDB = null; } for(Score score : allScores) { score.checkedActivate(16); score.mWebOfTrust = null; score.mDB = null; } original.terminate(); original = null; System.gc(); // Now we write out the in-memory copies ... clone = new WebOfTrust(targetDatabase.getAbsolutePath()); for(Identity identity : allIdentities) { identity.initializeTransient(clone); identity.storeWithoutCommit(); } Persistent.checkedCommit(clone.getDatabase(), clone); for(Trust trust : allTrusts) { trust.initializeTransient(clone); trust.storeWithoutCommit(); } Persistent.checkedCommit(clone.getDatabase(), clone); for(Score score : allScores) { score.initializeTransient(clone); score.storeWithoutCommit(); } Persistent.checkedCommit(clone.getDatabase(), clone); // And because cloning is a complex operation we do a mandatory database integrity check Logger.normal(this, "Checking database integrity of clone..."); if(clone.verifyDatabaseIntegrity()) Logger.normal(this, "Checking database integrity of clone finished."); else throw new RuntimeException("Database integrity check of clone failed!"); // ... and also test whether the Web Of Trust is equals() to the clone. This does a deep check of all identities, scores & trusts! original = new WebOfTrust(sourceDatabase.getAbsolutePath()); Logger.normal(this, "Checking original.equals(clone)..."); if(original.equals(clone)) Logger.normal(this, "Clone is equal!"); else throw new RuntimeException("Clone is not equal!"); success = true; } finally { if(original != null) original.terminate(); if(clone != null) clone.terminate(); if(!success) targetDatabase.delete(); } Logger.normal(this, "Cloning database finished."); } /** * Recomputes the {@link Score} of all identities and checks whether the score which is stored in the database is correct. * Incorrect scores are corrected & stored. * * The function is synchronized and does a transaction, no outer synchronization is needed. * ATTENTION: It is NOT synchronized on the IntroductionPuzzleStore or the IdentityFetcher. They must NOT be running yet when using this function! */ protected synchronized void verifyAndCorrectStoredScores() { Logger.normal(this, "Veriying all stored scores ..."); synchronized(Persistent.transactionLock(mDB)) { try { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } Logger.normal(this, "Veriying all stored scores finished."); } /** * Debug function for deleting duplicate identities etc. 
which might have been created due to bugs :) */ private synchronized void deleteDuplicateObjects() { synchronized(mPuzzleStore) { // Needed for deleteIdentity() synchronized(mFetcher) { // // Needed for deleteIdentity() synchronized(Persistent.transactionLock(mDB)) { try { HashSet<String> deleted = new HashSet<String>(); if(logDEBUG) Logger.debug(this, "Searching for duplicate identities ..."); for(Identity identity : getAllIdentities()) { Query q = mDB.query(); q.constrain(Identity.class); q.descend("mID").constrain(identity.getID()); q.constrain(identity).identity().not(); ObjectSet<Identity> duplicates = new Persistent.InitializingObjectSet<Identity>(this, q); for(Identity duplicate : duplicates) { if(deleted.contains(duplicate.getID()) == false) { Logger.error(duplicate, "Deleting duplicate identity " + duplicate.getRequestURI()); deleteWithoutCommit(duplicate); Persistent.checkedCommit(mDB, this); } } deleted.add(identity.getID()); } Persistent.checkedCommit(mDB, this); if(logDEBUG) Logger.debug(this, "Finished searching for duplicate identities."); } catch(RuntimeException e) { Persistent.checkedRollback(mDB, this, e); } } } } // synchronized(this) { // For removeTrustWithoutCommit. Done at function level already. synchronized(mFetcher) { // For removeTrustWithoutCommit synchronized(Persistent.transactionLock(mDB)) { try { if(logDEBUG) Logger.debug(this, "Searching for duplicate Trust objects ..."); boolean duplicateTrustFound = false; for(OwnIdentity truster : getAllOwnIdentities()) { HashSet<String> givenTo = new HashSet<String>(); for(Trust trust : getGivenTrusts(truster)) { if(givenTo.contains(trust.getTrustee().getID()) == false) givenTo.add(trust.getTrustee().getID()); else { Logger.error(this, "Deleting duplicate given trust:" + trust); removeTrustWithoutCommit(trust); duplicateTrustFound = true; } } } if(duplicateTrustFound) { computeAllScoresWithoutCommit(); } Persistent.checkedCommit(mDB, this); if(logDEBUG) Logger.debug(this, "Finished searching for duplicate trust objects."); } catch(RuntimeException e) { Persistent.checkedRollback(mDB, this, e); } } // synchronized(Persistent.transactionLock(mDB)) { } // synchronized(mFetcher) { /* TODO: Also delete duplicate score */ } /** * Debug function for deleting trusts or scores of which one of the involved partners is missing. */ private synchronized void deleteOrphanObjects() { synchronized(Persistent.transactionLock(mDB)) { try { boolean orphanTrustFound = false; Query q = mDB.query(); q.constrain(Trust.class); q.descend("mTruster").constrain(null).identity().or(q.descend("mTrustee").constrain(null).identity()); ObjectSet<Trust> orphanTrusts = new Persistent.InitializingObjectSet<Trust>(this, q); for(Trust trust : orphanTrusts) { if(trust.getTruster() != null && trust.getTrustee() != null) { // TODO: Remove this workaround for the db4o bug as soon as we are sure that it does not happen anymore. 
Logger.error(this, "Db4o bug: constrain(null).identity() did not work for " + trust); continue; } Logger.error(trust, "Deleting orphan trust, truster = " + trust.getTruster() + ", trustee = " + trust.getTrustee()); orphanTrustFound = true; trust.deleteWithoutCommit(); } if(orphanTrustFound) { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } } catch(Exception e) { Persistent.checkedRollback(mDB, this, e); } } synchronized(Persistent.transactionLock(mDB)) { try { boolean orphanScoresFound = false; Query q = mDB.query(); q.constrain(Score.class); q.descend("mTruster").constrain(null).identity().or(q.descend("mTrustee").constrain(null).identity()); ObjectSet<Score> orphanScores = new Persistent.InitializingObjectSet<Score>(this, q); for(Score score : orphanScores) { if(score.getTruster() != null && score.getTrustee() != null) { // TODO: Remove this workaround for the db4o bug as soon as we are sure that it does not happen anymore. Logger.error(this, "Db4o bug: constrain(null).identity() did not work for " + score); continue; } Logger.error(score, "Deleting orphan score, truster = " + score.getTruster() + ", trustee = " + score.getTrustee()); orphanScoresFound = true; score.deleteWithoutCommit(); } if(orphanScoresFound) { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } } catch(Exception e) { Persistent.checkedRollback(mDB, this, e); } } } /** * Warning: This function is not synchronized, use it only in single threaded mode. * @return The WOT database format version of the given database. -1 if there is no Configuration stored in it or multiple configurations exist. */ @SuppressWarnings("deprecation") private static int peekDatabaseFormatVersion(WebOfTrust wot, ExtObjectContainer database) { final Query query = database.query(); query.constrain(Configuration.class); @SuppressWarnings("unchecked") ObjectSet<Configuration> result = (ObjectSet<Configuration>)query.execute(); switch(result.size()) { case 1: { final Configuration config = (Configuration)result.next(); config.initializeTransient(wot, database); // For the HashMaps to stay alive we need to activate to full depth. config.checkedActivate(4); return config.getDatabaseFormatVersion(); } default: return -1; } } /** * Loads an existing Config object from the database and adds any missing default values to it, creates and stores a new one if none exists. * @return The config object. */ private synchronized Configuration getOrCreateConfig() { final Query query = mDB.query(); query.constrain(Configuration.class); final ObjectSet<Configuration> result = new Persistent.InitializingObjectSet<Configuration>(this, query); switch(result.size()) { case 1: { final Configuration config = result.next(); // For the HashMaps to stay alive we need to activate to full depth. config.checkedActivate(4); config.setDefaultValues(false); config.storeAndCommit(); return config; } case 0: { final Configuration config = new Configuration(this); config.initializeTransient(this); config.storeAndCommit(); return config; } default: throw new RuntimeException("Multiple config objects found: " + result.size()); } } /** Capacity is the maximum amount of points an identity can give to an other by trusting it. * * Values choice : * Advogato Trust metric recommends that values decrease by rounded 2.5 times. * This makes sense, making the need of 3 N+1 ranked people to overpower * the trust given by a N ranked identity. 
* * Number of ranks choice : * When someone creates a fresh identity, he gets the seed identity at * rank 1 and freenet developers at rank 2. That means that * he will see people that were : * - given 7 trust by freenet devs (rank 2) * - given 17 trust by rank 3 * - given 50 trust by rank 4 * - given 100 trust by rank 5 and above. * This makes the range small enough to prevent a newbie * from even seeing spam, and large enough to make him see a reasonable part * of the community right out-of-the-box. * Of course, as soon as he starts to give trust, he will put more * people at rank 1 and enlarge his WoT. */ protected static final int capacities[] = { 100,// Rank 0 : Own identities 40, // Rank 1 : Identities directly trusted by own identities 16, // Rank 2 : Identities trusted by rank 1 identities 6, // So on... 2, 1 // Every identity above rank 5 can give 1 point }; // Identities with negative score have zero capacity
/** * Computes the capacity of a truster. The capacity is a weight function in percent which is used to decide how many * trust points an identity can add to the score of identities which it has assigned trust values to. * The higher the rank of an identity, the lower its capacity. * * If the rank of the identity is Integer.MAX_VALUE (infinite, this means it has only received negative or 0 trust values from identities with rank >= 0 and less * than infinite) or -1 (this means that it has only received trust values from identities with infinite rank) then its capacity is 0. * * If the truster has assigned a trust value to the trustee the capacity will be computed only from that trust value: * The decision of the truster should always overpower the view of remote identities. * * Notice that 0 is included in infinite rank to prevent identities which have only solved introduction puzzles from having a capacity. * * @param truster The {@link OwnIdentity} in whose trust tree the capacity shall be computed * @param trustee The {@link Identity} of which the capacity shall be computed. * @param rank The rank of the identity. The rank is the distance in trust steps from the OwnIdentity which views the web of trust, * - its rank is 0, the rank of its trustees is 1 and so on. Must be -1 if the trustee has no rank in the tree owner's view. */ protected int computeCapacity(OwnIdentity truster, Identity trustee, int rank) { if(truster == trustee) return 100; try { if(getTrust(truster, trustee).getValue() <= 0) { // Security check, if rank computation breaks this will hit. assert(rank == Integer.MAX_VALUE); return 0; } } catch(NotTrustedException e) { } if(rank == -1 || rank == Integer.MAX_VALUE) return 0; return (rank < capacities.length) ? capacities[rank] : 1; }
/** * Reference-implementation of score computation. This means:<br /> * - It is not used by the real WoT code because it's slow<br /> * - It is used by unit tests (and WoT) to check whether the real implementation works<br /> * - It is the function which you should read if you want to understand how WoT works.<br /> * * Computes all rank and score values and checks whether the database is correct. If wrong values are found, they are corrected.<br /> * * There was a bug in the score computation for a long time which resulted in wrong computation when trust values were removed under certain conditions.<br /> * * Further, rank values are shortest paths and the path-finding algorithm is not executed from the source * to the target upon score computation: It uses the rank of the neighbor nodes to find a shortest path.
* Therefore, the algorithm is very vulnerable to bugs since one wrong value will stay in the database * and affect many others. So it is useful to have this function. * * @return True if all stored scores were correct. False if there were any errors in stored scores. */ protected synchronized boolean computeAllScoresWithoutCommit() { if(logMINOR) Logger.minor(this, "Doing a full computation of all Scores..."); final long beginTime = CurrentTimeUTC.getInMillis(); boolean returnValue = true; final ObjectSet<Identity> allIdentities = getAllIdentities(); // Scores are a rating of an identity from the view of an OwnIdentity so we compute them per OwnIdentity. for(OwnIdentity treeOwner : getAllOwnIdentities()) { // At the end of the loop body, this table will be filled with the ranks of all identities which are visible for treeOwner. // An identity is visible if there is a trust chain from the owner to it. // The rank is the distance in trust steps from the treeOwner. // So the treeOwner is rank 0, the trustees of the treeOwner are rank 1 and so on. final HashMap<Identity, Integer> rankValues = new HashMap<Identity, Integer>(allIdentities.size() * 2); // Compute the rank values { // For each identity which is added to rankValues, all its trustees are added to unprocessedTrusters. // The inner loop then pulls out one unprocessed identity and computes the rank of its trustees: // All trustees which have received positive (> 0) trust will get his rank + 1 // Trustees with negative trust or 0 trust will get a rank of Integer.MAX_VALUE. // Trusters with rank Integer.MAX_VALUE cannot inherit their rank to their trustees so the trustees will get no rank at all. // Identities with no rank are considered to be not in the trust tree of the own identity and their score will be null / none. // // Further, if the treeOwner has assigned a trust value to an identity, the rank decision is done by only considering this trust value: // The decision of the own identity shall not be overpowered by the view of the remote identities. // // The purpose of differentiation between Integer.MAX_VALUE and -1 is: // Score objects of identities with rank Integer.MAX_VALUE are kept in the database because WoT will usually "hear" about those identities by seeing // them in the trust lists of trusted identities (with 0 or negative trust values). So it must store the trust values to those identities and // have a way of telling the user "this identity is not trusted" by keeping a score object of them. // Score objects of identities with rank -1 are deleted because they are the trustees of distrusted identities and we will not get to the point where // we hear about those identities because the only way of hearing about them is importing a trust list of a identity with Integer.MAX_VALUE rank // - and we never import their trust lists. // We include trust values of 0 in the set of rank Integer.MAX_VALUE (instead of only NEGATIVE trust) so that identities which only have solved // introduction puzzles cannot inherit their rank to their trustees. 
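// Added worked example (not from the original source) of the rank rules above:
// Suppose the treeOwner O gives +100 trust to A and 0 trust to B, and A gives +20 trust to C.
// Then rank(O) = 0, rank(A) = 1, rank(B) = Integer.MAX_VALUE (it only received a non-positive trust value)
// and rank(C) = 2. B's own trustees receive no rank at all, so no Score objects are kept for them.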
final LinkedList<Identity> unprocessedTrusters = new LinkedList<Identity>(); // The own identity is the root of the trust tree, it should assign itself a rank of 0 , a capacity of 100 and a symbolic score of Integer.MAX_VALUE try { Score selfScore = getScore(treeOwner, treeOwner); if(selfScore.getRank() >= 0) { // It can only give it's rank if it has a valid one rankValues.put(treeOwner, selfScore.getRank()); unprocessedTrusters.addLast(treeOwner); } else { rankValues.put(treeOwner, null); } } catch(NotInTrustTreeException e) { // This only happens in unit tests. } while(!unprocessedTrusters.isEmpty()) { final Identity truster = unprocessedTrusters.removeFirst(); final Integer trusterRank = rankValues.get(truster); // The truster cannot give his rank to his trustees because he has none (or infinite), they receive no rank at all. if(trusterRank == null || trusterRank == Integer.MAX_VALUE) { // (Normally this does not happen because we do not enqueue the identities if they have no rank but we check for security) continue; } final int trusteeRank = trusterRank + 1; for(Trust trust : getGivenTrusts(truster)) { final Identity trustee = trust.getTrustee(); final Integer oldTrusteeRank = rankValues.get(trustee); if(oldTrusteeRank == null) { // The trustee was not processed yet if(trust.getValue() > 0) { rankValues.put(trustee, trusteeRank); unprocessedTrusters.addLast(trustee); } else rankValues.put(trustee, Integer.MAX_VALUE); } else { // Breadth first search will process all rank one identities are processed before any rank two identities, etc. assert(oldTrusteeRank == Integer.MAX_VALUE || trusteeRank >= oldTrusteeRank); if(oldTrusteeRank == Integer.MAX_VALUE) { // If we found a rank less than infinite we can overwrite the old rank with this one, but only if the infinite rank was not // given by the tree owner. try { final Trust treeOwnerTrust = getTrust(treeOwner, trustee); assert(treeOwnerTrust.getValue() <= 0); // TODO: Is this correct? } catch(NotTrustedException e) { if(trust.getValue() > 0) { rankValues.put(trustee, trusteeRank); unprocessedTrusters.addLast(trustee); } } } } } } } // Rank values of all visible identities are computed now. // Next step is to compute the scores of all identities for(Identity target : allIdentities) { // The score of an identity is the sum of all weighted trust values it has received. // Each trust value is weighted with the capacity of the truster - the capacity decays with increasing rank. Integer targetScore; final Integer targetRank = rankValues.get(target); if(targetRank == null) { targetScore = null; } else { // The treeOwner trusts himself. if(targetRank == 0) { targetScore = Integer.MAX_VALUE; } else { // If the treeOwner has assigned a trust value to the target, it always overrides the "remote" score. try { targetScore = (int)getTrust(treeOwner, target).getValue(); } catch(NotTrustedException e) { targetScore = 0; for(Trust receivedTrust : getReceivedTrusts(target)) { final Identity truster = receivedTrust.getTruster(); final Integer trusterRank = rankValues.get(truster); // The capacity is a weight function for trust values which are given from an identity: // The higher the rank, the less the capacity. // If the rank is Integer.MAX_VALUE (infinite) or -1 (no rank at all) the capacity will be 0. final int capacity = computeCapacity(treeOwner, truster, trusterRank != null ? 
trusterRank : -1); targetScore += (receivedTrust.getValue() * capacity) / 100; } } } } Score newScore = null; if(targetScore != null) { newScore = new Score(this, treeOwner, target, targetScore, targetRank, computeCapacity(treeOwner, target, targetRank)); } boolean needToCheckFetchStatus = false; boolean oldShouldFetch = false; int oldCapacity = 0; // Now we have the rank and the score of the target computed and can check whether the database-stored score object is correct. try { Score currentStoredScore = getScore(treeOwner, target); oldCapacity = currentStoredScore.getCapacity(); if(newScore == null) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: The identity has no rank and should have no score but score was " + currentStoredScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); currentStoredScore.deleteWithoutCommit(); } else { if(!newScore.equals(currentStoredScore)) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: Should have been " + newScore + " but was " + currentStoredScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); currentStoredScore.setRank(newScore.getRank()); currentStoredScore.setCapacity(newScore.getCapacity()); currentStoredScore.setValue(newScore.getScore()); currentStoredScore.storeWithoutCommit(); } } } catch(NotInTrustTreeException e) { oldCapacity = 0; if(newScore != null) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: No score was stored for the identity but it should be " + newScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); newScore.storeWithoutCommit(); } } if(needToCheckFetchStatus) { // If fetch status changed from false to true, we need to start fetching it // If the capacity changed from 0 to positive, we need to refetch the current edition: Identities with capacity 0 cannot // cause new identities to be imported from their trust list, capacity > 0 allows this. // If the fetch status changed from true to false, we need to stop fetching it if((!oldShouldFetch || (oldCapacity == 0 && newScore != null && newScore.getCapacity() > 0)) && shouldFetchIdentity(target) ) { if(!oldShouldFetch) if(logDEBUG) Logger.debug(this, "Fetch status changed from false to true, refetching " + target); else if(logDEBUG) Logger.debug(this, "Capacity changed from 0 to " + newScore.getCapacity() + ", refetching" + target); target.markForRefetch(); target.storeWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(target); } else if(oldShouldFetch && !shouldFetchIdentity(target)) { if(logDEBUG) Logger.debug(this, "Fetch status changed from true to false, aborting fetch of " + target); mFetcher.storeAbortFetchCommandWithoutCommit(target); } } } } mFullScoreComputationNeeded = false; ++mFullScoreRecomputationCount; mFullScoreRecomputationMilliseconds += CurrentTimeUTC.getInMillis() - beginTime; if(logMINOR) { Logger.minor(this, "Full score computation finished. 
Amount: " + mFullScoreRecomputationCount + "; Avg Time:" + getAverageFullScoreRecomputationTime() + "s"); } return returnValue; } private synchronized void createSeedIdentities() { for(String seedURI : SEED_IDENTITIES) { Identity seed; synchronized(Persistent.transactionLock(mDB)) { try { seed = getIdentityByURI(seedURI); if(seed instanceof OwnIdentity) { OwnIdentity ownSeed = (OwnIdentity)seed; ownSeed.addContext(IntroductionPuzzle.INTRODUCTION_CONTEXT); ownSeed.setProperty(IntroductionServer.PUZZLE_COUNT_PROPERTY, Integer.toString(IntroductionServer.SEED_IDENTITY_PUZZLE_COUNT)); ownSeed.storeAndCommit(); } else { try { seed.setEdition(new FreenetURI(seedURI).getEdition()); seed.storeAndCommit(); } catch(InvalidParameterException e) { /* We already have the latest edition stored */ } } } catch (UnknownIdentityException uie) { try { seed = new Identity(this, seedURI, null, true); // We have to explicitely set the edition number because the constructor only considers the given edition as a hint. seed.setEdition(new FreenetURI(seedURI).getEdition()); seed.storeAndCommit(); } catch (Exception e) { Logger.error(this, "Seed identity creation error", e); } } catch (Exception e) { Persistent.checkedRollback(mDB, this, e); } } } } public void terminate() { if(logDEBUG) Logger.debug(this, "WoT plugin terminating ..."); /* We use single try/catch blocks so that failure of termination of one service does not prevent termination of the others */ try { if(mWebInterface != null) this.mWebInterface.unload(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mIntroductionClient != null) mIntroductionClient.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mIntroductionServer != null) mIntroductionServer.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mInserter != null) mInserter.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mFetcher != null) mFetcher.stop(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mDB != null) { /* TODO: At 2009-06-15, it does not seem possible to ask db4o for whether a transaction is pending. * If it becomes possible some day, we should check that here, and log an error if there is an uncommitted transaction. * - All transactions should be committed after obtaining the lock() on the database. */ synchronized(Persistent.transactionLock(mDB)) { System.gc(); mDB.rollback(); System.gc(); mDB.close(); } } } catch(Exception e) { Logger.error(this, "Error during termination.", e); } if(logDEBUG) Logger.debug(this, "WoT plugin terminated."); } /** * Inherited event handler from FredPluginFCP, handled in <code>class FCPInterface</code>. */ public void handle(PluginReplySender replysender, SimpleFieldSet params, Bucket data, int accesstype) { mFCPInterface.handle(replysender, params, data, accesstype); } /** * Loads an own or normal identity from the database, querying on its ID. * * @param id The ID of the identity to load * @return The identity matching the supplied ID. 
* @throws DuplicateIdentityException if there are more than one identity with this id in the database * @throws UnknownIdentityException if there is no identity with this id in the database */ public synchronized Identity getIdentityByID(String id) throws UnknownIdentityException { final Query query = mDB.query(); query.constrain(Identity.class); query.descend("mID").constrain(id); final ObjectSet<Identity> result = new Persistent.InitializingObjectSet<Identity>(this, query); switch(result.size()) { case 1: return result.next(); case 0: throw new UnknownIdentityException(id); default: throw new DuplicateIdentityException(id, result.size()); } } /** * Gets an OwnIdentity by its ID. * * @param id The unique identifier to query an OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if there is now OwnIdentity with that id */ public synchronized OwnIdentity getOwnIdentityByID(String id) throws UnknownIdentityException { final Query query = mDB.query(); query.constrain(OwnIdentity.class); query.descend("mID").constrain(id); final ObjectSet<OwnIdentity> result = new Persistent.InitializingObjectSet<OwnIdentity>(this, query); switch(result.size()) { case 1: return result.next(); case 0: throw new UnknownIdentityException(id); default: throw new DuplicateIdentityException(id, result.size()); } } /** * Loads an identity from the database, querying on its requestURI (a valid {@link FreenetURI}) * * @param uri The requestURI of the identity * @return The identity matching the supplied requestURI * @throws UnknownIdentityException if there is no identity with this id in the database */ public Identity getIdentityByURI(FreenetURI uri) throws UnknownIdentityException { return getIdentityByID(IdentityID.constructAndValidateFromURI(uri).toString()); } /** * Loads an identity from the database, querying on its requestURI (as String) * * @param uri The requestURI of the identity which will be converted to {@link FreenetURI} * @return The identity matching the supplied requestURI * @throws UnknownIdentityException if there is no identity with this id in the database * @throws MalformedURLException if the requestURI isn't a valid FreenetURI */ public Identity getIdentityByURI(String uri) throws UnknownIdentityException, MalformedURLException { return getIdentityByURI(new FreenetURI(uri)); } /** * Gets an OwnIdentity by its requestURI (a {@link FreenetURI}). * The OwnIdentity's unique identifier is extracted from the supplied requestURI. * * @param uri The requestURI of the desired OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if the OwnIdentity isn't in the database */ public OwnIdentity getOwnIdentityByURI(FreenetURI uri) throws UnknownIdentityException { return getOwnIdentityByID(IdentityID.constructAndValidateFromURI(uri).toString()); } /** * Gets an OwnIdentity by its requestURI (as String). * The given String is converted to {@link FreenetURI} in order to extract a unique id. 
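 *
 * Minimal usage sketch (added illustration, not from the original source; the URI is a shortened placeholder, not a real identity):
 * try {
 *     OwnIdentity me = getOwnIdentityByURI("USK@.../WebOfTrust/5");
 * } catch(UnknownIdentityException e) {
 *     // No own identity with that request URI exists in the database.
 * } catch(MalformedURLException e) {
 *     // The given String was not a valid FreenetURI.
 * }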
* * @param uri The requestURI (as String) of the desired OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if the OwnIdentity isn't in the database * @throws MalformedURLException if the supplied requestURI is not a valid FreenetURI */ public OwnIdentity getOwnIdentityByURI(String uri) throws UnknownIdentityException, MalformedURLException { return getOwnIdentityByURI(new FreenetURI(uri)); } /** * Returns all identities that are in the database * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all identities present in the database */ public ObjectSet<Identity> getAllIdentities() { final Query query = mDB.query(); query.constrain(Identity.class); return new Persistent.InitializingObjectSet<Identity>(this, query); } public static enum SortOrder { ByNicknameAscending, ByNicknameDescending, ByScoreAscending, ByScoreDescending, ByLocalTrustAscending, ByLocalTrustDescending } /** * Get a filtered and sorted list of identities. * You have to synchronize on this WoT when calling the function and processing the returned list. */ public ObjectSet<Identity> getAllIdentitiesFilteredAndSorted(OwnIdentity truster, String nickFilter, SortOrder sortInstruction) { Query q = mDB.query(); switch(sortInstruction) { case ByNicknameAscending: q.constrain(Identity.class); q.descend("mNickname").orderAscending(); break; case ByNicknameDescending: q.constrain(Identity.class); q.descend("mNickname").orderDescending(); break; case ByScoreAscending: q.constrain(Score.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderAscending(); q = q.descend("mTrustee"); break; case ByScoreDescending: // TODO: This excludes identities which have no score q.constrain(Score.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderDescending(); q = q.descend("mTrustee"); break; case ByLocalTrustAscending: q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderAscending(); q = q.descend("mTrustee"); break; case ByLocalTrustDescending: // TODO: This excludes untrusted identities. q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderDescending(); q = q.descend("mTrustee"); break; } if(nickFilter != null) { nickFilter = nickFilter.trim(); if(!nickFilter.equals("")) q.descend("mNickname").constrain(nickFilter).like(); } return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all non-own identities that are in the database. * * You have to synchronize on this WoT when calling the function and processing the returned list! */ public ObjectSet<Identity> getAllNonOwnIdentities() { final Query q = mDB.query(); q.constrain(Identity.class); q.constrain(OwnIdentity.class).not(); return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all non-own identities that are in the database, sorted descending by their date of modification, i.e. recently * modified identities will be at the beginning of the list. * * You have to synchronize on this WoT when calling the function and processing the returned list! * * Used by the IntroductionClient for fetching puzzles from recently modified identities. 
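 *
 * Minimal usage sketch (added illustration, not from the original source; "wot" stands for this WebOfTrust instance):
 * synchronized(wot) {
 *     for(Identity identity : wot.getAllNonOwnIdentitiesSortedByModification()) {
 *         // Identities which were fetched most recently come first.
 *     }
 * }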
*/ public ObjectSet<Identity> getAllNonOwnIdentitiesSortedByModification () { final Query q = mDB.query(); q.constrain(Identity.class); q.constrain(OwnIdentity.class).not(); /* TODO: As soon as identities announce that they were online every day, uncomment the following line */ /* q.descend("mLastChangedDate").constrain(new Date(CurrentTimeUTC.getInMillis() - 1 * 24 * 60 * 60 * 1000)).greater(); */ q.descend("mLastFetchedDate").orderDescending(); return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all own identities that are in the database * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all identities present in the database. */ public ObjectSet<OwnIdentity> getAllOwnIdentities() { final Query q = mDB.query(); q.constrain(OwnIdentity.class); return new Persistent.InitializingObjectSet<OwnIdentity>(this, q); } /** * DO NOT USE THIS FUNCTION FOR DELETING OWN IDENTITIES UPON USER REQUEST! * IN FACT BE VERY CAREFUL WHEN USING IT FOR ANYTHING FOR THE FOLLOWING REASONS: * - This function deletes ALL given and received trust values of the given identity. This modifies the trust list of the trusters against their will. * - Especially it might be an information leak if the trust values of other OwnIdentities are deleted! * - If WOT one day is designed to be used by many different users at once, the deletion of other OwnIdentity's trust values would even be corruption. * * The intended purpose of this function is: * - To specify which objects have to be dealt with when messing with storage of an identity. * - To be able to do database object leakage tests: Many classes have a deleteWithoutCommit function and there are valid usecases for them. * However, the implementations of those functions might cause leaks by forgetting to delete certain object members. * If you call this function for ALL identities in a database, EVERYTHING should be deleted and the database SHOULD be empty. * You then can check whether the database actually IS empty to test for leakage. * * You have to lock the WebOfTrust, the IntroductionPuzzleStore and the IdentityFetcher before calling this function. */ private void deleteWithoutCommit(Identity identity) { // We want to use beginTrustListImport, finishTrustListImport / abortTrustListImport. // If the caller already handles that for us though, we should not call those function again. // So we check whether the caller already started an import. boolean trustListImportWasInProgress = mTrustListImportInProgress; try { if(!trustListImportWasInProgress) beginTrustListImport(); if(logDEBUG) Logger.debug(this, "Deleting identity " + identity + " ..."); if(logDEBUG) Logger.debug(this, "Deleting received scores..."); for(Score score : getScores(identity)) score.deleteWithoutCommit(); if(identity instanceof OwnIdentity) { if(logDEBUG) Logger.debug(this, "Deleting given scores..."); for(Score score : getGivenScores((OwnIdentity)identity)) score.deleteWithoutCommit(); } if(logDEBUG) Logger.debug(this, "Deleting received trusts..."); for(Trust trust : getReceivedTrusts(identity)) trust.deleteWithoutCommit(); if(logDEBUG) Logger.debug(this, "Deleting given trusts..."); for(Trust givenTrust : getGivenTrusts(identity)) { givenTrust.deleteWithoutCommit(); // We call computeAllScores anyway so we do not use removeTrustWithoutCommit() } mFullScoreComputationNeeded = true; // finishTrustListImport will call computeAllScoresWithoutCommit for us. 
if(logDEBUG) Logger.debug(this, "Deleting associated introduction puzzles ..."); mPuzzleStore.onIdentityDeletion(identity); if(logDEBUG) Logger.debug(this, "Storing an abort-fetch-command..."); if(mFetcher != null) { // Can be null if we use this function in upgradeDB() mFetcher.storeAbortFetchCommandWithoutCommit(identity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. } if(logDEBUG) Logger.debug(this, "Deleting the identity..."); identity.deleteWithoutCommit(); if(!trustListImportWasInProgress) finishTrustListImport(); } catch(RuntimeException e) { if(!trustListImportWasInProgress) abortTrustListImport(e); Persistent.checkedRollbackAndThrow(mDB, this, e); } } /** * Gets the score of this identity in a trust tree. * Each {@link OwnIdentity} has its own trust tree. * * @param truster The owner of the trust tree * @return The {@link Score} of this Identity in the required trust tree * @throws NotInTrustTreeException if this identity is not in the required trust tree */ public synchronized Score getScore(final OwnIdentity truster, final Identity trustee) throws NotInTrustTreeException { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mID").constrain(new ScoreID(truster, trustee).toString()); final ObjectSet<Score> result = new Persistent.InitializingObjectSet<Score>(this, query); switch(result.size()) { case 1: final Score score = result.next(); assert(score.getTruster() == truster); assert(score.getTrustee() == trustee); return score; case 0: throw new NotInTrustTreeException(truster, trustee); default: throw new DuplicateScoreException(truster, trustee, result.size()); } } /** * Gets a list of all this Identity's Scores. * You have to synchronize on this WoT around the call to this function and the processing of the returned list! * * @return An {@link ObjectSet} containing all {@link Score} this Identity has. */ public ObjectSet<Score> getScores(final Identity identity) { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mTrustee").constrain(identity).identity(); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Get a list of all scores which the passed own identity has assigned to other identities. * * You have to synchronize on this WoT around the call to this function and the processing of the returned list! * @return An {@link ObjectSet} containing all {@link Score} this Identity has given. */ public ObjectSet<Score> getGivenScores(final OwnIdentity truster) { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mTruster").constrain(truster).identity(); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Gets the best score this Identity has in existing trust trees. * * @return the best score this Identity has * @throws NotInTrustTreeException If the identity has no score in any trusttree. */ public synchronized int getBestScore(final Identity identity) throws NotInTrustTreeException { int bestScore = Integer.MIN_VALUE; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) throw new NotInTrustTreeException(identity); // TODO: Cache the best score of an identity as a member variable. 
for(final Score score : scores) bestScore = Math.max(score.getScore(), bestScore); return bestScore; } /** * Gets the best capacity this identity has in any trust tree. * @throws NotInTrustTreeException If the identity is not in any trust tree. Can be interpreted as capacity 0. */ public int getBestCapacity(final Identity identity) throws NotInTrustTreeException { int bestCapacity = 0; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) throw new NotInTrustTreeException(identity); // TODO: Cache the best score of an identity as a member variable. for(final Score score : scores) bestCapacity = Math.max(score.getCapacity(), bestCapacity); return bestCapacity; } /** * Get all scores in the database. * You have to synchronize on this WoT when calling the function and processing the returned list! */ public ObjectSet<Score> getAllScores() { final Query query = mDB.query(); query.constrain(Score.class); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Checks whether the given identity should be downloaded. * @return Returns true if the identity has any capacity > 0, any score >= 0 or if it is an own identity. */ public boolean shouldFetchIdentity(final Identity identity) { if(identity instanceof OwnIdentity) return true; int bestScore = Integer.MIN_VALUE; int bestCapacity = 0; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) return false; // TODO: Cache the best score of an identity as a member variable. for(Score score : scores) { bestCapacity = Math.max(score.getCapacity(), bestCapacity); bestScore = Math.max(score.getScore(), bestScore); if(bestCapacity > 0 || bestScore >= 0) return true; } return false; } /** * Gets non-own Identities matching a specified score criteria. * TODO: Rename to getNonOwnIdentitiesByScore. Or even better: Make it return own identities as well, this will speed up the database query and clients might be ok with it. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param truster The owner of the trust tree, null if you want the trusted identities of all owners. * @param select Score criteria, can be > zero, zero or negative. Greater than zero returns all identities with score >= 0, zero with score equal to 0 * and negative with score < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a trust value of 0. * @return an {@link ObjectSet} containing Scores of the identities that match the criteria */ public ObjectSet<Score> getIdentitiesByScore(final OwnIdentity truster, final int select) { final Query query = mDB.query(); query.constrain(Score.class); if(truster != null) query.descend("mTruster").constrain(truster).identity(); query.descend("mTrustee").constrain(OwnIdentity.class).not(); /* We include 0 in the list of identities with positive score because solving captchas gives no points to score */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Gets {@link Trust} from a specified truster to a specified trustee. 
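 *
 * Minimal usage sketch (added illustration, not from the original source):
 * try {
 *     byte value = getTrust(truster, trustee).getValue();
 * } catch(NotTrustedException e) {
 *     // The truster has not assigned any trust value to the trustee.
 * }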
* * @param truster The identity that gives trust to this Identity * @param trustee The identity which receives the trust * @return The trust given to the trustee by the specified truster * @throws NotTrustedException if the truster doesn't trust the trustee */ public synchronized Trust getTrust(final Identity truster, final Identity trustee) throws NotTrustedException, DuplicateTrustException { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mID").constrain(new TrustID(truster, trustee).toString()); final ObjectSet<Trust> result = new Persistent.InitializingObjectSet<Trust>(this, query); switch(result.size()) { case 1: final Trust trust = result.next(); assert(trust.getTruster() == truster); assert(trust.getTrustee() == trustee); return trust; case 0: throw new NotTrustedException(truster, trustee); default: throw new DuplicateTrustException(truster, trustee, result.size()); } } /** * Gets all trusts given by the given truster. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has given. */ public ObjectSet<Trust> getGivenTrusts(final Identity truster) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts given by the given truster. * The result is sorted descending by the time we last fetched the trusted identity. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has given. */ public ObjectSet<Trust> getGivenTrustsSortedDescendingByLastSeen(final Identity truster) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); query.descend("mTrustee").descend("mLastFetchedDate").orderDescending(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets given trust values of an identity matching a specified trust value criteria. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param truster The identity which given the trust values. * @param select Trust value criteria, can be > zero, zero or negative. Greater than zero returns all trust values >= 0, zero returns trust values equal to 0. * Negative returns trust values < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a value of 0. * @return an {@link ObjectSet} containing received trust values that match the criteria. */ public ObjectSet<Trust> getGivenTrusts(final Identity truster, final int select) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); /* We include 0 in the list of identities with positive trust because solving captchas gives 0 trust */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0 ) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts given by the given truster in a trust list with a different edition than the passed in one. * You have to synchronize on this WoT when calling the function and processing the returned list! 
*/ protected ObjectSet<Trust> getGivenTrustsOfDifferentEdition(final Identity truster, final long edition) { final Query q = mDB.query(); q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mTrusterTrustListEdition").constrain(edition).not(); return new Persistent.InitializingObjectSet<Trust>(this, q); } /** * Gets all trusts received by the given trustee. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has received. */ public ObjectSet<Trust> getReceivedTrusts(final Identity trustee) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTrustee").constrain(trustee).identity(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets received trust values of an identity matching a specified trust value criteria. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param trustee The identity which has received the trust values. * @param select Trust value criteria, can be > zero, zero or negative. Greater than zero returns all trust values >= 0, zero returns trust values equal to 0. * Negative returns trust values < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a value of 0. * @return an {@link ObjectSet} containing received trust values that match the criteria. */ public ObjectSet<Trust> getReceivedTrusts(final Identity trustee, final int select) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTrustee").constrain(trustee).identity(); /* We include 0 in the list of identities with positive trust because solving captchas gives 0 trust */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0 ) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has received. */ public ObjectSet<Trust> getAllTrusts() { final Query query = mDB.query(); query.constrain(Trust.class); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gives some {@link Trust} to another Identity. * It creates or updates an existing Trust object and make the trustee compute its {@link Score}. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(WebOfTrust.this) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... setTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * } * * @param truster The Identity that gives the trust * @param trustee The Identity that receives the trust * @param newValue Numeric value of the trust * @param newComment A comment to explain the given value * @throws InvalidParameterException if a given parameter isn't valid, see {@link Trust} for details on accepted values. 
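 *
 * Concrete sketch of the locking pattern above (added illustration; the trust value and comment are made up):
 * synchronized(WebOfTrust.this) {
 * synchronized(Persistent.transactionLock(mDB)) {
 *     try {
 *         setTrustWithoutCommit(truster, trustee, (byte)75, "Publishes a useful index");
 *         Persistent.checkedCommit(mDB, this);
 *     } catch(RuntimeException e) {
 *         Persistent.checkedRollbackAndThrow(mDB, this, e);
 *     }
 * }}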
*/ protected void setTrustWithoutCommit(Identity truster, Identity trustee, byte newValue, String newComment) throws InvalidParameterException { try { // Check if we are updating an existing trust value final Trust trust = getTrust(truster, trustee); final Trust oldTrust = trust.clone(); trust.trusterEditionUpdated(); trust.setComment(newComment); trust.storeWithoutCommit(); if(trust.getValue() != newValue) { trust.setValue(newValue); trust.storeWithoutCommit(); if(logDEBUG) Logger.debug(this, "Updated trust value ("+ trust +"), now updating Score."); updateScoresWithoutCommit(oldTrust, trust); } } catch (NotTrustedException e) { final Trust trust = new Trust(this, truster, trustee, newValue, newComment); trust.storeWithoutCommit(); if(logDEBUG) Logger.debug(this, "New trust value ("+ trust +"), now updating Score."); updateScoresWithoutCommit(null, trust); } truster.updated(); truster.storeWithoutCommit(); } /** * Only for being used by WoT internally and by unit tests! */ synchronized void setTrust(OwnIdentity truster, Identity trustee, byte newValue, String newComment) throws InvalidParameterException { synchronized(Persistent.transactionLock(mDB)) { try { setTrustWithoutCommit(truster, trustee, newValue, newComment); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } protected synchronized void removeTrust(OwnIdentity truster, Identity trustee) { synchronized(Persistent.transactionLock(mDB)) { try { removeTrustWithoutCommit(truster, trustee); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } /** * Deletes a trust object. * * This function does neither lock the database nor commit the transaction. You have to surround it with + * synchronized(this) { + * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... removeTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } - * } + * }}} * * @param truster * @param trustee */ - protected synchronized void removeTrustWithoutCommit(OwnIdentity truster, Identity trustee) { + protected void removeTrustWithoutCommit(OwnIdentity truster, Identity trustee) { try { try { removeTrustWithoutCommit(getTrust(truster, trustee)); } catch (NotTrustedException e) { Logger.error(this, "Cannot remove trust - there is none - from " + truster.getNickname() + " to " + trustee.getNickname()); } } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } /** * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(this) { * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... setTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * }}} * */ protected void removeTrustWithoutCommit(Trust trust) { trust.deleteWithoutCommit(); updateScoresWithoutCommit(trust, null); } /** * Initializes this OwnIdentity's trust tree without commiting the transaction. * Meaning : It creates a Score object for this OwnIdentity in its own trust so it can give trust to other Identities. * * The score will have a rank of 0, a capacity of 100 (= 100 percent) and a score value of Integer.MAX_VALUE. * * This function does neither lock the database nor commit the transaction. 
You have to surround it with * synchronized(Persistent.transactionLock(mDB)) { * try { ... initTrustTreeWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * * @throws DuplicateScoreException if there already is more than one Score for this identity (should never happen) */ private synchronized void initTrustTreeWithoutCommit(OwnIdentity identity) throws DuplicateScoreException { try { getScore(identity, identity); Logger.error(this, "initTrustTreeWithoutCommit called even though there is already one for " + identity); return; } catch (NotInTrustTreeException e) { final Score score = new Score(this, identity, identity, Integer.MAX_VALUE, 0, 100); score.storeWithoutCommit(); } } /** * Computes the trustee's Score value according to the trusts it has received and the capacity of its trusters in the specified * trust tree. * * @param truster The OwnIdentity that owns the trust tree * @param trustee The identity for which the score shall be computed. * @return The new Score of the identity. Integer.MAX_VALUE if the trustee is equal to the truster. * @throws DuplicateScoreException if there already exist more than one {@link Score} objects for the trustee (should never happen) */ private synchronized int computeScoreValue(OwnIdentity truster, Identity trustee) throws DuplicateScoreException { if(trustee == truster) return Integer.MAX_VALUE; int value = 0; try { return getTrust(truster, trustee).getValue(); } catch(NotTrustedException e) { } for(Trust trust : getReceivedTrusts(trustee)) { try { final Score trusterScore = getScore(truster, trust.getTruster()); value += ( trust.getValue() * trusterScore.getCapacity() ) / 100; } catch (NotInTrustTreeException e) {} } return value; } /** * Computes the trustees's rank in the trust tree of the truster. * It gets its best ranked non-zero-capacity truster's rank, plus one. * If it has only received negative trust values from identities which have a non-zero-capacity it gets a rank of Integer.MAX_VALUE (infinite). * If it has only received trust values from identities with rank of Integer.MAX_VALUE it gets a rank of -1. * * If the tree owner has assigned a trust value to the identity, the rank computation is only done from that value because the score decisions of the * tree owner are always absolute (if you distrust someone, the remote identities should not be allowed to overpower your decision). * * The purpose of differentiation between Integer.MAX_VALUE and -1 is: * Score objects of identities with rank Integer.MAX_VALUE are kept in the database because WoT will usually "hear" about those identities by seeing them * in the trust lists of trusted identities (with negative trust values). So it must store the trust values to those identities and have a way of telling the * user "this identity is not trusted" by keeping a score object of them. * Score objects of identities with rank -1 are deleted because they are the trustees of distrusted identities and we will not get to the point where we * hear about those identities because the only way of hearing about them is downloading a trust list of a identity with Integer.MAX_VALUE rank - and * we never download their trust lists. * * Notice that 0 is included in infinite rank to prevent identities which have only solved introduction puzzles from having a capacity. 
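 *
 * Worked illustration (added, not part of the original comment): if the tree owner gives +100 to A, A gives +100 to B
 * and B gives -5 to C (and the tree owner rates neither B nor C directly), then rank(A) = 1, rank(B) = 2 and
 * rank(C) = Integer.MAX_VALUE, because C only received non-positive trust from identities with non-zero capacity.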
* * @param truster The OwnIdentity that owns the trust tree * @return The new Rank if this Identity * @throws DuplicateScoreException if there already exist more than one {@link Score} objects for the trustee (should never happen) */ private synchronized int computeRank(OwnIdentity truster, Identity trustee) throws DuplicateScoreException { if(trustee == truster) return 0; int rank = -1; try { Trust treeOwnerTrust = getTrust(truster, trustee); if(treeOwnerTrust.getValue() > 0) return 1; else return Integer.MAX_VALUE; } catch(NotTrustedException e) { } for(Trust trust : getReceivedTrusts(trustee)) { try { Score score = getScore(truster, trust.getTruster()); if(score.getCapacity() != 0) { // If the truster has no capacity, he can't give his rank // A truster only gives his rank to a trustee if he has assigned a strictly positive trust value if(trust.getValue() > 0 ) { // We give the rank to the trustee if it is better than its current rank or he has no rank yet. if(rank == -1 || score.getRank() < rank) rank = score.getRank(); } else { // If the trustee has no rank yet we give him an infinite rank. because he is distrusted by the truster. if(rank == -1) rank = Integer.MAX_VALUE; } } } catch (NotInTrustTreeException e) {} } if(rank == -1) return -1; else if(rank == Integer.MAX_VALUE) return Integer.MAX_VALUE; else return rank+1; } /** * Begins the import of a trust list. This sets a flag on this WoT which signals that the import of a trust list is in progress. * This speeds up setTrust/removeTrust as the score calculation is only performed when endTrustListImport is called. * * You MUST synchronize on this WoT around beginTrustListImport, abortTrustListImport and finishTrustListImport! * You MUST create a database transaction by synchronizing on Persistent.transactionLock(db). */ protected void beginTrustListImport() { if(logMINOR) Logger.minor(this, "beginTrustListImport()"); if(mTrustListImportInProgress) { abortTrustListImport(new RuntimeException("There was already a trust list import in progress!")); mFullScoreComputationNeeded = true; computeAllScoresWithoutCommit(); assert(mFullScoreComputationNeeded == false); } mTrustListImportInProgress = true; assert(!mFullScoreComputationNeeded); assert(computeAllScoresWithoutCommit()); // The database is intact before the import } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. * * Aborts the import of a trust list and rolls back the current transaction. * * @param e The exception which triggered the abort. Will be logged to the Freenet log file. * @param logLevel The {@link LogLevel} to use when logging the abort to the Freenet log file. */ protected void abortTrustListImport(Exception e, LogLevel logLevel) { if(logMINOR) Logger.minor(this, "abortTrustListImport()"); assert(mTrustListImportInProgress); mTrustListImportInProgress = false; mFullScoreComputationNeeded = false; Persistent.checkedRollback(mDB, this, e, logLevel); assert(computeAllScoresWithoutCommit()); // Test rollback. } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. * * Aborts the import of a trust list and rolls back the current transaction. * * @param e The exception which triggered the abort. Will be logged to the Freenet log file with log level {@link LogLevel.ERROR} */ protected void abortTrustListImport(Exception e) { abortTrustListImport(e, Logger.LogLevel.ERROR); } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. 
* * Finishes the import of the current trust list and clears the "trust list * * Does NOT commit the transaction, you must do this. */ protected void finishTrustListImport() { if(logMINOR) Logger.minor(this, "finishTrustListImport()"); if(!mTrustListImportInProgress) { Logger.error(this, "There was no trust list import in progress!"); return; } if(mFullScoreComputationNeeded) { computeAllScoresWithoutCommit(); assert(!mFullScoreComputationNeeded); // It properly clears the flag assert(computeAllScoresWithoutCommit()); // computeAllScoresWithoutCommit() is stable } else assert(computeAllScoresWithoutCommit()); // Verify whether updateScoresWithoutCommit worked. mTrustListImportInProgress = false; } /** * Updates all trust trees which are affected by the given modified score. * For understanding how score calculation works you should first read {@link computeAllScores * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(this) { * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... updateScoreWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e);; } * }}} */ private void updateScoresWithoutCommit(final Trust oldTrust, final Trust newTrust) { if(logMINOR) Logger.minor(this, "Doing an incremental computation of all Scores..."); final long beginTime = CurrentTimeUTC.getInMillis(); // We only include the time measurement if we actually do something. // If we figure out that a full score recomputation is needed just by looking at the initial parameters, the measurement won't be included. boolean includeMeasurement = false; final boolean trustWasCreated = (oldTrust == null); final boolean trustWasDeleted = (newTrust == null); final boolean trustWasModified = !trustWasCreated && !trustWasDeleted; if(trustWasCreated && trustWasDeleted) throw new NullPointerException("No old/new trust specified."); if(trustWasModified && oldTrust.getTruster() != newTrust.getTruster()) throw new IllegalArgumentException("oldTrust has different truster, oldTrust:" + oldTrust + "; newTrust: " + newTrust); if(trustWasModified && oldTrust.getTrustee() != newTrust.getTrustee()) throw new IllegalArgumentException("oldTrust has different trustee, oldTrust:" + oldTrust + "; newTrust: " + newTrust); // We cannot iteratively REMOVE an inherited rank from the trustees because we don't know whether there is a circle in the trust values // which would make the current identity get its old rank back via the circle: computeRank searches the trusters of an identity for the best // rank, if we remove the rank from an identity, all its trustees will have a better rank and if one of them trusts the original identity // then this function would run into an infinite loop. Decreasing or incrementing an existing rank is possible with this function because // the rank received from the trustees will always be higher (that is exactly 1 more) than this identities rank. if(trustWasDeleted) { mFullScoreComputationNeeded = true; } if(!mFullScoreComputationNeeded && (trustWasCreated || trustWasModified)) { includeMeasurement = true; for(OwnIdentity treeOwner : getAllOwnIdentities()) { try { // Throws to abort the update of the trustee's score: If the truster has no rank or capacity in the tree owner's view then we don't need to update the trustee's score. 
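// (If there is no Score object at all, getScore() throws NotInTrustTreeException, which the catch below treats the same way: skip this tree owner.)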
if(getScore(treeOwner, newTrust.getTruster()).getCapacity() == 0) continue; } catch(NotInTrustTreeException e) { continue; } // See explanation above "We cannot iteratively REMOVE an inherited rank..." if(trustWasModified && oldTrust.getValue() > 0 && newTrust.getValue() <= 0) { mFullScoreComputationNeeded = true; break; } final LinkedList<Trust> unprocessedEdges = new LinkedList<Trust>(); unprocessedEdges.add(newTrust); while(!unprocessedEdges.isEmpty()) { final Trust trust = unprocessedEdges.removeFirst(); final Identity trustee = trust.getTrustee(); if(trustee == treeOwner) continue; Score currentStoredTrusteeScore; try { currentStoredTrusteeScore = getScore(treeOwner, trustee); } catch(NotInTrustTreeException e) { currentStoredTrusteeScore = new Score(this, treeOwner, trustee, 0, -1, 0); } final Score oldScore = currentStoredTrusteeScore.clone(); boolean oldShouldFetch = shouldFetchIdentity(trustee); final int newScoreValue = computeScoreValue(treeOwner, trustee); final int newRank = computeRank(treeOwner, trustee); final int newCapacity = computeCapacity(treeOwner, trustee, newRank); final Score newScore = new Score(this, treeOwner, trustee, newScoreValue, newRank, newCapacity); // Normally we couldn't detect the following two cases due to circular trust values. However, if an own identity assigns a trust value, // the rank and capacity are always computed based on the trust value of the own identity so we must also check this here: if((oldScore.getRank() >= 0 && oldScore.getRank() < Integer.MAX_VALUE) // It had an inheritable rank && (newScore.getRank() == -1 || newScore.getRank() == Integer.MAX_VALUE)) { // It has no inheritable rank anymore mFullScoreComputationNeeded = true; break; } if(oldScore.getCapacity() > 0 && newScore.getCapacity() == 0) { mFullScoreComputationNeeded = true; break; } // We are OK to update it now. We must not update the values of the stored score object before determining whether we need // a full score computation - the full computation needs the old values of the object. currentStoredTrusteeScore.setValue(newScore.getScore()); currentStoredTrusteeScore.setRank(newScore.getRank()); currentStoredTrusteeScore.setCapacity(newScore.getCapacity()); // Identities should not get into the queue if they have no rank, see the large if() about 20 lines below assert(currentStoredTrusteeScore.getRank() >= 0); if(currentStoredTrusteeScore.getRank() >= 0) currentStoredTrusteeScore.storeWithoutCommit(); // If fetch status changed from false to true, we need to start fetching it // If the capacity changed from 0 to positive, we need to refetch the current edition: Identities with capacity 0 cannot // cause new identities to be imported from their trust list, capacity > 0 allows this. 
// If the fetch status changed from true to false, we need to stop fetching it if((!oldShouldFetch || (oldScore.getCapacity()== 0 && newScore.getCapacity() > 0)) && shouldFetchIdentity(trustee)) { if(!oldShouldFetch) if(logDEBUG) Logger.debug(this, "Fetch status changed from false to true, refetching " + trustee); else if(logDEBUG) Logger.debug(this, "Capacity changed from 0 to " + newScore.getCapacity() + ", refetching" + trustee); trustee.markForRefetch(); trustee.storeWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(trustee); } else if(oldShouldFetch && !shouldFetchIdentity(trustee)) { if(logDEBUG) Logger.debug(this, "Fetch status changed from true to false, aborting fetch of " + trustee); mFetcher.storeAbortFetchCommandWithoutCommit(trustee); } // If the rank or capacity changed then the trustees might be affected because the could have inherited theirs if(oldScore.getRank() != newScore.getRank() || oldScore.getCapacity() != newScore.getCapacity()) { // If this identity has no capacity or no rank then it cannot affect its trustees: // (- If it had none and it has none now then there is none which can be inherited, this is obvious) // - If it had one before and it was removed, this algorithm will have aborted already because a full computation is needed if(newScore.getCapacity() > 0 || (newScore.getRank() >= 0 && newScore.getRank() < Integer.MAX_VALUE)) { // We need to update the trustees of trustee for(Trust givenTrust : getGivenTrusts(trustee)) { unprocessedEdges.add(givenTrust); } } } } if(mFullScoreComputationNeeded) break; } } if(includeMeasurement) { ++mIncrementalScoreRecomputationCount; mIncrementalScoreRecomputationMilliseconds += CurrentTimeUTC.getInMillis() - beginTime; } if(logMINOR) { final String time = includeMeasurement ? ("Stats: Amount: " + mIncrementalScoreRecomputationCount + "; Avg Time:" + getAverageIncrementalScoreRecomputationTime() + "s") : ("Time not measured: Computation was aborted before doing anything."); if(!mFullScoreComputationNeeded) Logger.minor(this, "Incremental computation of all Scores finished. " + time); else Logger.minor(this, "Incremental computation of all Scores not possible, full computation is needed. " + time); } if(!mTrustListImportInProgress) { if(mFullScoreComputationNeeded) { // TODO: Optimization: This uses very much CPU and memory. Write a partial computation function... // TODO: Optimization: While we do not have a partial computation function, we could at least optimize computeAllScores to NOT // keep all objects in memory etc. computeAllScoresWithoutCommit(); assert(computeAllScoresWithoutCommit()); // computeAllScoresWithoutCommit is stable } else { assert(computeAllScoresWithoutCommit()); // This function worked correctly. 
} } else { // a trust list import is in progress // We not do the following here because it would cause too much CPU usage during debugging: Trust lists are large and therefore // updateScoresWithoutCommit is called often during import of a single trust list // assert(computeAllScoresWithoutCommit()); } } /* Client interface functions */ public synchronized Identity addIdentity(String requestURI) throws MalformedURLException, InvalidParameterException { try { getIdentityByURI(requestURI); throw new InvalidParameterException("We already have this identity"); } catch(UnknownIdentityException e) { final Identity identity = new Identity(this, requestURI, null, false); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Created identity " + identity); // The identity hasn't received a trust value. Therefore, there is no reason to fetch it and we don't notify the IdentityFetcher. // TODO: Document this function and the UI which uses is to warn the user that the identity won't be fetched without trust. return identity; } } public OwnIdentity createOwnIdentity(String nickName, boolean publishTrustList, String context) throws MalformedURLException, InvalidParameterException { FreenetURI[] keypair = getPluginRespirator().getHLSimpleClient().generateKeyPair(WOT_NAME); return createOwnIdentity(keypair[0], nickName, publishTrustList, context); } /** * @param context A context with which you want to use the identity. Null if you want to add it later. */ public synchronized OwnIdentity createOwnIdentity(FreenetURI insertURI, String nickName, boolean publishTrustList, String context) throws MalformedURLException, InvalidParameterException { synchronized(Persistent.transactionLock(mDB)) { OwnIdentity identity; try { identity = getOwnIdentityByURI(insertURI); throw new InvalidParameterException("The URI you specified is already used by the own identity " + identity.getNickname() + "."); } catch(UnknownIdentityException uie) { identity = new OwnIdentity(this, insertURI, nickName, publishTrustList); if(context != null) identity.addContext(context); if(publishTrustList) { identity.addContext(IntroductionPuzzle.INTRODUCTION_CONTEXT); /* TODO: make configureable */ identity.setProperty(IntroductionServer.PUZZLE_COUNT_PROPERTY, Integer.toString(IntroductionServer.DEFAULT_PUZZLE_COUNT)); } try { identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); beginTrustListImport(); // Incremental score computation has proven to be very very slow when creating identities so we just schedule a full computation. mFullScoreComputationNeeded = true; for(String seedURI : SEED_IDENTITIES) { try { setTrustWithoutCommit(identity, getIdentityByURI(seedURI), (byte)100, "Automatically assigned trust to a seed identity."); } catch(UnknownIdentityException e) { Logger.error(this, "SHOULD NOT HAPPEN: Seed identity not known: " + e); } } finishTrustListImport(); Persistent.checkedCommit(mDB, this); if(mIntroductionClient != null) mIntroductionClient.nextIteration(); // This will make it fetch more introduction puzzles. if(logDEBUG) Logger.debug(this, "Successfully created a new OwnIdentity (" + identity.getNickname() + ")"); return identity; } catch(RuntimeException e) { abortTrustListImport(e); // Rolls back for us throw e; // Satisfy the compiler } } } } /** * This "deletes" an {@link OwnIdentity} by replacing it with an {@link Identity}. 
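 * The conversion keeps all received trusts and scores, re-creates the given trusts under the new non-own Identity,
 * and deletes the given scores, since non-own identities do not assign scores to other identities.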
* * The {@link OwnIdentity} is not deleted because this would be a security issue: * If other {@link OwnIdentity}s have assigned a trust value to it, the trust value would be gone if there is no {@link Identity} object to be the target * * @param id The {@link Identity.IdentityID} of the identity. * @throws UnknownIdentityException If there is no {@link OwnIdentity} with the given ID. Also thrown if a non-own identity exists with the given ID. */ public synchronized void deleteOwnIdentity(String id) throws UnknownIdentityException { Logger.normal(this, "deleteOwnIdentity(): Starting... "); synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { final OwnIdentity oldIdentity = getOwnIdentityByID(id); try { Logger.normal(this, "Deleting an OwnIdentity by converting it to a non-own Identity: " + oldIdentity); // We don't need any score computations to happen (explanation will follow below) so we don't need the following: /* beginTrustListImport(); */ // This function messes with the score graph manually so it is a good idea to check whether it is intact before and afterwards. assert(computeAllScoresWithoutCommit()); final Identity newIdentity; try { newIdentity = new Identity(this, oldIdentity.getRequestURI(), oldIdentity.getNickname(), oldIdentity.doesPublishTrustList()); } catch(MalformedURLException e) { // The data was taken from the OwnIdentity so this shouldn't happen throw new RuntimeException(e); } catch (InvalidParameterException e) { // The data was taken from the OwnIdentity so this shouldn't happen throw new RuntimeException(e); } newIdentity.setContexts(oldIdentity.getContexts()); newIdentity.setProperties(oldIdentity.getProperties()); try { newIdentity.setEdition(oldIdentity.getEdition()); } catch (InvalidParameterException e) { // The data was taken from old identity so this shouldn't happen throw new RuntimeException(e); } // In theory we do not need to re-fetch the current trust list edition: // The trust list of an own identity is always stored completely in the database, i.e. all trustees exist. // HOWEVER if the user had used the restoreOwnIdentity feature and then used this function, it might be the case that // the current edition of the old OwndIdentity was not fetched yet. // So we set the fetch state to FetchState.Fetched if the oldIdentity's fetch state was like that as well. if(oldIdentity.getCurrentEditionFetchState() == FetchState.Fetched) { newIdentity.onFetched(oldIdentity.getLastFetchedDate()); } // An else to set the fetch state to FetchState.NotFetched is not necessary, newIdentity.setEdition() did that already. newIdentity.storeWithoutCommit(); // Copy all received trusts. // We don't have to modify them because they are user-assigned values and the assignment // of the user does not change just because the type of the identity changes. for(Trust oldReceivedTrust : getReceivedTrusts(oldIdentity)) { Trust newReceivedTrust; try { newReceivedTrust = new Trust(this, oldReceivedTrust.getTruster(), newIdentity, oldReceivedTrust.getValue(), oldReceivedTrust.getComment()); } catch (InvalidParameterException e) { // The data was taken from the old Trust so this shouldn't happen throw new RuntimeException(e); } // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. 
/* assert(newReceivedTrust.equals(oldReceivedTrust)); */ oldReceivedTrust.deleteWithoutCommit(); newReceivedTrust.storeWithoutCommit(); } assert(getReceivedTrusts(oldIdentity).size() == 0); // Copy all received scores. // We don't have to modify them because the rating of the identity from the perspective of a // different own identity should NOT be dependent upon whether it is an own identity or not. for(Score oldScore : getScores(oldIdentity)) { Score newScore = new Score(this, oldScore.getTruster(), newIdentity, oldScore.getScore(), oldScore.getRank(), oldScore.getCapacity()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newScore.equals(oldScore)); */ oldScore.deleteWithoutCommit(); newScore.storeWithoutCommit(); } assert(getScores(oldIdentity).size() == 0); // Delete all given scores: // Non-own identities do not assign scores to other identities so we can just delete them. for(Score oldScore : getGivenScores(oldIdentity)) { final Identity trustee = oldScore.getTrustee(); final boolean oldShouldFetchTrustee = shouldFetchIdentity(trustee); oldScore.deleteWithoutCommit(); // If the OwnIdentity which we are converting was the only source of trust to the trustee // of this Score value, the should-fetch state of the trustee might change to false. if(oldShouldFetchTrustee && shouldFetchIdentity(trustee) == false) { mFetcher.storeAbortFetchCommandWithoutCommit(trustee); } } assert(getGivenScores(oldIdentity).size() == 0); // Copy all given trusts: // We don't have to use the removeTrust/setTrust functions because the score graph does not need updating: // - To the rating of the converted identity in the score graphs of other own identities it is irrelevant // whether it is an own identity or not. The rating should never depend on whether it is an own identity! // - Non-own identities do not have a score graph. So the score graph of the converted identity is deleted // completely and therefore it does not need to be updated. for(Trust oldGivenTrust : getGivenTrusts(oldIdentity)) { Trust newGivenTrust; try { newGivenTrust = new Trust(this, newIdentity, oldGivenTrust.getTrustee(), oldGivenTrust.getValue(), oldGivenTrust.getComment()); } catch (InvalidParameterException e) { // The data was taken from the old Trust so this shouldn't happen throw new RuntimeException(e); } // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newGivenTrust.equals(oldGivenTrust)); */ oldGivenTrust.deleteWithoutCommit(); newGivenTrust.storeWithoutCommit(); } mPuzzleStore.onIdentityDeletion(oldIdentity); mFetcher.storeAbortFetchCommandWithoutCommit(oldIdentity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. oldIdentity.deleteWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(newIdentity); // This function messes with the score graph manually so it is a good idea to check whether it is intact before and afterwards. 
assert(computeAllScoresWithoutCommit()); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "deleteOwnIdentity(): Finished."); } /** * NOTICE: When changing this function, please also take care of {@link OwnIdentity.isRestoreInProgress()} */ public synchronized void restoreOwnIdentity(FreenetURI insertFreenetURI) throws MalformedURLException, InvalidParameterException { Logger.normal(this, "restoreOwnIdentity(): Starting... "); OwnIdentity identity; synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { try { long edition = 0; try { edition = Math.max(edition, insertFreenetURI.getEdition()); } catch(IllegalStateException e) { // The user supplied URI did not have an edition specified } try { // Try replacing an existing non-own version of the identity with an OwnIdentity Identity oldIdentity = getIdentityByURI(insertFreenetURI); if(oldIdentity instanceof OwnIdentity) throw new InvalidParameterException("There is already an own identity with the given URI pair."); Logger.normal(this, "Restoring an already known identity from Freenet: " + oldIdentity); // Normally, one would expect beginTrustListImport() to happen close to the actual trust list changes later on in this function. // But beginTrustListImport() contains an assert(computeAllScoresWithoutCommit()) and that call to the score computation reference // implementation will fail if two identities with the same ID exist. // This would be the case later on - we cannot delete the non-own version of the OwnIdentity before we modified the trust graph // but we must also store the own version to be able to modify the trust graph. beginTrustListImport(); // We already have fetched this identity as a stranger's one. We need to update the database. identity = new OwnIdentity(this, insertFreenetURI, oldIdentity.getNickname(), oldIdentity.doesPublishTrustList()); /* We re-fetch the most recent edition to make sure all trustees are imported */ edition = Math.max(edition, oldIdentity.getEdition()); identity.restoreEdition(edition, oldIdentity.getLastFetchedDate()); identity.setContexts(oldIdentity.getContexts()); identity.setProperties(oldIdentity.getProperties()); identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); // Copy all received trusts. // We don't have to modify them because they are user-assigned values and the assignment // of the user does not change just because the type of the identity changes. for(Trust oldReceivedTrust : getReceivedTrusts(oldIdentity)) { Trust newReceivedTrust = new Trust(this, oldReceivedTrust.getTruster(), identity, oldReceivedTrust.getValue(), oldReceivedTrust.getComment()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newReceivedTrust.equals(oldReceivedTrust)); */ oldReceivedTrust.deleteWithoutCommit(); newReceivedTrust.storeWithoutCommit(); } assert(getReceivedTrusts(oldIdentity).size() == 0); // Copy all received scores. // We don't have to modify them because the rating of the identity from the perspective of a // different own identity should NOT be dependent upon whether it is an own identity or not. 
for(Score oldScore : getScores(oldIdentity)) { Score newScore = new Score(this, oldScore.getTruster(), identity, oldScore.getScore(), oldScore.getRank(), oldScore.getCapacity()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newScore.equals(oldScore)); */ oldScore.deleteWithoutCommit(); newScore.storeWithoutCommit(); } assert(getScores(oldIdentity).size() == 0); // What we do NOT have to deal with is the given scores of the old identity: // Given scores do NOT exist for non-own identities, so there are no old ones to update. // Of cause there WILL be scores because it is an own identity now. // They will be created automatically when updating the given trusts // - so thats what we will do now. // Update all given trusts for(Trust givenTrust : getGivenTrusts(oldIdentity)) { // TODO: Instead of using the regular removeTrustWithoutCommit on all trust values, we could: // - manually delete the old Trust objects from the database // - manually store the new trust objects // - Realize that only the trust graph of the restored identity needs to be updated and write an optimized version // of setTrustWithoutCommit which deals with that. // But before we do that, we should first do the existing possible optimization of removeTrustWithoutCommit: // To get rid of removeTrustWithoutCommit always triggering a FULL score recomputation and instead make // it only update the parts of the trust graph which are affected. // Maybe the optimized version is fast enough that we don't have to do the optimization which this TODO suggests. removeTrustWithoutCommit(givenTrust); setTrustWithoutCommit(identity, givenTrust.getTrustee(), givenTrust.getValue(), givenTrust.getComment()); } // We do not call finishTrustListImport() now: It might trigger execution of computeAllScoresWithoutCommit // which would re-create scores of the old identity. We later call it AFTER deleting the old identity. /* finishTrustListImport(); */ mPuzzleStore.onIdentityDeletion(oldIdentity); mFetcher.storeAbortFetchCommandWithoutCommit(oldIdentity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. oldIdentity.deleteWithoutCommit(); finishTrustListImport(); } catch (UnknownIdentityException e) { // The identity did NOT exist as non-own identity yet so we can just create an OwnIdentity and store it. identity = new OwnIdentity(this, insertFreenetURI, null, false); Logger.normal(this, "Restoring not-yet-known identity from Freenet: " + identity); identity.restoreEdition(edition, null); // Store the new identity identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); } mFetcher.storeStartFetchCommandWithoutCommit(identity); // This function messes with the trust graph manually so it is a good idea to check whether it is intact afterwards. 
assert(computeAllScoresWithoutCommit()); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { abortTrustListImport(e); Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "restoreOwnIdentity(): Finished."); } public synchronized void setTrust(String ownTrusterID, String trusteeID, byte value, String comment) throws UnknownIdentityException, NumberFormatException, InvalidParameterException { final OwnIdentity truster = getOwnIdentityByID(ownTrusterID); Identity trustee = getIdentityByID(trusteeID); setTrust(truster, trustee, value, comment); } public synchronized void removeTrust(String ownTrusterID, String trusteeID) throws UnknownIdentityException { final OwnIdentity truster = getOwnIdentityByID(ownTrusterID); final Identity trustee = getIdentityByID(trusteeID); removeTrust(truster, trustee); } public synchronized void addContext(String ownIdentityID, String newContext) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.addContext(newContext); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Added context '" + newContext + "' to identity '" + identity.getNickname() + "'"); } public synchronized void removeContext(String ownIdentityID, String context) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.removeContext(context); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Removed context '" + context + "' from identity '" + identity.getNickname() + "'"); } public synchronized String getProperty(String identityID, String property) throws InvalidParameterException, UnknownIdentityException { return getIdentityByID(identityID).getProperty(property); } public synchronized void setProperty(String ownIdentityID, String property, String value) throws UnknownIdentityException, InvalidParameterException { Identity identity = getOwnIdentityByID(ownIdentityID); identity.setProperty(property, value); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Added property '" + property + "=" + value + "' to identity '" + identity.getNickname() + "'"); } public synchronized void removeProperty(String ownIdentityID, String property) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.removeProperty(property); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Removed property '" + property + "' from identity '" + identity.getNickname() + "'"); } public String getVersion() { return Version.getMarketingVersion(); } public long getRealVersion() { return Version.getRealVersion(); } public String getString(String key) { return getBaseL10n().getString(key); } public void setLanguage(LANGUAGE newLanguage) { WebOfTrust.l10n = new PluginL10n(this, newLanguage); if(logDEBUG) Logger.debug(this, "Set LANGUAGE to: " + newLanguage.isoCode); } public PluginRespirator getPluginRespirator() { return mPR; } public ExtObjectContainer getDatabase() { return mDB; } public Configuration getConfig() { return mConfig; } public IdentityFetcher getIdentityFetcher() { return mFetcher; } public XMLTransformer getXMLTransformer() { return mXMLTransformer; } public IntroductionPuzzleStore getIntroductionPuzzleStore() { return mPuzzleStore; } public IntroductionClient getIntroductionClient() { return mIntroductionClient; } public RequestClient getRequestClient() { return mRequestClient; } /** * This is where our L10n files are 
stored. * @return Path of our L10n files. */ public String getL10nFilesBasePath() { return "plugins/WebOfTrust/l10n/"; } /** * This is the mask of our L10n files : lang_en.l10n, lang_de.10n, ... * @return Mask of the L10n files. */ public String getL10nFilesMask() { return "lang_${lang}.l10n"; } /** * Override L10n files are stored on the disk, their names should be explicit * we put here the plugin name, and the "override" indication. Plugin L10n * override is not implemented in the node yet. * @return Mask of the override L10n files. */ public String getL10nOverrideFilesMask() { return "WebOfTrust_lang_${lang}.override.l10n"; } /** * Get the ClassLoader of this plugin. This is necessary when getting * resources inside the plugin's Jar, for example L10n files. * @return ClassLoader object */ public ClassLoader getPluginClassLoader() { return WebOfTrust.class.getClassLoader(); } /** * Access to the current L10n data. * * @return L10n object. */ public BaseL10n getBaseL10n() { return WebOfTrust.l10n.getBase(); } public int getNumberOfFullScoreRecomputations() { return mFullScoreRecomputationCount; } public synchronized double getAverageFullScoreRecomputationTime() { return (double)mFullScoreRecomputationMilliseconds / ((mFullScoreRecomputationCount!= 0 ? mFullScoreRecomputationCount : 1) * 1000); } public int getNumberOfIncrementalScoreRecomputations() { return mIncrementalScoreRecomputationCount; } public synchronized double getAverageIncrementalScoreRecomputationTime() { return (double)mIncrementalScoreRecomputationMilliseconds / ((mIncrementalScoreRecomputationCount!= 0 ? mIncrementalScoreRecomputationCount : 1) * 1000); } /** * Tests whether two WoT are equal. * This is a complex operation in terms of execution time and memory usage and only intended for being used in unit tests. 
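 * Two WebOfTrust instances are considered equal if they contain equal sets of own identities, identities, trusts and scores.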
*/ public synchronized boolean equals(Object obj) { if(obj == this) return true; if(!(obj instanceof WebOfTrust)) return false; WebOfTrust other = (WebOfTrust)obj; synchronized(other) { { // Compare own identities final ObjectSet<OwnIdentity> allIdentities = getAllOwnIdentities(); if(allIdentities.size() != other.getAllOwnIdentities().size()) return false; for(OwnIdentity identity : allIdentities) { try { if(!identity.equals(other.getOwnIdentityByID(identity.getID()))) return false; } catch(UnknownIdentityException e) { return false; } } } { // Compare identities final ObjectSet<Identity> allIdentities = getAllIdentities(); if(allIdentities.size() != other.getAllIdentities().size()) return false; for(Identity identity : allIdentities) { try { if(!identity.equals(other.getIdentityByID(identity.getID()))) return false; } catch(UnknownIdentityException e) { return false; } } } { // Compare trusts final ObjectSet<Trust> allTrusts = getAllTrusts(); if(allTrusts.size() != other.getAllTrusts().size()) return false; for(Trust trust : allTrusts) { try { Identity otherTruster = other.getIdentityByID(trust.getTruster().getID()); Identity otherTrustee = other.getIdentityByID(trust.getTrustee().getID()); if(!trust.equals(other.getTrust(otherTruster, otherTrustee))) return false; } catch(UnknownIdentityException e) { return false; } catch(NotTrustedException e) { return false; } } } { // Compare scores final ObjectSet<Score> allScores = getAllScores(); if(allScores.size() != other.getAllScores().size()) return false; for(Score score : allScores) { try { OwnIdentity otherTruster = other.getOwnIdentityByID(score.getTruster().getID()); Identity otherTrustee = other.getIdentityByID(score.getTrustee().getID()); if(!score.equals(other.getScore(otherTruster, otherTrustee))) return false; } catch(UnknownIdentityException e) { return false; } catch(NotInTrustTreeException e) { return false; } } } } return true; } }
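/**
 * Debug function which deletes duplicate objects from the database: Identity objects which share the same ID, and Trust objects
 * where an OwnIdentity has given trust to the same trustee more than once. All scores are recomputed if a duplicate trust was removed.
 */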
private synchronized void deleteDuplicateObjects() { synchronized(mPuzzleStore) { // Needed for deleteIdentity() synchronized(mFetcher) { // // Needed for deleteIdentity() synchronized(Persistent.transactionLock(mDB)) { try { HashSet<String> deleted = new HashSet<String>(); if(logDEBUG) Logger.debug(this, "Searching for duplicate identities ..."); for(Identity identity : getAllIdentities()) { Query q = mDB.query(); q.constrain(Identity.class); q.descend("mID").constrain(identity.getID()); q.constrain(identity).identity().not(); ObjectSet<Identity> duplicates = new Persistent.InitializingObjectSet<Identity>(this, q); for(Identity duplicate : duplicates) { if(deleted.contains(duplicate.getID()) == false) { Logger.error(duplicate, "Deleting duplicate identity " + duplicate.getRequestURI()); deleteWithoutCommit(duplicate); Persistent.checkedCommit(mDB, this); } } deleted.add(identity.getID()); } Persistent.checkedCommit(mDB, this); if(logDEBUG) Logger.debug(this, "Finished searching for duplicate identities."); } catch(RuntimeException e) { Persistent.checkedRollback(mDB, this, e); } } } } // synchronized(this) { // For removeTrustWithoutCommit. Done at function level already. synchronized(mFetcher) { // For removeTrustWithoutCommit synchronized(Persistent.transactionLock(mDB)) { try { if(logDEBUG) Logger.debug(this, "Searching for duplicate Trust objects ..."); boolean duplicateTrustFound = false; for(OwnIdentity truster : getAllOwnIdentities()) { HashSet<String> givenTo = new HashSet<String>(); for(Trust trust : getGivenTrusts(truster)) { if(givenTo.contains(trust.getTrustee().getID()) == false) givenTo.add(trust.getTrustee().getID()); else { Logger.error(this, "Deleting duplicate given trust:" + trust); removeTrustWithoutCommit(trust); duplicateTrustFound = true; } } } if(duplicateTrustFound) { computeAllScoresWithoutCommit(); } Persistent.checkedCommit(mDB, this); if(logDEBUG) Logger.debug(this, "Finished searching for duplicate trust objects."); } catch(RuntimeException e) { Persistent.checkedRollback(mDB, this, e); } } // synchronized(Persistent.transactionLock(mDB)) { } // synchronized(mFetcher) { /* TODO: Also delete duplicate score */ } /** * Debug function for deleting trusts or scores of which one of the involved partners is missing. */ private synchronized void deleteOrphanObjects() { synchronized(Persistent.transactionLock(mDB)) { try { boolean orphanTrustFound = false; Query q = mDB.query(); q.constrain(Trust.class); q.descend("mTruster").constrain(null).identity().or(q.descend("mTrustee").constrain(null).identity()); ObjectSet<Trust> orphanTrusts = new Persistent.InitializingObjectSet<Trust>(this, q); for(Trust trust : orphanTrusts) { if(trust.getTruster() != null && trust.getTrustee() != null) { // TODO: Remove this workaround for the db4o bug as soon as we are sure that it does not happen anymore. 
Logger.error(this, "Db4o bug: constrain(null).identity() did not work for " + trust); continue; } Logger.error(trust, "Deleting orphan trust, truster = " + trust.getTruster() + ", trustee = " + trust.getTrustee()); orphanTrustFound = true; trust.deleteWithoutCommit(); } if(orphanTrustFound) { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } } catch(Exception e) { Persistent.checkedRollback(mDB, this, e); } } synchronized(Persistent.transactionLock(mDB)) { try { boolean orphanScoresFound = false; Query q = mDB.query(); q.constrain(Score.class); q.descend("mTruster").constrain(null).identity().or(q.descend("mTrustee").constrain(null).identity()); ObjectSet<Score> orphanScores = new Persistent.InitializingObjectSet<Score>(this, q); for(Score score : orphanScores) { if(score.getTruster() != null && score.getTrustee() != null) { // TODO: Remove this workaround for the db4o bug as soon as we are sure that it does not happen anymore. Logger.error(this, "Db4o bug: constrain(null).identity() did not work for " + score); continue; } Logger.error(score, "Deleting orphan score, truster = " + score.getTruster() + ", trustee = " + score.getTrustee()); orphanScoresFound = true; score.deleteWithoutCommit(); } if(orphanScoresFound) { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } } catch(Exception e) { Persistent.checkedRollback(mDB, this, e); } } } /** * Warning: This function is not synchronized, use it only in single threaded mode. * @return The WOT database format version of the given database. -1 if there is no Configuration stored in it or multiple configurations exist. */ @SuppressWarnings("deprecation") private static int peekDatabaseFormatVersion(WebOfTrust wot, ExtObjectContainer database) { final Query query = database.query(); query.constrain(Configuration.class); @SuppressWarnings("unchecked") ObjectSet<Configuration> result = (ObjectSet<Configuration>)query.execute(); switch(result.size()) { case 1: { final Configuration config = (Configuration)result.next(); config.initializeTransient(wot, database); // For the HashMaps to stay alive we need to activate to full depth. config.checkedActivate(4); return config.getDatabaseFormatVersion(); } default: return -1; } } /** * Loads an existing Config object from the database and adds any missing default values to it, creates and stores a new one if none exists. * @return The config object. */ private synchronized Configuration getOrCreateConfig() { final Query query = mDB.query(); query.constrain(Configuration.class); final ObjectSet<Configuration> result = new Persistent.InitializingObjectSet<Configuration>(this, query); switch(result.size()) { case 1: { final Configuration config = result.next(); // For the HashMaps to stay alive we need to activate to full depth. config.checkedActivate(4); config.setDefaultValues(false); config.storeAndCommit(); return config; } case 0: { final Configuration config = new Configuration(this); config.initializeTransient(this); config.storeAndCommit(); return config; } default: throw new RuntimeException("Multiple config objects found: " + result.size()); } } /** Capacity is the maximum amount of points an identity can give to an other by trusting it. * * Values choice : * Advogato Trust metric recommends that values decrease by rounded 2.5 times. * This makes sense, making the need of 3 N+1 ranked people to overpower * the trust given by a N ranked identity. 
* * Number of ranks choice : * When someone creates a fresh identity, he gets the seed identity at * rank 1 and freenet developpers at rank 2. That means that * he will see people that were : * - given 7 trust by freenet devs (rank 2) * - given 17 trust by rank 3 * - given 50 trust by rank 4 * - given 100 trust by rank 5 and above. * This makes the range small enough to avoid a newbie * to even see spam, and large enough to make him see a reasonnable part * of the community right out-of-the-box. * Of course, as soon as he will start to give trust, he will put more * people at rank 1 and enlarge his WoT. */ protected static final int capacities[] = { 100,// Rank 0 : Own identities 40, // Rank 1 : Identities directly trusted by ownIdenties 16, // Rank 2 : Identities trusted by rank 1 identities 6, // So on... 2, 1 // Every identity above rank 5 can give 1 point }; // Identities with negative score have zero capacity /** * Computes the capacity of a truster. The capacity is a weight function in percent which is used to decide how much * trust points an identity can add to the score of identities which it has assigned trust values to. * The higher the rank of an identity, the less is it's capacity. * * If the rank of the identity is Integer.MAX_VALUE (infinite, this means it has only received negative or 0 trust values from identities with rank >= 0 and less * than infinite) or -1 (this means that it has only received trust values from identities with infinite rank) then its capacity is 0. * * If the truster has assigned a trust value to the trustee the capacity will be computed only from that trust value: * The decision of the truster should always overpower the view of remote identities. * * Notice that 0 is included in infinite rank to prevent identities which have only solved introduction puzzles from having a capacity. * * @param truster The {@link OwnIdentity} in whose trust tree the capacity shall be computed * @param trustee The {@link Identity} of which the capacity shall be computed. * @param rank The rank of the identity. The rank is the distance in trust steps from the OwnIdentity which views the web of trust, * - its rank is 0, the rank of its trustees is 1 and so on. Must be -1 if the truster has no rank in the tree owners view. */ protected int computeCapacity(OwnIdentity truster, Identity trustee, int rank) { if(truster == trustee) return 100; try { if(getTrust(truster, trustee).getValue() <= 0) { // Security check, if rank computation breaks this will hit. assert(rank == Integer.MAX_VALUE); return 0; } } catch(NotTrustedException e) { } if(rank == -1 || rank == Integer.MAX_VALUE) return 0; return (rank < capacities.length) ? capacities[rank] : 1; } /** * Reference-implementation of score computation. This means:<br /> * - It is not used by the real WoT code because its slow<br /> * - It is used by unit tests (and WoT) to check whether the real implementation works<br /> * - It is the function which you should read if you want to understand how WoT works.<br /> * * Computes all rank and score values and checks whether the database is correct. If wrong values are found, they are correct.<br /> * * There was a bug in the score computation for a long time which resulted in wrong computation when trust values very removed under certain conditions.<br /> * * Further, rank values are shortest paths and the path-finding algorithm is not executed from the source * to the target upon score computation: It uses the rank of the neighbor nodes to find a shortest path. 
* Therefore, the algorithm is very vulnerable to bugs since one wrong value will stay in the database * and affect many others. So it is useful to have this function. * * @return True if all stored scores were correct. False if there were any errors in stored scores. */ protected synchronized boolean computeAllScoresWithoutCommit() { if(logMINOR) Logger.minor(this, "Doing a full computation of all Scores..."); final long beginTime = CurrentTimeUTC.getInMillis(); boolean returnValue = true; final ObjectSet<Identity> allIdentities = getAllIdentities(); // Scores are a rating of an identity from the view of an OwnIdentity so we compute them per OwnIdentity. for(OwnIdentity treeOwner : getAllOwnIdentities()) { // At the end of the loop body, this table will be filled with the ranks of all identities which are visible for treeOwner. // An identity is visible if there is a trust chain from the owner to it. // The rank is the distance in trust steps from the treeOwner. // So the treeOwner is rank 0, the trustees of the treeOwner are rank 1 and so on. final HashMap<Identity, Integer> rankValues = new HashMap<Identity, Integer>(allIdentities.size() * 2); // Compute the rank values { // For each identity which is added to rankValues, all its trustees are added to unprocessedTrusters. // The inner loop then pulls out one unprocessed identity and computes the rank of its trustees: // All trustees which have received positive (> 0) trust will get his rank + 1 // Trustees with negative trust or 0 trust will get a rank of Integer.MAX_VALUE. // Trusters with rank Integer.MAX_VALUE cannot inherit their rank to their trustees so the trustees will get no rank at all. // Identities with no rank are considered to be not in the trust tree of the own identity and their score will be null / none. // // Further, if the treeOwner has assigned a trust value to an identity, the rank decision is done by only considering this trust value: // The decision of the own identity shall not be overpowered by the view of the remote identities. // // The purpose of differentiation between Integer.MAX_VALUE and -1 is: // Score objects of identities with rank Integer.MAX_VALUE are kept in the database because WoT will usually "hear" about those identities by seeing // them in the trust lists of trusted identities (with 0 or negative trust values). So it must store the trust values to those identities and // have a way of telling the user "this identity is not trusted" by keeping a score object of them. // Score objects of identities with rank -1 are deleted because they are the trustees of distrusted identities and we will not get to the point where // we hear about those identities because the only way of hearing about them is importing a trust list of a identity with Integer.MAX_VALUE rank // - and we never import their trust lists. // We include trust values of 0 in the set of rank Integer.MAX_VALUE (instead of only NEGATIVE trust) so that identities which only have solved // introduction puzzles cannot inherit their rank to their trustees. 
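//
// The loop below is a breadth-first search starting at the tree owner: identities are processed in order of increasing rank,
// so the first finite rank an identity receives is already its shortest-path rank; only a previously assigned rank of
// Integer.MAX_VALUE may later be replaced by a finite one.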
final LinkedList<Identity> unprocessedTrusters = new LinkedList<Identity>(); // The own identity is the root of the trust tree, it should assign itself a rank of 0 , a capacity of 100 and a symbolic score of Integer.MAX_VALUE try { Score selfScore = getScore(treeOwner, treeOwner); if(selfScore.getRank() >= 0) { // It can only give it's rank if it has a valid one rankValues.put(treeOwner, selfScore.getRank()); unprocessedTrusters.addLast(treeOwner); } else { rankValues.put(treeOwner, null); } } catch(NotInTrustTreeException e) { // This only happens in unit tests. } while(!unprocessedTrusters.isEmpty()) { final Identity truster = unprocessedTrusters.removeFirst(); final Integer trusterRank = rankValues.get(truster); // The truster cannot give his rank to his trustees because he has none (or infinite), they receive no rank at all. if(trusterRank == null || trusterRank == Integer.MAX_VALUE) { // (Normally this does not happen because we do not enqueue the identities if they have no rank but we check for security) continue; } final int trusteeRank = trusterRank + 1; for(Trust trust : getGivenTrusts(truster)) { final Identity trustee = trust.getTrustee(); final Integer oldTrusteeRank = rankValues.get(trustee); if(oldTrusteeRank == null) { // The trustee was not processed yet if(trust.getValue() > 0) { rankValues.put(trustee, trusteeRank); unprocessedTrusters.addLast(trustee); } else rankValues.put(trustee, Integer.MAX_VALUE); } else { // Breadth first search will process all rank one identities are processed before any rank two identities, etc. assert(oldTrusteeRank == Integer.MAX_VALUE || trusteeRank >= oldTrusteeRank); if(oldTrusteeRank == Integer.MAX_VALUE) { // If we found a rank less than infinite we can overwrite the old rank with this one, but only if the infinite rank was not // given by the tree owner. try { final Trust treeOwnerTrust = getTrust(treeOwner, trustee); assert(treeOwnerTrust.getValue() <= 0); // TODO: Is this correct? } catch(NotTrustedException e) { if(trust.getValue() > 0) { rankValues.put(trustee, trusteeRank); unprocessedTrusters.addLast(trustee); } } } } } } } // Rank values of all visible identities are computed now. // Next step is to compute the scores of all identities for(Identity target : allIdentities) { // The score of an identity is the sum of all weighted trust values it has received. // Each trust value is weighted with the capacity of the truster - the capacity decays with increasing rank. Integer targetScore; final Integer targetRank = rankValues.get(target); if(targetRank == null) { targetScore = null; } else { // The treeOwner trusts himself. if(targetRank == 0) { targetScore = Integer.MAX_VALUE; } else { // If the treeOwner has assigned a trust value to the target, it always overrides the "remote" score. try { targetScore = (int)getTrust(treeOwner, target).getValue(); } catch(NotTrustedException e) { targetScore = 0; for(Trust receivedTrust : getReceivedTrusts(target)) { final Identity truster = receivedTrust.getTruster(); final Integer trusterRank = rankValues.get(truster); // The capacity is a weight function for trust values which are given from an identity: // The higher the rank, the less the capacity. // If the rank is Integer.MAX_VALUE (infinite) or -1 (no rank at all) the capacity will be 0. final int capacity = computeCapacity(treeOwner, truster, trusterRank != null ? 
trusterRank : -1); targetScore += (receivedTrust.getValue() * capacity) / 100; } } } } Score newScore = null; if(targetScore != null) { newScore = new Score(this, treeOwner, target, targetScore, targetRank, computeCapacity(treeOwner, target, targetRank)); } boolean needToCheckFetchStatus = false; boolean oldShouldFetch = false; int oldCapacity = 0; // Now we have the rank and the score of the target computed and can check whether the database-stored score object is correct. try { Score currentStoredScore = getScore(treeOwner, target); oldCapacity = currentStoredScore.getCapacity(); if(newScore == null) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: The identity has no rank and should have no score but score was " + currentStoredScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); currentStoredScore.deleteWithoutCommit(); } else { if(!newScore.equals(currentStoredScore)) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: Should have been " + newScore + " but was " + currentStoredScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); currentStoredScore.setRank(newScore.getRank()); currentStoredScore.setCapacity(newScore.getCapacity()); currentStoredScore.setValue(newScore.getScore()); currentStoredScore.storeWithoutCommit(); } } } catch(NotInTrustTreeException e) { oldCapacity = 0; if(newScore != null) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: No score was stored for the identity but it should be " + newScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); newScore.storeWithoutCommit(); } } if(needToCheckFetchStatus) { // If fetch status changed from false to true, we need to start fetching it // If the capacity changed from 0 to positive, we need to refetch the current edition: Identities with capacity 0 cannot // cause new identities to be imported from their trust list, capacity > 0 allows this. // If the fetch status changed from true to false, we need to stop fetching it if((!oldShouldFetch || (oldCapacity == 0 && newScore != null && newScore.getCapacity() > 0)) && shouldFetchIdentity(target) ) { if(!oldShouldFetch) if(logDEBUG) Logger.debug(this, "Fetch status changed from false to true, refetching " + target); else if(logDEBUG) Logger.debug(this, "Capacity changed from 0 to " + newScore.getCapacity() + ", refetching" + target); target.markForRefetch(); target.storeWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(target); } else if(oldShouldFetch && !shouldFetchIdentity(target)) { if(logDEBUG) Logger.debug(this, "Fetch status changed from true to false, aborting fetch of " + target); mFetcher.storeAbortFetchCommandWithoutCommit(target); } } } } mFullScoreComputationNeeded = false; ++mFullScoreRecomputationCount; mFullScoreRecomputationMilliseconds += CurrentTimeUTC.getInMillis() - beginTime; if(logMINOR) { Logger.minor(this, "Full score computation finished. 
Amount: " + mFullScoreRecomputationCount + "; Avg Time:" + getAverageFullScoreRecomputationTime() + "s"); } return returnValue; } private synchronized void createSeedIdentities() { for(String seedURI : SEED_IDENTITIES) { Identity seed; synchronized(Persistent.transactionLock(mDB)) { try { seed = getIdentityByURI(seedURI); if(seed instanceof OwnIdentity) { OwnIdentity ownSeed = (OwnIdentity)seed; ownSeed.addContext(IntroductionPuzzle.INTRODUCTION_CONTEXT); ownSeed.setProperty(IntroductionServer.PUZZLE_COUNT_PROPERTY, Integer.toString(IntroductionServer.SEED_IDENTITY_PUZZLE_COUNT)); ownSeed.storeAndCommit(); } else { try { seed.setEdition(new FreenetURI(seedURI).getEdition()); seed.storeAndCommit(); } catch(InvalidParameterException e) { /* We already have the latest edition stored */ } } } catch (UnknownIdentityException uie) { try { seed = new Identity(this, seedURI, null, true); // We have to explicitely set the edition number because the constructor only considers the given edition as a hint. seed.setEdition(new FreenetURI(seedURI).getEdition()); seed.storeAndCommit(); } catch (Exception e) { Logger.error(this, "Seed identity creation error", e); } } catch (Exception e) { Persistent.checkedRollback(mDB, this, e); } } } } public void terminate() { if(logDEBUG) Logger.debug(this, "WoT plugin terminating ..."); /* We use single try/catch blocks so that failure of termination of one service does not prevent termination of the others */ try { if(mWebInterface != null) this.mWebInterface.unload(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mIntroductionClient != null) mIntroductionClient.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mIntroductionServer != null) mIntroductionServer.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mInserter != null) mInserter.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mFetcher != null) mFetcher.stop(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mDB != null) { /* TODO: At 2009-06-15, it does not seem possible to ask db4o for whether a transaction is pending. * If it becomes possible some day, we should check that here, and log an error if there is an uncommitted transaction. * - All transactions should be committed after obtaining the lock() on the database. */ synchronized(Persistent.transactionLock(mDB)) { System.gc(); mDB.rollback(); System.gc(); mDB.close(); } } } catch(Exception e) { Logger.error(this, "Error during termination.", e); } if(logDEBUG) Logger.debug(this, "WoT plugin terminated."); } /** * Inherited event handler from FredPluginFCP, handled in <code>class FCPInterface</code>. */ public void handle(PluginReplySender replysender, SimpleFieldSet params, Bucket data, int accesstype) { mFCPInterface.handle(replysender, params, data, accesstype); } /** * Loads an own or normal identity from the database, querying on its ID. * * @param id The ID of the identity to load * @return The identity matching the supplied ID. 
* @throws DuplicateIdentityException if there are more than one identity with this id in the database * @throws UnknownIdentityException if there is no identity with this id in the database */ public synchronized Identity getIdentityByID(String id) throws UnknownIdentityException { final Query query = mDB.query(); query.constrain(Identity.class); query.descend("mID").constrain(id); final ObjectSet<Identity> result = new Persistent.InitializingObjectSet<Identity>(this, query); switch(result.size()) { case 1: return result.next(); case 0: throw new UnknownIdentityException(id); default: throw new DuplicateIdentityException(id, result.size()); } } /** * Gets an OwnIdentity by its ID. * * @param id The unique identifier to query an OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if there is now OwnIdentity with that id */ public synchronized OwnIdentity getOwnIdentityByID(String id) throws UnknownIdentityException { final Query query = mDB.query(); query.constrain(OwnIdentity.class); query.descend("mID").constrain(id); final ObjectSet<OwnIdentity> result = new Persistent.InitializingObjectSet<OwnIdentity>(this, query); switch(result.size()) { case 1: return result.next(); case 0: throw new UnknownIdentityException(id); default: throw new DuplicateIdentityException(id, result.size()); } } /** * Loads an identity from the database, querying on its requestURI (a valid {@link FreenetURI}) * * @param uri The requestURI of the identity * @return The identity matching the supplied requestURI * @throws UnknownIdentityException if there is no identity with this id in the database */ public Identity getIdentityByURI(FreenetURI uri) throws UnknownIdentityException { return getIdentityByID(IdentityID.constructAndValidateFromURI(uri).toString()); } /** * Loads an identity from the database, querying on its requestURI (as String) * * @param uri The requestURI of the identity which will be converted to {@link FreenetURI} * @return The identity matching the supplied requestURI * @throws UnknownIdentityException if there is no identity with this id in the database * @throws MalformedURLException if the requestURI isn't a valid FreenetURI */ public Identity getIdentityByURI(String uri) throws UnknownIdentityException, MalformedURLException { return getIdentityByURI(new FreenetURI(uri)); } /** * Gets an OwnIdentity by its requestURI (a {@link FreenetURI}). * The OwnIdentity's unique identifier is extracted from the supplied requestURI. * * @param uri The requestURI of the desired OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if the OwnIdentity isn't in the database */ public OwnIdentity getOwnIdentityByURI(FreenetURI uri) throws UnknownIdentityException { return getOwnIdentityByID(IdentityID.constructAndValidateFromURI(uri).toString()); } /** * Gets an OwnIdentity by its requestURI (as String). * The given String is converted to {@link FreenetURI} in order to extract a unique id. 
* * @param uri The requestURI (as String) of the desired OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if the OwnIdentity isn't in the database * @throws MalformedURLException if the supplied requestURI is not a valid FreenetURI */ public OwnIdentity getOwnIdentityByURI(String uri) throws UnknownIdentityException, MalformedURLException { return getOwnIdentityByURI(new FreenetURI(uri)); } /** * Returns all identities that are in the database * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all identities present in the database */ public ObjectSet<Identity> getAllIdentities() { final Query query = mDB.query(); query.constrain(Identity.class); return new Persistent.InitializingObjectSet<Identity>(this, query); } public static enum SortOrder { ByNicknameAscending, ByNicknameDescending, ByScoreAscending, ByScoreDescending, ByLocalTrustAscending, ByLocalTrustDescending } /** * Get a filtered and sorted list of identities. * You have to synchronize on this WoT when calling the function and processing the returned list. */ public ObjectSet<Identity> getAllIdentitiesFilteredAndSorted(OwnIdentity truster, String nickFilter, SortOrder sortInstruction) { Query q = mDB.query(); switch(sortInstruction) { case ByNicknameAscending: q.constrain(Identity.class); q.descend("mNickname").orderAscending(); break; case ByNicknameDescending: q.constrain(Identity.class); q.descend("mNickname").orderDescending(); break; case ByScoreAscending: q.constrain(Score.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderAscending(); q = q.descend("mTrustee"); break; case ByScoreDescending: // TODO: This excludes identities which have no score q.constrain(Score.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderDescending(); q = q.descend("mTrustee"); break; case ByLocalTrustAscending: q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderAscending(); q = q.descend("mTrustee"); break; case ByLocalTrustDescending: // TODO: This excludes untrusted identities. q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderDescending(); q = q.descend("mTrustee"); break; } if(nickFilter != null) { nickFilter = nickFilter.trim(); if(!nickFilter.equals("")) q.descend("mNickname").constrain(nickFilter).like(); } return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all non-own identities that are in the database. * * You have to synchronize on this WoT when calling the function and processing the returned list! */ public ObjectSet<Identity> getAllNonOwnIdentities() { final Query q = mDB.query(); q.constrain(Identity.class); q.constrain(OwnIdentity.class).not(); return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all non-own identities that are in the database, sorted descending by their date of modification, i.e. recently * modified identities will be at the beginning of the list. * * You have to synchronize on this WoT when calling the function and processing the returned list! * * Used by the IntroductionClient for fetching puzzles from recently modified identities. 
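 *
 * Hedged usage sketch (illustrative; assumes the caller holds the lock on this WebOfTrust as required above,
 * and "wot" is a hypothetical reference to this WebOfTrust instance):
 *
 * synchronized(wot) {
 *     for(Identity identity : wot.getAllNonOwnIdentitiesSortedByModification()) {
 *         // identities which were fetched most recently come first
 *     }
 * }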
*/ public ObjectSet<Identity> getAllNonOwnIdentitiesSortedByModification () { final Query q = mDB.query(); q.constrain(Identity.class); q.constrain(OwnIdentity.class).not(); /* TODO: As soon as identities announce that they were online every day, uncomment the following line */ /* q.descend("mLastChangedDate").constrain(new Date(CurrentTimeUTC.getInMillis() - 1 * 24 * 60 * 60 * 1000)).greater(); */ q.descend("mLastFetchedDate").orderDescending(); return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all own identities that are in the database * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all identities present in the database. */ public ObjectSet<OwnIdentity> getAllOwnIdentities() { final Query q = mDB.query(); q.constrain(OwnIdentity.class); return new Persistent.InitializingObjectSet<OwnIdentity>(this, q); } /** * DO NOT USE THIS FUNCTION FOR DELETING OWN IDENTITIES UPON USER REQUEST! * IN FACT BE VERY CAREFUL WHEN USING IT FOR ANYTHING FOR THE FOLLOWING REASONS: * - This function deletes ALL given and received trust values of the given identity. This modifies the trust list of the trusters against their will. * - Especially it might be an information leak if the trust values of other OwnIdentities are deleted! * - If WOT one day is designed to be used by many different users at once, the deletion of other OwnIdentity's trust values would even be corruption. * * The intended purpose of this function is: * - To specify which objects have to be dealt with when messing with storage of an identity. * - To be able to do database object leakage tests: Many classes have a deleteWithoutCommit function and there are valid usecases for them. * However, the implementations of those functions might cause leaks by forgetting to delete certain object members. * If you call this function for ALL identities in a database, EVERYTHING should be deleted and the database SHOULD be empty. * You then can check whether the database actually IS empty to test for leakage. * * You have to lock the WebOfTrust, the IntroductionPuzzleStore and the IdentityFetcher before calling this function. */ private void deleteWithoutCommit(Identity identity) { // We want to use beginTrustListImport, finishTrustListImport / abortTrustListImport. // If the caller already handles that for us though, we should not call those function again. // So we check whether the caller already started an import. boolean trustListImportWasInProgress = mTrustListImportInProgress; try { if(!trustListImportWasInProgress) beginTrustListImport(); if(logDEBUG) Logger.debug(this, "Deleting identity " + identity + " ..."); if(logDEBUG) Logger.debug(this, "Deleting received scores..."); for(Score score : getScores(identity)) score.deleteWithoutCommit(); if(identity instanceof OwnIdentity) { if(logDEBUG) Logger.debug(this, "Deleting given scores..."); for(Score score : getGivenScores((OwnIdentity)identity)) score.deleteWithoutCommit(); } if(logDEBUG) Logger.debug(this, "Deleting received trusts..."); for(Trust trust : getReceivedTrusts(identity)) trust.deleteWithoutCommit(); if(logDEBUG) Logger.debug(this, "Deleting given trusts..."); for(Trust givenTrust : getGivenTrusts(identity)) { givenTrust.deleteWithoutCommit(); // We call computeAllScores anyway so we do not use removeTrustWithoutCommit() } mFullScoreComputationNeeded = true; // finishTrustListImport will call computeAllScoresWithoutCommit for us. 
if(logDEBUG) Logger.debug(this, "Deleting associated introduction puzzles ..."); mPuzzleStore.onIdentityDeletion(identity); if(logDEBUG) Logger.debug(this, "Storing an abort-fetch-command..."); if(mFetcher != null) { // Can be null if we use this function in upgradeDB() mFetcher.storeAbortFetchCommandWithoutCommit(identity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. } if(logDEBUG) Logger.debug(this, "Deleting the identity..."); identity.deleteWithoutCommit(); if(!trustListImportWasInProgress) finishTrustListImport(); } catch(RuntimeException e) { if(!trustListImportWasInProgress) abortTrustListImport(e); Persistent.checkedRollbackAndThrow(mDB, this, e); } } /** * Gets the score of this identity in a trust tree. * Each {@link OwnIdentity} has its own trust tree. * * @param truster The owner of the trust tree * @return The {@link Score} of this Identity in the required trust tree * @throws NotInTrustTreeException if this identity is not in the required trust tree */ public synchronized Score getScore(final OwnIdentity truster, final Identity trustee) throws NotInTrustTreeException { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mID").constrain(new ScoreID(truster, trustee).toString()); final ObjectSet<Score> result = new Persistent.InitializingObjectSet<Score>(this, query); switch(result.size()) { case 1: final Score score = result.next(); assert(score.getTruster() == truster); assert(score.getTrustee() == trustee); return score; case 0: throw new NotInTrustTreeException(truster, trustee); default: throw new DuplicateScoreException(truster, trustee, result.size()); } } /** * Gets a list of all this Identity's Scores. * You have to synchronize on this WoT around the call to this function and the processing of the returned list! * * @return An {@link ObjectSet} containing all {@link Score} this Identity has. */ public ObjectSet<Score> getScores(final Identity identity) { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mTrustee").constrain(identity).identity(); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Get a list of all scores which the passed own identity has assigned to other identities. * * You have to synchronize on this WoT around the call to this function and the processing of the returned list! * @return An {@link ObjectSet} containing all {@link Score} this Identity has given. */ public ObjectSet<Score> getGivenScores(final OwnIdentity truster) { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mTruster").constrain(truster).identity(); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Gets the best score this Identity has in existing trust trees. * * @return the best score this Identity has * @throws NotInTrustTreeException If the identity has no score in any trusttree. */ public synchronized int getBestScore(final Identity identity) throws NotInTrustTreeException { int bestScore = Integer.MIN_VALUE; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) throw new NotInTrustTreeException(identity); // TODO: Cache the best score of an identity as a member variable. 
for(final Score score : scores) bestScore = Math.max(score.getScore(), bestScore); return bestScore; } /** * Gets the best capacity this identity has in any trust tree. * @throws NotInTrustTreeException If the identity is not in any trust tree. Can be interpreted as capacity 0. */ public int getBestCapacity(final Identity identity) throws NotInTrustTreeException { int bestCapacity = 0; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) throw new NotInTrustTreeException(identity); // TODO: Cache the best score of an identity as a member variable. for(final Score score : scores) bestCapacity = Math.max(score.getCapacity(), bestCapacity); return bestCapacity; } /** * Get all scores in the database. * You have to synchronize on this WoT when calling the function and processing the returned list! */ public ObjectSet<Score> getAllScores() { final Query query = mDB.query(); query.constrain(Score.class); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Checks whether the given identity should be downloaded. * @return Returns true if the identity has any capacity > 0, any score >= 0 or if it is an own identity. */ public boolean shouldFetchIdentity(final Identity identity) { if(identity instanceof OwnIdentity) return true; int bestScore = Integer.MIN_VALUE; int bestCapacity = 0; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) return false; // TODO: Cache the best score of an identity as a member variable. for(Score score : scores) { bestCapacity = Math.max(score.getCapacity(), bestCapacity); bestScore = Math.max(score.getScore(), bestScore); if(bestCapacity > 0 || bestScore >= 0) return true; } return false; } /** * Gets non-own Identities matching a specified score criteria. * TODO: Rename to getNonOwnIdentitiesByScore. Or even better: Make it return own identities as well, this will speed up the database query and clients might be ok with it. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param truster The owner of the trust tree, null if you want the trusted identities of all owners. * @param select Score criteria, can be > zero, zero or negative. Greater than zero returns all identities with score >= 0, zero with score equal to 0 * and negative with score < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a trust value of 0. * @return an {@link ObjectSet} containing Scores of the identities that match the criteria */ public ObjectSet<Score> getIdentitiesByScore(final OwnIdentity truster, final int select) { final Query query = mDB.query(); query.constrain(Score.class); if(truster != null) query.descend("mTruster").constrain(truster).identity(); query.descend("mTrustee").constrain(OwnIdentity.class).not(); /* We include 0 in the list of identities with positive score because solving captchas gives no points to score */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Gets {@link Trust} from a specified truster to a specified trustee. 
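 *
 * A minimal usage sketch (illustrative; truster and trustee are identities which were already loaded from the database):
 *
 * try {
 *     Trust trust = getTrust(truster, trustee);
 *     // trust.getValue() is the value which truster assigned to trustee
 * } catch(NotTrustedException e) {
 *     // truster has not assigned any trust value to trustee
 * }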
* * @param truster The identity that gives trust to this Identity * @param trustee The identity which receives the trust * @return The trust given to the trustee by the specified truster * @throws NotTrustedException if the truster doesn't trust the trustee */ public synchronized Trust getTrust(final Identity truster, final Identity trustee) throws NotTrustedException, DuplicateTrustException { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mID").constrain(new TrustID(truster, trustee).toString()); final ObjectSet<Trust> result = new Persistent.InitializingObjectSet<Trust>(this, query); switch(result.size()) { case 1: final Trust trust = result.next(); assert(trust.getTruster() == truster); assert(trust.getTrustee() == trustee); return trust; case 0: throw new NotTrustedException(truster, trustee); default: throw new DuplicateTrustException(truster, trustee, result.size()); } } /** * Gets all trusts given by the given truster. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has given. */ public ObjectSet<Trust> getGivenTrusts(final Identity truster) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts given by the given truster. * The result is sorted descending by the time we last fetched the trusted identity. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has given. */ public ObjectSet<Trust> getGivenTrustsSortedDescendingByLastSeen(final Identity truster) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); query.descend("mTrustee").descend("mLastFetchedDate").orderDescending(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets given trust values of an identity matching a specified trust value criteria. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param truster The identity which given the trust values. * @param select Trust value criteria, can be > zero, zero or negative. Greater than zero returns all trust values >= 0, zero returns trust values equal to 0. * Negative returns trust values < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a value of 0. * @return an {@link ObjectSet} containing received trust values that match the criteria. */ public ObjectSet<Trust> getGivenTrusts(final Identity truster, final int select) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); /* We include 0 in the list of identities with positive trust because solving captchas gives 0 trust */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0 ) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts given by the given truster in a trust list with a different edition than the passed in one. * You have to synchronize on this WoT when calling the function and processing the returned list! 
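 *
 * Hedged sketch of a possible use: detecting trust values which are no longer part of a freshly imported trust list
 * (newEdition is a hypothetical variable holding the edition of the just-imported list):
 *
 * for(Trust obsoleteTrust : getGivenTrustsOfDifferentEdition(truster, newEdition))
 *     removeTrustWithoutCommit(obsoleteTrust);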
*/ protected ObjectSet<Trust> getGivenTrustsOfDifferentEdition(final Identity truster, final long edition) { final Query q = mDB.query(); q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mTrusterTrustListEdition").constrain(edition).not(); return new Persistent.InitializingObjectSet<Trust>(this, q); } /** * Gets all trusts received by the given trustee. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has received. */ public ObjectSet<Trust> getReceivedTrusts(final Identity trustee) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTrustee").constrain(trustee).identity(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets received trust values of an identity matching a specified trust value criteria. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param trustee The identity which has received the trust values. * @param select Trust value criteria, can be > zero, zero or negative. Greater than zero returns all trust values >= 0, zero returns trust values equal to 0. * Negative returns trust values < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a value of 0. * @return an {@link ObjectSet} containing received trust values that match the criteria. */ public ObjectSet<Trust> getReceivedTrusts(final Identity trustee, final int select) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTrustee").constrain(trustee).identity(); /* We include 0 in the list of identities with positive trust because solving captchas gives 0 trust */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0 ) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has received. */ public ObjectSet<Trust> getAllTrusts() { final Query query = mDB.query(); query.constrain(Trust.class); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gives some {@link Trust} to another Identity. * It creates or updates an existing Trust object and make the trustee compute its {@link Score}. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(WebOfTrust.this) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... setTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * } * * @param truster The Identity that gives the trust * @param trustee The Identity that receives the trust * @param newValue Numeric value of the trust * @param newComment A comment to explain the given value * @throws InvalidParameterException if a given parameter isn't valid, see {@link Trust} for details on accepted values. 
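 *
 * Example call inside such a transaction (illustrative; the identities, value and comment are placeholders):
 * setTrustWithoutCommit(myOwnIdentity, someIdentity, (byte)75, "Met in person");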
*/ protected void setTrustWithoutCommit(Identity truster, Identity trustee, byte newValue, String newComment) throws InvalidParameterException { try { // Check if we are updating an existing trust value final Trust trust = getTrust(truster, trustee); final Trust oldTrust = trust.clone(); trust.trusterEditionUpdated(); trust.setComment(newComment); trust.storeWithoutCommit(); if(trust.getValue() != newValue) { trust.setValue(newValue); trust.storeWithoutCommit(); if(logDEBUG) Logger.debug(this, "Updated trust value ("+ trust +"), now updating Score."); updateScoresWithoutCommit(oldTrust, trust); } } catch (NotTrustedException e) { final Trust trust = new Trust(this, truster, trustee, newValue, newComment); trust.storeWithoutCommit(); if(logDEBUG) Logger.debug(this, "New trust value ("+ trust +"), now updating Score."); updateScoresWithoutCommit(null, trust); } truster.updated(); truster.storeWithoutCommit(); } /** * Only for being used by WoT internally and by unit tests! */ synchronized void setTrust(OwnIdentity truster, Identity trustee, byte newValue, String newComment) throws InvalidParameterException { synchronized(Persistent.transactionLock(mDB)) { try { setTrustWithoutCommit(truster, trustee, newValue, newComment); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } protected synchronized void removeTrust(OwnIdentity truster, Identity trustee) { synchronized(Persistent.transactionLock(mDB)) { try { removeTrustWithoutCommit(truster, trustee); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } /** * Deletes a trust object. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(Persistent.transactionLock(mDB)) { * try { ... removeTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * * @param truster * @param trustee */ protected synchronized void removeTrustWithoutCommit(OwnIdentity truster, Identity trustee) { try { try { removeTrustWithoutCommit(getTrust(truster, trustee)); } catch (NotTrustedException e) { Logger.error(this, "Cannot remove trust - there is none - from " + truster.getNickname() + " to " + trustee.getNickname()); } } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } /** * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(this) { * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... setTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * }}} * */ protected void removeTrustWithoutCommit(Trust trust) { trust.deleteWithoutCommit(); updateScoresWithoutCommit(trust, null); } /** * Initializes this OwnIdentity's trust tree without commiting the transaction. * Meaning : It creates a Score object for this OwnIdentity in its own trust so it can give trust to other Identities. * * The score will have a rank of 0, a capacity of 100 (= 100 percent) and a score value of Integer.MAX_VALUE. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(Persistent.transactionLock(mDB)) { * try { ... 
initTrustTreeWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * * @throws DuplicateScoreException if there already is more than one Score for this identity (should never happen) */ private synchronized void initTrustTreeWithoutCommit(OwnIdentity identity) throws DuplicateScoreException { try { getScore(identity, identity); Logger.error(this, "initTrustTreeWithoutCommit called even though there is already one for " + identity); return; } catch (NotInTrustTreeException e) { final Score score = new Score(this, identity, identity, Integer.MAX_VALUE, 0, 100); score.storeWithoutCommit(); } } /** * Computes the trustee's Score value according to the trusts it has received and the capacity of its trusters in the specified * trust tree. * * @param truster The OwnIdentity that owns the trust tree * @param trustee The identity for which the score shall be computed. * @return The new Score of the identity. Integer.MAX_VALUE if the trustee is equal to the truster. * @throws DuplicateScoreException if there already exist more than one {@link Score} objects for the trustee (should never happen) */ private synchronized int computeScoreValue(OwnIdentity truster, Identity trustee) throws DuplicateScoreException { if(trustee == truster) return Integer.MAX_VALUE; int value = 0; try { return getTrust(truster, trustee).getValue(); } catch(NotTrustedException e) { } for(Trust trust : getReceivedTrusts(trustee)) { try { final Score trusterScore = getScore(truster, trust.getTruster()); value += ( trust.getValue() * trusterScore.getCapacity() ) / 100; } catch (NotInTrustTreeException e) {} } return value; } /** * Computes the trustees's rank in the trust tree of the truster. * It gets its best ranked non-zero-capacity truster's rank, plus one. * If it has only received negative trust values from identities which have a non-zero-capacity it gets a rank of Integer.MAX_VALUE (infinite). * If it has only received trust values from identities with rank of Integer.MAX_VALUE it gets a rank of -1. * * If the tree owner has assigned a trust value to the identity, the rank computation is only done from that value because the score decisions of the * tree owner are always absolute (if you distrust someone, the remote identities should not be allowed to overpower your decision). * * The purpose of differentiation between Integer.MAX_VALUE and -1 is: * Score objects of identities with rank Integer.MAX_VALUE are kept in the database because WoT will usually "hear" about those identities by seeing them * in the trust lists of trusted identities (with negative trust values). So it must store the trust values to those identities and have a way of telling the * user "this identity is not trusted" by keeping a score object of them. * Score objects of identities with rank -1 are deleted because they are the trustees of distrusted identities and we will not get to the point where we * hear about those identities because the only way of hearing about them is downloading a trust list of a identity with Integer.MAX_VALUE rank - and * we never download their trust lists. * * Notice that 0 is included in infinite rank to prevent identities which have only solved introduction puzzles from having a capacity. 
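 *
 * Illustrative example of the rules above (not taken from real data): if the tree owner assigns a positive trust value
 * to identity A, A receives rank 1. If A (which then has a capacity > 0) assigns a positive trust value to B,
 * B inherits rank 1 + 1 = 2. If A assigns a negative trust value to C and nobody else trusts C, C receives rank
 * Integer.MAX_VALUE. Identities which are only trusted by C end up with rank -1 and their Score objects are not kept.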
* * @param truster The OwnIdentity that owns the trust tree * @return The new Rank if this Identity * @throws DuplicateScoreException if there already exist more than one {@link Score} objects for the trustee (should never happen) */ private synchronized int computeRank(OwnIdentity truster, Identity trustee) throws DuplicateScoreException { if(trustee == truster) return 0; int rank = -1; try { Trust treeOwnerTrust = getTrust(truster, trustee); if(treeOwnerTrust.getValue() > 0) return 1; else return Integer.MAX_VALUE; } catch(NotTrustedException e) { } for(Trust trust : getReceivedTrusts(trustee)) { try { Score score = getScore(truster, trust.getTruster()); if(score.getCapacity() != 0) { // If the truster has no capacity, he can't give his rank // A truster only gives his rank to a trustee if he has assigned a strictly positive trust value if(trust.getValue() > 0 ) { // We give the rank to the trustee if it is better than its current rank or he has no rank yet. if(rank == -1 || score.getRank() < rank) rank = score.getRank(); } else { // If the trustee has no rank yet we give him an infinite rank. because he is distrusted by the truster. if(rank == -1) rank = Integer.MAX_VALUE; } } } catch (NotInTrustTreeException e) {} } if(rank == -1) return -1; else if(rank == Integer.MAX_VALUE) return Integer.MAX_VALUE; else return rank+1; } /** * Begins the import of a trust list. This sets a flag on this WoT which signals that the import of a trust list is in progress. * This speeds up setTrust/removeTrust as the score calculation is only performed when endTrustListImport is called. * * You MUST synchronize on this WoT around beginTrustListImport, abortTrustListImport and finishTrustListImport! * You MUST create a database transaction by synchronizing on Persistent.transactionLock(db). */ protected void beginTrustListImport() { if(logMINOR) Logger.minor(this, "beginTrustListImport()"); if(mTrustListImportInProgress) { abortTrustListImport(new RuntimeException("There was already a trust list import in progress!")); mFullScoreComputationNeeded = true; computeAllScoresWithoutCommit(); assert(mFullScoreComputationNeeded == false); } mTrustListImportInProgress = true; assert(!mFullScoreComputationNeeded); assert(computeAllScoresWithoutCommit()); // The database is intact before the import } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. * * Aborts the import of a trust list and rolls back the current transaction. * * @param e The exception which triggered the abort. Will be logged to the Freenet log file. * @param logLevel The {@link LogLevel} to use when logging the abort to the Freenet log file. */ protected void abortTrustListImport(Exception e, LogLevel logLevel) { if(logMINOR) Logger.minor(this, "abortTrustListImport()"); assert(mTrustListImportInProgress); mTrustListImportInProgress = false; mFullScoreComputationNeeded = false; Persistent.checkedRollback(mDB, this, e, logLevel); assert(computeAllScoresWithoutCommit()); // Test rollback. } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. * * Aborts the import of a trust list and rolls back the current transaction. * * @param e The exception which triggered the abort. Will be logged to the Freenet log file with log level {@link LogLevel.ERROR} */ protected void abortTrustListImport(Exception e) { abortTrustListImport(e, Logger.LogLevel.ERROR); } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. 
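 *
 * Hedged sketch of the overall import pattern, modeled on how createOwnIdentity() uses these functions:
 *
 * synchronized(this) {
 * synchronized(Persistent.transactionLock(mDB)) {
 *     try {
 *         beginTrustListImport();
 *         // ... setTrustWithoutCommit() / removeTrustWithoutCommit() for the received trust values ...
 *         finishTrustListImport();
 *         Persistent.checkedCommit(mDB, this);
 *     } catch(RuntimeException e) {
 *         abortTrustListImport(e); // Rolls back for us
 *         throw e;
 *     }
 * }
 * }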
 * 
 * Finishes the import of the current trust list and clears the "trust list import in progress" flag.
 * 
 * Does NOT commit the transaction, you must do this.
 */
protected void finishTrustListImport() {
    if(logMINOR) Logger.minor(this, "finishTrustListImport()");
    
    if(!mTrustListImportInProgress) {
        Logger.error(this, "There was no trust list import in progress!");
        return;
    }
    
    if(mFullScoreComputationNeeded) {
        computeAllScoresWithoutCommit();
        assert(!mFullScoreComputationNeeded); // It properly clears the flag
        assert(computeAllScoresWithoutCommit()); // computeAllScoresWithoutCommit() is stable
    } else
        assert(computeAllScoresWithoutCommit()); // Verify whether updateScoresWithoutCommit worked.
    
    mTrustListImportInProgress = false;
}

/**
 * Updates all trust trees which are affected by the given modified score.
 * For understanding how score calculation works you should first read {@link computeAllScoresWithoutCommit}.
 * 
 * This function does neither lock the database nor commit the transaction. You have to surround it with
 * synchronized(this) {
 * synchronized(mFetcher) {
 * synchronized(Persistent.transactionLock(mDB)) {
 *     try { ... updateScoresWithoutCommit(...); Persistent.checkedCommit(mDB, this); }
 *     catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); }
 * }}}
 */
private void updateScoresWithoutCommit(final Trust oldTrust, final Trust newTrust) {
    if(logMINOR) Logger.minor(this, "Doing an incremental computation of all Scores...");
    
    final long beginTime = CurrentTimeUTC.getInMillis();
    // We only include the time measurement if we actually do something.
    // If we figure out that a full score recomputation is needed just by looking at the initial parameters, the measurement won't be included.
    boolean includeMeasurement = false;
    
    final boolean trustWasCreated = (oldTrust == null);
    final boolean trustWasDeleted = (newTrust == null);
    final boolean trustWasModified = !trustWasCreated && !trustWasDeleted;
    
    if(trustWasCreated && trustWasDeleted)
        throw new NullPointerException("No old/new trust specified.");
    
    if(trustWasModified && oldTrust.getTruster() != newTrust.getTruster())
        throw new IllegalArgumentException("oldTrust has different truster, oldTrust:" + oldTrust + "; newTrust: " + newTrust);
    
    if(trustWasModified && oldTrust.getTrustee() != newTrust.getTrustee())
        throw new IllegalArgumentException("oldTrust has different trustee, oldTrust:" + oldTrust + "; newTrust: " + newTrust);
    
    // We cannot iteratively REMOVE an inherited rank from the trustees because we don't know whether there is a circle in the trust values
    // which would make the current identity get its old rank back via the circle: computeRank searches the trusters of an identity for the best
    // rank, if we remove the rank from an identity, all its trustees will have a better rank and if one of them trusts the original identity
    // then this function would run into an infinite loop. Decreasing or incrementing an existing rank is possible with this function because
    // the rank received from the trustees will always be higher (that is exactly 1 more) than this identity's rank.
    if(trustWasDeleted) {
        mFullScoreComputationNeeded = true;
    }
    
    if(!mFullScoreComputationNeeded && (trustWasCreated || trustWasModified)) {
        includeMeasurement = true;
        
        for(OwnIdentity treeOwner : getAllOwnIdentities()) {
            try {
                // Throws to abort the update of the trustee's score: If the truster has no rank or capacity in the tree owner's view then we don't need to update the trustee's score.
if(getScore(treeOwner, newTrust.getTruster()).getCapacity() == 0) continue; } catch(NotInTrustTreeException e) { continue; } // See explanation above "We cannot iteratively REMOVE an inherited rank..." if(trustWasModified && oldTrust.getValue() > 0 && newTrust.getValue() <= 0) { mFullScoreComputationNeeded = true; break; } final LinkedList<Trust> unprocessedEdges = new LinkedList<Trust>(); unprocessedEdges.add(newTrust); while(!unprocessedEdges.isEmpty()) { final Trust trust = unprocessedEdges.removeFirst(); final Identity trustee = trust.getTrustee(); if(trustee == treeOwner) continue; Score currentStoredTrusteeScore; try { currentStoredTrusteeScore = getScore(treeOwner, trustee); } catch(NotInTrustTreeException e) { currentStoredTrusteeScore = new Score(this, treeOwner, trustee, 0, -1, 0); } final Score oldScore = currentStoredTrusteeScore.clone(); boolean oldShouldFetch = shouldFetchIdentity(trustee); final int newScoreValue = computeScoreValue(treeOwner, trustee); final int newRank = computeRank(treeOwner, trustee); final int newCapacity = computeCapacity(treeOwner, trustee, newRank); final Score newScore = new Score(this, treeOwner, trustee, newScoreValue, newRank, newCapacity); // Normally we couldn't detect the following two cases due to circular trust values. However, if an own identity assigns a trust value, // the rank and capacity are always computed based on the trust value of the own identity so we must also check this here: if((oldScore.getRank() >= 0 && oldScore.getRank() < Integer.MAX_VALUE) // It had an inheritable rank && (newScore.getRank() == -1 || newScore.getRank() == Integer.MAX_VALUE)) { // It has no inheritable rank anymore mFullScoreComputationNeeded = true; break; } if(oldScore.getCapacity() > 0 && newScore.getCapacity() == 0) { mFullScoreComputationNeeded = true; break; } // We are OK to update it now. We must not update the values of the stored score object before determining whether we need // a full score computation - the full computation needs the old values of the object. currentStoredTrusteeScore.setValue(newScore.getScore()); currentStoredTrusteeScore.setRank(newScore.getRank()); currentStoredTrusteeScore.setCapacity(newScore.getCapacity()); // Identities should not get into the queue if they have no rank, see the large if() about 20 lines below assert(currentStoredTrusteeScore.getRank() >= 0); if(currentStoredTrusteeScore.getRank() >= 0) currentStoredTrusteeScore.storeWithoutCommit(); // If fetch status changed from false to true, we need to start fetching it // If the capacity changed from 0 to positive, we need to refetch the current edition: Identities with capacity 0 cannot // cause new identities to be imported from their trust list, capacity > 0 allows this. 
                    // If the fetch status changed from true to false, we need to stop fetching it
                    
                    if((!oldShouldFetch || (oldScore.getCapacity() == 0 && newScore.getCapacity() > 0)) && shouldFetchIdentity(trustee)) {
                        if(logDEBUG) {
                            if(!oldShouldFetch)
                                Logger.debug(this, "Fetch status changed from false to true, refetching " + trustee);
                            else
                                Logger.debug(this, "Capacity changed from 0 to " + newScore.getCapacity() + ", refetching " + trustee);
                        }
                        
                        trustee.markForRefetch();
                        trustee.storeWithoutCommit();
                        
                        mFetcher.storeStartFetchCommandWithoutCommit(trustee);
                    }
                    else if(oldShouldFetch && !shouldFetchIdentity(trustee)) {
                        if(logDEBUG) Logger.debug(this, "Fetch status changed from true to false, aborting fetch of " + trustee);
                        
                        mFetcher.storeAbortFetchCommandWithoutCommit(trustee);
                    }
                    
                    // If the rank or capacity changed then the trustees might be affected because they could have inherited theirs
                    if(oldScore.getRank() != newScore.getRank() || oldScore.getCapacity() != newScore.getCapacity()) {
                        // If this identity has no capacity or no rank then it cannot affect its trustees:
                        // (- If it had none and it has none now then there is none which can be inherited, this is obvious)
                        // - If it had one before and it was removed, this algorithm will have aborted already because a full computation is needed
                        if(newScore.getCapacity() > 0 || (newScore.getRank() >= 0 && newScore.getRank() < Integer.MAX_VALUE)) {
                            // We need to update the trustees of trustee
                            for(Trust givenTrust : getGivenTrusts(trustee)) {
                                unprocessedEdges.add(givenTrust);
                            }
                        }
                    }
                }
                
                if(mFullScoreComputationNeeded)
                    break;
            }
        }
        
        if(includeMeasurement) {
            ++mIncrementalScoreRecomputationCount;
            mIncrementalScoreRecomputationMilliseconds += CurrentTimeUTC.getInMillis() - beginTime;
        }
        
        if(logMINOR) {
            final String time = includeMeasurement ?
                ("Stats: Amount: " + mIncrementalScoreRecomputationCount + "; Avg Time:" + getAverageIncrementalScoreRecomputationTime() + "s")
                : ("Time not measured: Computation was aborted before doing anything.");
            
            if(!mFullScoreComputationNeeded)
                Logger.minor(this, "Incremental computation of all Scores finished. " + time);
            else
                Logger.minor(this, "Incremental computation of all Scores not possible, full computation is needed. " + time);
        }
        
        if(!mTrustListImportInProgress) {
            if(mFullScoreComputationNeeded) {
                // TODO: Optimization: This uses very much CPU and memory. Write a partial computation function...
                // TODO: Optimization: While we do not have a partial computation function, we could at least optimize computeAllScores to NOT
                // keep all objects in memory etc.
                computeAllScoresWithoutCommit();
                assert(computeAllScoresWithoutCommit()); // computeAllScoresWithoutCommit is stable
            } else {
                assert(computeAllScoresWithoutCommit()); // This function worked correctly.
} } else { // a trust list import is in progress // We not do the following here because it would cause too much CPU usage during debugging: Trust lists are large and therefore // updateScoresWithoutCommit is called often during import of a single trust list // assert(computeAllScoresWithoutCommit()); } } /* Client interface functions */ public synchronized Identity addIdentity(String requestURI) throws MalformedURLException, InvalidParameterException { try { getIdentityByURI(requestURI); throw new InvalidParameterException("We already have this identity"); } catch(UnknownIdentityException e) { final Identity identity = new Identity(this, requestURI, null, false); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Created identity " + identity); // The identity hasn't received a trust value. Therefore, there is no reason to fetch it and we don't notify the IdentityFetcher. // TODO: Document this function and the UI which uses is to warn the user that the identity won't be fetched without trust. return identity; } } public OwnIdentity createOwnIdentity(String nickName, boolean publishTrustList, String context) throws MalformedURLException, InvalidParameterException { FreenetURI[] keypair = getPluginRespirator().getHLSimpleClient().generateKeyPair(WOT_NAME); return createOwnIdentity(keypair[0], nickName, publishTrustList, context); } /** * @param context A context with which you want to use the identity. Null if you want to add it later. */ public synchronized OwnIdentity createOwnIdentity(FreenetURI insertURI, String nickName, boolean publishTrustList, String context) throws MalformedURLException, InvalidParameterException { synchronized(Persistent.transactionLock(mDB)) { OwnIdentity identity; try { identity = getOwnIdentityByURI(insertURI); throw new InvalidParameterException("The URI you specified is already used by the own identity " + identity.getNickname() + "."); } catch(UnknownIdentityException uie) { identity = new OwnIdentity(this, insertURI, nickName, publishTrustList); if(context != null) identity.addContext(context); if(publishTrustList) { identity.addContext(IntroductionPuzzle.INTRODUCTION_CONTEXT); /* TODO: make configureable */ identity.setProperty(IntroductionServer.PUZZLE_COUNT_PROPERTY, Integer.toString(IntroductionServer.DEFAULT_PUZZLE_COUNT)); } try { identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); beginTrustListImport(); // Incremental score computation has proven to be very very slow when creating identities so we just schedule a full computation. mFullScoreComputationNeeded = true; for(String seedURI : SEED_IDENTITIES) { try { setTrustWithoutCommit(identity, getIdentityByURI(seedURI), (byte)100, "Automatically assigned trust to a seed identity."); } catch(UnknownIdentityException e) { Logger.error(this, "SHOULD NOT HAPPEN: Seed identity not known: " + e); } } finishTrustListImport(); Persistent.checkedCommit(mDB, this); if(mIntroductionClient != null) mIntroductionClient.nextIteration(); // This will make it fetch more introduction puzzles. if(logDEBUG) Logger.debug(this, "Successfully created a new OwnIdentity (" + identity.getNickname() + ")"); return identity; } catch(RuntimeException e) { abortTrustListImport(e); // Rolls back for us throw e; // Satisfy the compiler } } } } /** * This "deletes" an {@link OwnIdentity} by replacing it with an {@link Identity}. 
* * The {@link OwnIdentity} is not deleted because this would be a security issue: * If other {@link OwnIdentity}s have assigned a trust value to it, the trust value would be gone if there is no {@link Identity} object to be the target * * @param id The {@link Identity.IdentityID} of the identity. * @throws UnknownIdentityException If there is no {@link OwnIdentity} with the given ID. Also thrown if a non-own identity exists with the given ID. */ public synchronized void deleteOwnIdentity(String id) throws UnknownIdentityException { Logger.normal(this, "deleteOwnIdentity(): Starting... "); synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { final OwnIdentity oldIdentity = getOwnIdentityByID(id); try { Logger.normal(this, "Deleting an OwnIdentity by converting it to a non-own Identity: " + oldIdentity); // We don't need any score computations to happen (explanation will follow below) so we don't need the following: /* beginTrustListImport(); */ // This function messes with the score graph manually so it is a good idea to check whether it is intact before and afterwards. assert(computeAllScoresWithoutCommit()); final Identity newIdentity; try { newIdentity = new Identity(this, oldIdentity.getRequestURI(), oldIdentity.getNickname(), oldIdentity.doesPublishTrustList()); } catch(MalformedURLException e) { // The data was taken from the OwnIdentity so this shouldn't happen throw new RuntimeException(e); } catch (InvalidParameterException e) { // The data was taken from the OwnIdentity so this shouldn't happen throw new RuntimeException(e); } newIdentity.setContexts(oldIdentity.getContexts()); newIdentity.setProperties(oldIdentity.getProperties()); try { newIdentity.setEdition(oldIdentity.getEdition()); } catch (InvalidParameterException e) { // The data was taken from old identity so this shouldn't happen throw new RuntimeException(e); } // In theory we do not need to re-fetch the current trust list edition: // The trust list of an own identity is always stored completely in the database, i.e. all trustees exist. // HOWEVER if the user had used the restoreOwnIdentity feature and then used this function, it might be the case that // the current edition of the old OwndIdentity was not fetched yet. // So we set the fetch state to FetchState.Fetched if the oldIdentity's fetch state was like that as well. if(oldIdentity.getCurrentEditionFetchState() == FetchState.Fetched) { newIdentity.onFetched(oldIdentity.getLastFetchedDate()); } // An else to set the fetch state to FetchState.NotFetched is not necessary, newIdentity.setEdition() did that already. newIdentity.storeWithoutCommit(); // Copy all received trusts. // We don't have to modify them because they are user-assigned values and the assignment // of the user does not change just because the type of the identity changes. for(Trust oldReceivedTrust : getReceivedTrusts(oldIdentity)) { Trust newReceivedTrust; try { newReceivedTrust = new Trust(this, oldReceivedTrust.getTruster(), newIdentity, oldReceivedTrust.getValue(), oldReceivedTrust.getComment()); } catch (InvalidParameterException e) { // The data was taken from the old Trust so this shouldn't happen throw new RuntimeException(e); } // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. 
/* assert(newReceivedTrust.equals(oldReceivedTrust)); */ oldReceivedTrust.deleteWithoutCommit(); newReceivedTrust.storeWithoutCommit(); } assert(getReceivedTrusts(oldIdentity).size() == 0); // Copy all received scores. // We don't have to modify them because the rating of the identity from the perspective of a // different own identity should NOT be dependent upon whether it is an own identity or not. for(Score oldScore : getScores(oldIdentity)) { Score newScore = new Score(this, oldScore.getTruster(), newIdentity, oldScore.getScore(), oldScore.getRank(), oldScore.getCapacity()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newScore.equals(oldScore)); */ oldScore.deleteWithoutCommit(); newScore.storeWithoutCommit(); } assert(getScores(oldIdentity).size() == 0); // Delete all given scores: // Non-own identities do not assign scores to other identities so we can just delete them. for(Score oldScore : getGivenScores(oldIdentity)) { final Identity trustee = oldScore.getTrustee(); final boolean oldShouldFetchTrustee = shouldFetchIdentity(trustee); oldScore.deleteWithoutCommit(); // If the OwnIdentity which we are converting was the only source of trust to the trustee // of this Score value, the should-fetch state of the trustee might change to false. if(oldShouldFetchTrustee && shouldFetchIdentity(trustee) == false) { mFetcher.storeAbortFetchCommandWithoutCommit(trustee); } } assert(getGivenScores(oldIdentity).size() == 0); // Copy all given trusts: // We don't have to use the removeTrust/setTrust functions because the score graph does not need updating: // - To the rating of the converted identity in the score graphs of other own identities it is irrelevant // whether it is an own identity or not. The rating should never depend on whether it is an own identity! // - Non-own identities do not have a score graph. So the score graph of the converted identity is deleted // completely and therefore it does not need to be updated. for(Trust oldGivenTrust : getGivenTrusts(oldIdentity)) { Trust newGivenTrust; try { newGivenTrust = new Trust(this, newIdentity, oldGivenTrust.getTrustee(), oldGivenTrust.getValue(), oldGivenTrust.getComment()); } catch (InvalidParameterException e) { // The data was taken from the old Trust so this shouldn't happen throw new RuntimeException(e); } // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newGivenTrust.equals(oldGivenTrust)); */ oldGivenTrust.deleteWithoutCommit(); newGivenTrust.storeWithoutCommit(); } mPuzzleStore.onIdentityDeletion(oldIdentity); mFetcher.storeAbortFetchCommandWithoutCommit(oldIdentity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. oldIdentity.deleteWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(newIdentity); // This function messes with the score graph manually so it is a good idea to check whether it is intact before and afterwards. 
assert(computeAllScoresWithoutCommit()); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "deleteOwnIdentity(): Finished."); } /** * NOTICE: When changing this function, please also take care of {@link OwnIdentity.isRestoreInProgress()} */ public synchronized void restoreOwnIdentity(FreenetURI insertFreenetURI) throws MalformedURLException, InvalidParameterException { Logger.normal(this, "restoreOwnIdentity(): Starting... "); OwnIdentity identity; synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { try { long edition = 0; try { edition = Math.max(edition, insertFreenetURI.getEdition()); } catch(IllegalStateException e) { // The user supplied URI did not have an edition specified } try { // Try replacing an existing non-own version of the identity with an OwnIdentity Identity oldIdentity = getIdentityByURI(insertFreenetURI); if(oldIdentity instanceof OwnIdentity) throw new InvalidParameterException("There is already an own identity with the given URI pair."); Logger.normal(this, "Restoring an already known identity from Freenet: " + oldIdentity); // Normally, one would expect beginTrustListImport() to happen close to the actual trust list changes later on in this function. // But beginTrustListImport() contains an assert(computeAllScoresWithoutCommit()) and that call to the score computation reference // implementation will fail if two identities with the same ID exist. // This would be the case later on - we cannot delete the non-own version of the OwnIdentity before we modified the trust graph // but we must also store the own version to be able to modify the trust graph. beginTrustListImport(); // We already have fetched this identity as a stranger's one. We need to update the database. identity = new OwnIdentity(this, insertFreenetURI, oldIdentity.getNickname(), oldIdentity.doesPublishTrustList()); /* We re-fetch the most recent edition to make sure all trustees are imported */ edition = Math.max(edition, oldIdentity.getEdition()); identity.restoreEdition(edition, oldIdentity.getLastFetchedDate()); identity.setContexts(oldIdentity.getContexts()); identity.setProperties(oldIdentity.getProperties()); identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); // Copy all received trusts. // We don't have to modify them because they are user-assigned values and the assignment // of the user does not change just because the type of the identity changes. for(Trust oldReceivedTrust : getReceivedTrusts(oldIdentity)) { Trust newReceivedTrust = new Trust(this, oldReceivedTrust.getTruster(), identity, oldReceivedTrust.getValue(), oldReceivedTrust.getComment()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newReceivedTrust.equals(oldReceivedTrust)); */ oldReceivedTrust.deleteWithoutCommit(); newReceivedTrust.storeWithoutCommit(); } assert(getReceivedTrusts(oldIdentity).size() == 0); // Copy all received scores. // We don't have to modify them because the rating of the identity from the perspective of a // different own identity should NOT be dependent upon whether it is an own identity or not. 
for(Score oldScore : getScores(oldIdentity)) { Score newScore = new Score(this, oldScore.getTruster(), identity, oldScore.getScore(), oldScore.getRank(), oldScore.getCapacity()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newScore.equals(oldScore)); */ oldScore.deleteWithoutCommit(); newScore.storeWithoutCommit(); } assert(getScores(oldIdentity).size() == 0); // What we do NOT have to deal with is the given scores of the old identity: // Given scores do NOT exist for non-own identities, so there are no old ones to update. // Of cause there WILL be scores because it is an own identity now. // They will be created automatically when updating the given trusts // - so thats what we will do now. // Update all given trusts for(Trust givenTrust : getGivenTrusts(oldIdentity)) { // TODO: Instead of using the regular removeTrustWithoutCommit on all trust values, we could: // - manually delete the old Trust objects from the database // - manually store the new trust objects // - Realize that only the trust graph of the restored identity needs to be updated and write an optimized version // of setTrustWithoutCommit which deals with that. // But before we do that, we should first do the existing possible optimization of removeTrustWithoutCommit: // To get rid of removeTrustWithoutCommit always triggering a FULL score recomputation and instead make // it only update the parts of the trust graph which are affected. // Maybe the optimized version is fast enough that we don't have to do the optimization which this TODO suggests. removeTrustWithoutCommit(givenTrust); setTrustWithoutCommit(identity, givenTrust.getTrustee(), givenTrust.getValue(), givenTrust.getComment()); } // We do not call finishTrustListImport() now: It might trigger execution of computeAllScoresWithoutCommit // which would re-create scores of the old identity. We later call it AFTER deleting the old identity. /* finishTrustListImport(); */ mPuzzleStore.onIdentityDeletion(oldIdentity); mFetcher.storeAbortFetchCommandWithoutCommit(oldIdentity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. oldIdentity.deleteWithoutCommit(); finishTrustListImport(); } catch (UnknownIdentityException e) { // The identity did NOT exist as non-own identity yet so we can just create an OwnIdentity and store it. identity = new OwnIdentity(this, insertFreenetURI, null, false); Logger.normal(this, "Restoring not-yet-known identity from Freenet: " + identity); identity.restoreEdition(edition, null); // Store the new identity identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); } mFetcher.storeStartFetchCommandWithoutCommit(identity); // This function messes with the trust graph manually so it is a good idea to check whether it is intact afterwards. 
assert(computeAllScoresWithoutCommit()); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { abortTrustListImport(e); Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "restoreOwnIdentity(): Finished."); } public synchronized void setTrust(String ownTrusterID, String trusteeID, byte value, String comment) throws UnknownIdentityException, NumberFormatException, InvalidParameterException { final OwnIdentity truster = getOwnIdentityByID(ownTrusterID); Identity trustee = getIdentityByID(trusteeID); setTrust(truster, trustee, value, comment); } public synchronized void removeTrust(String ownTrusterID, String trusteeID) throws UnknownIdentityException { final OwnIdentity truster = getOwnIdentityByID(ownTrusterID); final Identity trustee = getIdentityByID(trusteeID); removeTrust(truster, trustee); } public synchronized void addContext(String ownIdentityID, String newContext) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.addContext(newContext); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Added context '" + newContext + "' to identity '" + identity.getNickname() + "'"); } public synchronized void removeContext(String ownIdentityID, String context) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.removeContext(context); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Removed context '" + context + "' from identity '" + identity.getNickname() + "'"); } public synchronized String getProperty(String identityID, String property) throws InvalidParameterException, UnknownIdentityException { return getIdentityByID(identityID).getProperty(property); } public synchronized void setProperty(String ownIdentityID, String property, String value) throws UnknownIdentityException, InvalidParameterException { Identity identity = getOwnIdentityByID(ownIdentityID); identity.setProperty(property, value); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Added property '" + property + "=" + value + "' to identity '" + identity.getNickname() + "'"); } public synchronized void removeProperty(String ownIdentityID, String property) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.removeProperty(property); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Removed property '" + property + "' from identity '" + identity.getNickname() + "'"); } public String getVersion() { return Version.getMarketingVersion(); } public long getRealVersion() { return Version.getRealVersion(); } public String getString(String key) { return getBaseL10n().getString(key); } public void setLanguage(LANGUAGE newLanguage) { WebOfTrust.l10n = new PluginL10n(this, newLanguage); if(logDEBUG) Logger.debug(this, "Set LANGUAGE to: " + newLanguage.isoCode); } public PluginRespirator getPluginRespirator() { return mPR; } public ExtObjectContainer getDatabase() { return mDB; } public Configuration getConfig() { return mConfig; } public IdentityFetcher getIdentityFetcher() { return mFetcher; } public XMLTransformer getXMLTransformer() { return mXMLTransformer; } public IntroductionPuzzleStore getIntroductionPuzzleStore() { return mPuzzleStore; } public IntroductionClient getIntroductionClient() { return mIntroductionClient; } public RequestClient getRequestClient() { return mRequestClient; } /** * This is where our L10n files are 
stored. * @return Path of our L10n files. */ public String getL10nFilesBasePath() { return "plugins/WebOfTrust/l10n/"; } /** * This is the mask of our L10n files: lang_en.l10n, lang_de.l10n, ... * @return Mask of the L10n files. */ public String getL10nFilesMask() { return "lang_${lang}.l10n"; } /** * Override L10n files are stored on the disk; their names should be explicit, so * we put the plugin name and the "override" indication in them. Plugin L10n * override is not implemented in the node yet. * @return Mask of the override L10n files. */ public String getL10nOverrideFilesMask() { return "WebOfTrust_lang_${lang}.override.l10n"; } /** * Get the ClassLoader of this plugin. This is necessary when getting * resources inside the plugin's Jar, for example L10n files. * @return ClassLoader object */ public ClassLoader getPluginClassLoader() { return WebOfTrust.class.getClassLoader(); } /** * Access to the current L10n data. * * @return L10n object. */ public BaseL10n getBaseL10n() { return WebOfTrust.l10n.getBase(); } public int getNumberOfFullScoreRecomputations() { return mFullScoreRecomputationCount; } public synchronized double getAverageFullScoreRecomputationTime() { return (double)mFullScoreRecomputationMilliseconds / ((mFullScoreRecomputationCount != 0 ? mFullScoreRecomputationCount : 1) * 1000); } public int getNumberOfIncrementalScoreRecomputations() { return mIncrementalScoreRecomputationCount; } public synchronized double getAverageIncrementalScoreRecomputationTime() { return (double)mIncrementalScoreRecomputationMilliseconds / ((mIncrementalScoreRecomputationCount != 0 ? mIncrementalScoreRecomputationCount : 1) * 1000); } /** * Tests whether two WoT instances are equal. * This is a complex operation in terms of execution time and memory usage and only intended for being used in unit tests.
*/ public synchronized boolean equals(Object obj) { if(obj == this) return true; if(!(obj instanceof WebOfTrust)) return false; WebOfTrust other = (WebOfTrust)obj; synchronized(other) { { // Compare own identities final ObjectSet<OwnIdentity> allIdentities = getAllOwnIdentities(); if(allIdentities.size() != other.getAllOwnIdentities().size()) return false; for(OwnIdentity identity : allIdentities) { try { if(!identity.equals(other.getOwnIdentityByID(identity.getID()))) return false; } catch(UnknownIdentityException e) { return false; } } } { // Compare identities final ObjectSet<Identity> allIdentities = getAllIdentities(); if(allIdentities.size() != other.getAllIdentities().size()) return false; for(Identity identity : allIdentities) { try { if(!identity.equals(other.getIdentityByID(identity.getID()))) return false; } catch(UnknownIdentityException e) { return false; } } } { // Compare trusts final ObjectSet<Trust> allTrusts = getAllTrusts(); if(allTrusts.size() != other.getAllTrusts().size()) return false; for(Trust trust : allTrusts) { try { Identity otherTruster = other.getIdentityByID(trust.getTruster().getID()); Identity otherTrustee = other.getIdentityByID(trust.getTrustee().getID()); if(!trust.equals(other.getTrust(otherTruster, otherTrustee))) return false; } catch(UnknownIdentityException e) { return false; } catch(NotTrustedException e) { return false; } } } { // Compare scores final ObjectSet<Score> allScores = getAllScores(); if(allScores.size() != other.getAllScores().size()) return false; for(Score score : allScores) { try { OwnIdentity otherTruster = other.getOwnIdentityByID(score.getTruster().getID()); Identity otherTrustee = other.getIdentityByID(score.getTrustee().getID()); if(!score.equals(other.getScore(otherTruster, otherTrustee))) return false; } catch(UnknownIdentityException e) { return false; } catch(NotInTrustTreeException e) { return false; } } } } return true; } }
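/*
 * Illustrative usage sketch (not part of the original class; the helper names are hypothetical):
 * equals() above is intended for unit tests which compare a whole WoT database against a
 * reference copy, e.g. after a backup/restore or export/import cycle:
 *
 *   WebOfTrust original = constructSomeTestWebOfTrust(); // hypothetical test fixture
 *   WebOfTrust restored = exportAndReimport(original);   // hypothetical helper: serialize and re-import the database
 *   assertTrue(original.equals(restored));               // deep comparison of own identities, identities, trusts and scores
 */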
private synchronized void deleteDuplicateObjects() { synchronized(mPuzzleStore) { // Needed for deleteIdentity() synchronized(mFetcher) { // // Needed for deleteIdentity() synchronized(Persistent.transactionLock(mDB)) { try { HashSet<String> deleted = new HashSet<String>(); if(logDEBUG) Logger.debug(this, "Searching for duplicate identities ..."); for(Identity identity : getAllIdentities()) { Query q = mDB.query(); q.constrain(Identity.class); q.descend("mID").constrain(identity.getID()); q.constrain(identity).identity().not(); ObjectSet<Identity> duplicates = new Persistent.InitializingObjectSet<Identity>(this, q); for(Identity duplicate : duplicates) { if(deleted.contains(duplicate.getID()) == false) { Logger.error(duplicate, "Deleting duplicate identity " + duplicate.getRequestURI()); deleteWithoutCommit(duplicate); Persistent.checkedCommit(mDB, this); } } deleted.add(identity.getID()); } Persistent.checkedCommit(mDB, this); if(logDEBUG) Logger.debug(this, "Finished searching for duplicate identities."); } catch(RuntimeException e) { Persistent.checkedRollback(mDB, this, e); } } } } // synchronized(this) { // For removeTrustWithoutCommit. Done at function level already. synchronized(mFetcher) { // For removeTrustWithoutCommit synchronized(Persistent.transactionLock(mDB)) { try { if(logDEBUG) Logger.debug(this, "Searching for duplicate Trust objects ..."); boolean duplicateTrustFound = false; for(OwnIdentity truster : getAllOwnIdentities()) { HashSet<String> givenTo = new HashSet<String>(); for(Trust trust : getGivenTrusts(truster)) { if(givenTo.contains(trust.getTrustee().getID()) == false) givenTo.add(trust.getTrustee().getID()); else { Logger.error(this, "Deleting duplicate given trust:" + trust); removeTrustWithoutCommit(trust); duplicateTrustFound = true; } } } if(duplicateTrustFound) { computeAllScoresWithoutCommit(); } Persistent.checkedCommit(mDB, this); if(logDEBUG) Logger.debug(this, "Finished searching for duplicate trust objects."); } catch(RuntimeException e) { Persistent.checkedRollback(mDB, this, e); } } // synchronized(Persistent.transactionLock(mDB)) { } // synchronized(mFetcher) { /* TODO: Also delete duplicate score */ } /** * Debug function for deleting trusts or scores of which one of the involved partners is missing. */ private synchronized void deleteOrphanObjects() { synchronized(Persistent.transactionLock(mDB)) { try { boolean orphanTrustFound = false; Query q = mDB.query(); q.constrain(Trust.class); q.descend("mTruster").constrain(null).identity().or(q.descend("mTrustee").constrain(null).identity()); ObjectSet<Trust> orphanTrusts = new Persistent.InitializingObjectSet<Trust>(this, q); for(Trust trust : orphanTrusts) { if(trust.getTruster() != null && trust.getTrustee() != null) { // TODO: Remove this workaround for the db4o bug as soon as we are sure that it does not happen anymore. 
Logger.error(this, "Db4o bug: constrain(null).identity() did not work for " + trust); continue; } Logger.error(trust, "Deleting orphan trust, truster = " + trust.getTruster() + ", trustee = " + trust.getTrustee()); orphanTrustFound = true; trust.deleteWithoutCommit(); } if(orphanTrustFound) { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } } catch(Exception e) { Persistent.checkedRollback(mDB, this, e); } } synchronized(Persistent.transactionLock(mDB)) { try { boolean orphanScoresFound = false; Query q = mDB.query(); q.constrain(Score.class); q.descend("mTruster").constrain(null).identity().or(q.descend("mTrustee").constrain(null).identity()); ObjectSet<Score> orphanScores = new Persistent.InitializingObjectSet<Score>(this, q); for(Score score : orphanScores) { if(score.getTruster() != null && score.getTrustee() != null) { // TODO: Remove this workaround for the db4o bug as soon as we are sure that it does not happen anymore. Logger.error(this, "Db4o bug: constrain(null).identity() did not work for " + score); continue; } Logger.error(score, "Deleting orphan score, truster = " + score.getTruster() + ", trustee = " + score.getTrustee()); orphanScoresFound = true; score.deleteWithoutCommit(); } if(orphanScoresFound) { computeAllScoresWithoutCommit(); Persistent.checkedCommit(mDB, this); } } catch(Exception e) { Persistent.checkedRollback(mDB, this, e); } } } /** * Warning: This function is not synchronized, use it only in single threaded mode. * @return The WOT database format version of the given database. -1 if there is no Configuration stored in it or multiple configurations exist. */ @SuppressWarnings("deprecation") private static int peekDatabaseFormatVersion(WebOfTrust wot, ExtObjectContainer database) { final Query query = database.query(); query.constrain(Configuration.class); @SuppressWarnings("unchecked") ObjectSet<Configuration> result = (ObjectSet<Configuration>)query.execute(); switch(result.size()) { case 1: { final Configuration config = (Configuration)result.next(); config.initializeTransient(wot, database); // For the HashMaps to stay alive we need to activate to full depth. config.checkedActivate(4); return config.getDatabaseFormatVersion(); } default: return -1; } } /** * Loads an existing Config object from the database and adds any missing default values to it, creates and stores a new one if none exists. * @return The config object. */ private synchronized Configuration getOrCreateConfig() { final Query query = mDB.query(); query.constrain(Configuration.class); final ObjectSet<Configuration> result = new Persistent.InitializingObjectSet<Configuration>(this, query); switch(result.size()) { case 1: { final Configuration config = result.next(); // For the HashMaps to stay alive we need to activate to full depth. config.checkedActivate(4); config.setDefaultValues(false); config.storeAndCommit(); return config; } case 0: { final Configuration config = new Configuration(this); config.initializeTransient(this); config.storeAndCommit(); return config; } default: throw new RuntimeException("Multiple config objects found: " + result.size()); } } /** Capacity is the maximum amount of points an identity can give to an other by trusting it. * * Values choice : * Advogato Trust metric recommends that values decrease by rounded 2.5 times. * This makes sense, making the need of 3 N+1 ranked people to overpower * the trust given by a N ranked identity. 
* * Number of ranks choice: * When someone creates a fresh identity, he gets the seed identity at * rank 1 and freenet developers at rank 2. That means that * he will see people that were: * - given 7 trust by freenet devs (rank 2) * - given 17 trust by rank 3 * - given 50 trust by rank 4 * - given 100 trust by rank 5 and above. * This makes the range small enough to prevent a newbie * from even seeing spam, and large enough to make him see a reasonable part * of the community right out-of-the-box. * Of course, as soon as he starts to give trust, he will put more * people at rank 1 and enlarge his WoT. */ protected static final int capacities[] = { 100,// Rank 0 : Own identities 40, // Rank 1 : Identities directly trusted by own identities 16, // Rank 2 : Identities trusted by rank 1 identities 6, // So on... 2, 1 // Every identity above rank 5 can give 1 point }; // Identities with negative score have zero capacity /** * Computes the capacity of a truster. The capacity is a weight function in percent which is used to decide how many * trust points an identity can add to the score of identities which it has assigned trust values to. * The higher the rank of an identity, the lower its capacity. * * If the rank of the identity is Integer.MAX_VALUE (infinite, this means it has only received negative or 0 trust values from identities with rank >= 0 and less * than infinite) or -1 (this means that it has only received trust values from identities with infinite rank) then its capacity is 0. * * If the truster has assigned a trust value to the trustee the capacity will be computed only from that trust value: * The decision of the truster should always overpower the view of remote identities. * * Notice that 0 is included in infinite rank to prevent identities which have only solved introduction puzzles from having a capacity. * * @param truster The {@link OwnIdentity} in whose trust tree the capacity shall be computed * @param trustee The {@link Identity} of which the capacity shall be computed. * @param rank The rank of the identity. The rank is the distance in trust steps from the OwnIdentity which views the web of trust, * - its rank is 0, the rank of its trustees is 1 and so on. Must be -1 if the truster has no rank in the tree owner's view. */ protected int computeCapacity(OwnIdentity truster, Identity trustee, int rank) { if(truster == trustee) return 100; try { if(getTrust(truster, trustee).getValue() <= 0) { // Security check, if rank computation breaks this will hit. assert(rank == Integer.MAX_VALUE); return 0; } } catch(NotTrustedException e) { } if(rank == -1 || rank == Integer.MAX_VALUE) return 0; return (rank < capacities.length) ? capacities[rank] : 1; } /** * Reference-implementation of score computation. This means:<br /> * - It is not used by the real WoT code because it is slow<br /> * - It is used by unit tests (and WoT) to check whether the real implementation works<br /> * - It is the function which you should read if you want to understand how WoT works.<br /> * * Computes all rank and score values and checks whether the database is correct. If wrong values are found, they are corrected.<br /> * * There was a bug in the score computation for a long time which resulted in wrong computation when trust values were removed under certain conditions.<br /> * * Further, rank values are shortest paths and the path-finding algorithm is not executed from the source * to the target upon score computation: It uses the rank of the neighbor nodes to find a shortest path.
* Therefore, the algorithm is very vulnerable to bugs since one wrong value will stay in the database * and affect many others. So it is useful to have this function. * * @return True if all stored scores were correct. False if there were any errors in stored scores. */ protected synchronized boolean computeAllScoresWithoutCommit() { if(logMINOR) Logger.minor(this, "Doing a full computation of all Scores..."); final long beginTime = CurrentTimeUTC.getInMillis(); boolean returnValue = true; final ObjectSet<Identity> allIdentities = getAllIdentities(); // Scores are a rating of an identity from the view of an OwnIdentity so we compute them per OwnIdentity. for(OwnIdentity treeOwner : getAllOwnIdentities()) { // At the end of the loop body, this table will be filled with the ranks of all identities which are visible for treeOwner. // An identity is visible if there is a trust chain from the owner to it. // The rank is the distance in trust steps from the treeOwner. // So the treeOwner is rank 0, the trustees of the treeOwner are rank 1 and so on. final HashMap<Identity, Integer> rankValues = new HashMap<Identity, Integer>(allIdentities.size() * 2); // Compute the rank values { // For each identity which is added to rankValues, all its trustees are added to unprocessedTrusters. // The inner loop then pulls out one unprocessed identity and computes the rank of its trustees: // All trustees which have received positive (> 0) trust will get his rank + 1 // Trustees with negative trust or 0 trust will get a rank of Integer.MAX_VALUE. // Trusters with rank Integer.MAX_VALUE cannot inherit their rank to their trustees so the trustees will get no rank at all. // Identities with no rank are considered to be not in the trust tree of the own identity and their score will be null / none. // // Further, if the treeOwner has assigned a trust value to an identity, the rank decision is done by only considering this trust value: // The decision of the own identity shall not be overpowered by the view of the remote identities. // // The purpose of differentiation between Integer.MAX_VALUE and -1 is: // Score objects of identities with rank Integer.MAX_VALUE are kept in the database because WoT will usually "hear" about those identities by seeing // them in the trust lists of trusted identities (with 0 or negative trust values). So it must store the trust values to those identities and // have a way of telling the user "this identity is not trusted" by keeping a score object of them. // Score objects of identities with rank -1 are deleted because they are the trustees of distrusted identities and we will not get to the point where // we hear about those identities because the only way of hearing about them is importing a trust list of a identity with Integer.MAX_VALUE rank // - and we never import their trust lists. // We include trust values of 0 in the set of rank Integer.MAX_VALUE (instead of only NEGATIVE trust) so that identities which only have solved // introduction puzzles cannot inherit their rank to their trustees. 
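// Illustrative example (not part of the original comments): if the treeOwner O gives +100 to A and 0 to B, and A gives +75 to C,
// the BFS below (seeded with O's self-score rank of 0) computes rank(A)=1, rank(C)=2 and rank(B)=Integer.MAX_VALUE.
// C inherits a rank because A has a finite rank and gave strictly positive trust; B gets the infinite rank (trust <= 0)
// and therefore capacity 0, so identities which are only trusted by B receive no rank at all.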
final LinkedList<Identity> unprocessedTrusters = new LinkedList<Identity>(); // The own identity is the root of the trust tree, it should assign itself a rank of 0 , a capacity of 100 and a symbolic score of Integer.MAX_VALUE try { Score selfScore = getScore(treeOwner, treeOwner); if(selfScore.getRank() >= 0) { // It can only give it's rank if it has a valid one rankValues.put(treeOwner, selfScore.getRank()); unprocessedTrusters.addLast(treeOwner); } else { rankValues.put(treeOwner, null); } } catch(NotInTrustTreeException e) { // This only happens in unit tests. } while(!unprocessedTrusters.isEmpty()) { final Identity truster = unprocessedTrusters.removeFirst(); final Integer trusterRank = rankValues.get(truster); // The truster cannot give his rank to his trustees because he has none (or infinite), they receive no rank at all. if(trusterRank == null || trusterRank == Integer.MAX_VALUE) { // (Normally this does not happen because we do not enqueue the identities if they have no rank but we check for security) continue; } final int trusteeRank = trusterRank + 1; for(Trust trust : getGivenTrusts(truster)) { final Identity trustee = trust.getTrustee(); final Integer oldTrusteeRank = rankValues.get(trustee); if(oldTrusteeRank == null) { // The trustee was not processed yet if(trust.getValue() > 0) { rankValues.put(trustee, trusteeRank); unprocessedTrusters.addLast(trustee); } else rankValues.put(trustee, Integer.MAX_VALUE); } else { // Breadth first search will process all rank one identities are processed before any rank two identities, etc. assert(oldTrusteeRank == Integer.MAX_VALUE || trusteeRank >= oldTrusteeRank); if(oldTrusteeRank == Integer.MAX_VALUE) { // If we found a rank less than infinite we can overwrite the old rank with this one, but only if the infinite rank was not // given by the tree owner. try { final Trust treeOwnerTrust = getTrust(treeOwner, trustee); assert(treeOwnerTrust.getValue() <= 0); // TODO: Is this correct? } catch(NotTrustedException e) { if(trust.getValue() > 0) { rankValues.put(trustee, trusteeRank); unprocessedTrusters.addLast(trustee); } } } } } } } // Rank values of all visible identities are computed now. // Next step is to compute the scores of all identities for(Identity target : allIdentities) { // The score of an identity is the sum of all weighted trust values it has received. // Each trust value is weighted with the capacity of the truster - the capacity decays with increasing rank. Integer targetScore; final Integer targetRank = rankValues.get(target); if(targetRank == null) { targetScore = null; } else { // The treeOwner trusts himself. if(targetRank == 0) { targetScore = Integer.MAX_VALUE; } else { // If the treeOwner has assigned a trust value to the target, it always overrides the "remote" score. try { targetScore = (int)getTrust(treeOwner, target).getValue(); } catch(NotTrustedException e) { targetScore = 0; for(Trust receivedTrust : getReceivedTrusts(target)) { final Identity truster = receivedTrust.getTruster(); final Integer trusterRank = rankValues.get(truster); // The capacity is a weight function for trust values which are given from an identity: // The higher the rank, the less the capacity. // If the rank is Integer.MAX_VALUE (infinite) or -1 (no rank at all) the capacity will be 0. final int capacity = computeCapacity(treeOwner, truster, trusterRank != null ? 
trusterRank : -1); targetScore += (receivedTrust.getValue() * capacity) / 100; } } } } Score newScore = null; if(targetScore != null) { newScore = new Score(this, treeOwner, target, targetScore, targetRank, computeCapacity(treeOwner, target, targetRank)); } boolean needToCheckFetchStatus = false; boolean oldShouldFetch = false; int oldCapacity = 0; // Now we have the rank and the score of the target computed and can check whether the database-stored score object is correct. try { Score currentStoredScore = getScore(treeOwner, target); oldCapacity = currentStoredScore.getCapacity(); if(newScore == null) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: The identity has no rank and should have no score but score was " + currentStoredScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); currentStoredScore.deleteWithoutCommit(); } else { if(!newScore.equals(currentStoredScore)) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: Should have been " + newScore + " but was " + currentStoredScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); currentStoredScore.setRank(newScore.getRank()); currentStoredScore.setCapacity(newScore.getCapacity()); currentStoredScore.setValue(newScore.getScore()); currentStoredScore.storeWithoutCommit(); } } } catch(NotInTrustTreeException e) { oldCapacity = 0; if(newScore != null) { returnValue = false; if(!mFullScoreComputationNeeded) Logger.error(this, "Correcting wrong score: No score was stored for the identity but it should be " + newScore, new RuntimeException()); needToCheckFetchStatus = true; oldShouldFetch = shouldFetchIdentity(target); newScore.storeWithoutCommit(); } } if(needToCheckFetchStatus) { // If fetch status changed from false to true, we need to start fetching it // If the capacity changed from 0 to positive, we need to refetch the current edition: Identities with capacity 0 cannot // cause new identities to be imported from their trust list, capacity > 0 allows this. // If the fetch status changed from true to false, we need to stop fetching it if((!oldShouldFetch || (oldCapacity == 0 && newScore != null && newScore.getCapacity() > 0)) && shouldFetchIdentity(target) ) { if(!oldShouldFetch) if(logDEBUG) Logger.debug(this, "Fetch status changed from false to true, refetching " + target); else if(logDEBUG) Logger.debug(this, "Capacity changed from 0 to " + newScore.getCapacity() + ", refetching" + target); target.markForRefetch(); target.storeWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(target); } else if(oldShouldFetch && !shouldFetchIdentity(target)) { if(logDEBUG) Logger.debug(this, "Fetch status changed from true to false, aborting fetch of " + target); mFetcher.storeAbortFetchCommandWithoutCommit(target); } } } } mFullScoreComputationNeeded = false; ++mFullScoreRecomputationCount; mFullScoreRecomputationMilliseconds += CurrentTimeUTC.getInMillis() - beginTime; if(logMINOR) { Logger.minor(this, "Full score computation finished. 
Amount: " + mFullScoreRecomputationCount + "; Avg Time:" + getAverageFullScoreRecomputationTime() + "s"); } return returnValue; } private synchronized void createSeedIdentities() { for(String seedURI : SEED_IDENTITIES) { Identity seed; synchronized(Persistent.transactionLock(mDB)) { try { seed = getIdentityByURI(seedURI); if(seed instanceof OwnIdentity) { OwnIdentity ownSeed = (OwnIdentity)seed; ownSeed.addContext(IntroductionPuzzle.INTRODUCTION_CONTEXT); ownSeed.setProperty(IntroductionServer.PUZZLE_COUNT_PROPERTY, Integer.toString(IntroductionServer.SEED_IDENTITY_PUZZLE_COUNT)); ownSeed.storeAndCommit(); } else { try { seed.setEdition(new FreenetURI(seedURI).getEdition()); seed.storeAndCommit(); } catch(InvalidParameterException e) { /* We already have the latest edition stored */ } } } catch (UnknownIdentityException uie) { try { seed = new Identity(this, seedURI, null, true); // We have to explicitely set the edition number because the constructor only considers the given edition as a hint. seed.setEdition(new FreenetURI(seedURI).getEdition()); seed.storeAndCommit(); } catch (Exception e) { Logger.error(this, "Seed identity creation error", e); } } catch (Exception e) { Persistent.checkedRollback(mDB, this, e); } } } } public void terminate() { if(logDEBUG) Logger.debug(this, "WoT plugin terminating ..."); /* We use single try/catch blocks so that failure of termination of one service does not prevent termination of the others */ try { if(mWebInterface != null) this.mWebInterface.unload(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mIntroductionClient != null) mIntroductionClient.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mIntroductionServer != null) mIntroductionServer.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mInserter != null) mInserter.terminate(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mFetcher != null) mFetcher.stop(); } catch(Exception e) { Logger.error(this, "Error during termination.", e); } try { if(mDB != null) { /* TODO: At 2009-06-15, it does not seem possible to ask db4o for whether a transaction is pending. * If it becomes possible some day, we should check that here, and log an error if there is an uncommitted transaction. * - All transactions should be committed after obtaining the lock() on the database. */ synchronized(Persistent.transactionLock(mDB)) { System.gc(); mDB.rollback(); System.gc(); mDB.close(); } } } catch(Exception e) { Logger.error(this, "Error during termination.", e); } if(logDEBUG) Logger.debug(this, "WoT plugin terminated."); } /** * Inherited event handler from FredPluginFCP, handled in <code>class FCPInterface</code>. */ public void handle(PluginReplySender replysender, SimpleFieldSet params, Bucket data, int accesstype) { mFCPInterface.handle(replysender, params, data, accesstype); } /** * Loads an own or normal identity from the database, querying on its ID. * * @param id The ID of the identity to load * @return The identity matching the supplied ID. 
* @throws DuplicateIdentityException if there are more than one identity with this id in the database * @throws UnknownIdentityException if there is no identity with this id in the database */ public synchronized Identity getIdentityByID(String id) throws UnknownIdentityException { final Query query = mDB.query(); query.constrain(Identity.class); query.descend("mID").constrain(id); final ObjectSet<Identity> result = new Persistent.InitializingObjectSet<Identity>(this, query); switch(result.size()) { case 1: return result.next(); case 0: throw new UnknownIdentityException(id); default: throw new DuplicateIdentityException(id, result.size()); } } /** * Gets an OwnIdentity by its ID. * * @param id The unique identifier to query an OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if there is now OwnIdentity with that id */ public synchronized OwnIdentity getOwnIdentityByID(String id) throws UnknownIdentityException { final Query query = mDB.query(); query.constrain(OwnIdentity.class); query.descend("mID").constrain(id); final ObjectSet<OwnIdentity> result = new Persistent.InitializingObjectSet<OwnIdentity>(this, query); switch(result.size()) { case 1: return result.next(); case 0: throw new UnknownIdentityException(id); default: throw new DuplicateIdentityException(id, result.size()); } } /** * Loads an identity from the database, querying on its requestURI (a valid {@link FreenetURI}) * * @param uri The requestURI of the identity * @return The identity matching the supplied requestURI * @throws UnknownIdentityException if there is no identity with this id in the database */ public Identity getIdentityByURI(FreenetURI uri) throws UnknownIdentityException { return getIdentityByID(IdentityID.constructAndValidateFromURI(uri).toString()); } /** * Loads an identity from the database, querying on its requestURI (as String) * * @param uri The requestURI of the identity which will be converted to {@link FreenetURI} * @return The identity matching the supplied requestURI * @throws UnknownIdentityException if there is no identity with this id in the database * @throws MalformedURLException if the requestURI isn't a valid FreenetURI */ public Identity getIdentityByURI(String uri) throws UnknownIdentityException, MalformedURLException { return getIdentityByURI(new FreenetURI(uri)); } /** * Gets an OwnIdentity by its requestURI (a {@link FreenetURI}). * The OwnIdentity's unique identifier is extracted from the supplied requestURI. * * @param uri The requestURI of the desired OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if the OwnIdentity isn't in the database */ public OwnIdentity getOwnIdentityByURI(FreenetURI uri) throws UnknownIdentityException { return getOwnIdentityByID(IdentityID.constructAndValidateFromURI(uri).toString()); } /** * Gets an OwnIdentity by its requestURI (as String). * The given String is converted to {@link FreenetURI} in order to extract a unique id. 
* * @param uri The requestURI (as String) of the desired OwnIdentity * @return The requested OwnIdentity * @throws UnknownIdentityException if the OwnIdentity isn't in the database * @throws MalformedURLException if the supplied requestURI is not a valid FreenetURI */ public OwnIdentity getOwnIdentityByURI(String uri) throws UnknownIdentityException, MalformedURLException { return getOwnIdentityByURI(new FreenetURI(uri)); } /** * Returns all identities that are in the database * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all identities present in the database */ public ObjectSet<Identity> getAllIdentities() { final Query query = mDB.query(); query.constrain(Identity.class); return new Persistent.InitializingObjectSet<Identity>(this, query); } public static enum SortOrder { ByNicknameAscending, ByNicknameDescending, ByScoreAscending, ByScoreDescending, ByLocalTrustAscending, ByLocalTrustDescending } /** * Get a filtered and sorted list of identities. * You have to synchronize on this WoT when calling the function and processing the returned list. */ public ObjectSet<Identity> getAllIdentitiesFilteredAndSorted(OwnIdentity truster, String nickFilter, SortOrder sortInstruction) { Query q = mDB.query(); switch(sortInstruction) { case ByNicknameAscending: q.constrain(Identity.class); q.descend("mNickname").orderAscending(); break; case ByNicknameDescending: q.constrain(Identity.class); q.descend("mNickname").orderDescending(); break; case ByScoreAscending: q.constrain(Score.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderAscending(); q = q.descend("mTrustee"); break; case ByScoreDescending: // TODO: This excludes identities which have no score q.constrain(Score.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderDescending(); q = q.descend("mTrustee"); break; case ByLocalTrustAscending: q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderAscending(); q = q.descend("mTrustee"); break; case ByLocalTrustDescending: // TODO: This excludes untrusted identities. q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mValue").orderDescending(); q = q.descend("mTrustee"); break; } if(nickFilter != null) { nickFilter = nickFilter.trim(); if(!nickFilter.equals("")) q.descend("mNickname").constrain(nickFilter).like(); } return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all non-own identities that are in the database. * * You have to synchronize on this WoT when calling the function and processing the returned list! */ public ObjectSet<Identity> getAllNonOwnIdentities() { final Query q = mDB.query(); q.constrain(Identity.class); q.constrain(OwnIdentity.class).not(); return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all non-own identities that are in the database, sorted descending by their date of modification, i.e. recently * modified identities will be at the beginning of the list. * * You have to synchronize on this WoT when calling the function and processing the returned list! * * Used by the IntroductionClient for fetching puzzles from recently modified identities. 
*/ public ObjectSet<Identity> getAllNonOwnIdentitiesSortedByModification () { final Query q = mDB.query(); q.constrain(Identity.class); q.constrain(OwnIdentity.class).not(); /* TODO: As soon as identities announce that they were online every day, uncomment the following line */ /* q.descend("mLastChangedDate").constrain(new Date(CurrentTimeUTC.getInMillis() - 1 * 24 * 60 * 60 * 1000)).greater(); */ q.descend("mLastFetchedDate").orderDescending(); return new Persistent.InitializingObjectSet<Identity>(this, q); } /** * Returns all own identities that are in the database * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all identities present in the database. */ public ObjectSet<OwnIdentity> getAllOwnIdentities() { final Query q = mDB.query(); q.constrain(OwnIdentity.class); return new Persistent.InitializingObjectSet<OwnIdentity>(this, q); } /** * DO NOT USE THIS FUNCTION FOR DELETING OWN IDENTITIES UPON USER REQUEST! * IN FACT BE VERY CAREFUL WHEN USING IT FOR ANYTHING FOR THE FOLLOWING REASONS: * - This function deletes ALL given and received trust values of the given identity. This modifies the trust list of the trusters against their will. * - Especially it might be an information leak if the trust values of other OwnIdentities are deleted! * - If WOT one day is designed to be used by many different users at once, the deletion of other OwnIdentity's trust values would even be corruption. * * The intended purpose of this function is: * - To specify which objects have to be dealt with when messing with storage of an identity. * - To be able to do database object leakage tests: Many classes have a deleteWithoutCommit function and there are valid usecases for them. * However, the implementations of those functions might cause leaks by forgetting to delete certain object members. * If you call this function for ALL identities in a database, EVERYTHING should be deleted and the database SHOULD be empty. * You then can check whether the database actually IS empty to test for leakage. * * You have to lock the WebOfTrust, the IntroductionPuzzleStore and the IdentityFetcher before calling this function. */ private void deleteWithoutCommit(Identity identity) { // We want to use beginTrustListImport, finishTrustListImport / abortTrustListImport. // If the caller already handles that for us though, we should not call those function again. // So we check whether the caller already started an import. boolean trustListImportWasInProgress = mTrustListImportInProgress; try { if(!trustListImportWasInProgress) beginTrustListImport(); if(logDEBUG) Logger.debug(this, "Deleting identity " + identity + " ..."); if(logDEBUG) Logger.debug(this, "Deleting received scores..."); for(Score score : getScores(identity)) score.deleteWithoutCommit(); if(identity instanceof OwnIdentity) { if(logDEBUG) Logger.debug(this, "Deleting given scores..."); for(Score score : getGivenScores((OwnIdentity)identity)) score.deleteWithoutCommit(); } if(logDEBUG) Logger.debug(this, "Deleting received trusts..."); for(Trust trust : getReceivedTrusts(identity)) trust.deleteWithoutCommit(); if(logDEBUG) Logger.debug(this, "Deleting given trusts..."); for(Trust givenTrust : getGivenTrusts(identity)) { givenTrust.deleteWithoutCommit(); // We call computeAllScores anyway so we do not use removeTrustWithoutCommit() } mFullScoreComputationNeeded = true; // finishTrustListImport will call computeAllScoresWithoutCommit for us. 
if(logDEBUG) Logger.debug(this, "Deleting associated introduction puzzles ..."); mPuzzleStore.onIdentityDeletion(identity); if(logDEBUG) Logger.debug(this, "Storing an abort-fetch-command..."); if(mFetcher != null) { // Can be null if we use this function in upgradeDB() mFetcher.storeAbortFetchCommandWithoutCommit(identity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. } if(logDEBUG) Logger.debug(this, "Deleting the identity..."); identity.deleteWithoutCommit(); if(!trustListImportWasInProgress) finishTrustListImport(); } catch(RuntimeException e) { if(!trustListImportWasInProgress) abortTrustListImport(e); Persistent.checkedRollbackAndThrow(mDB, this, e); } } /** * Gets the score of this identity in a trust tree. * Each {@link OwnIdentity} has its own trust tree. * * @param truster The owner of the trust tree * @return The {@link Score} of this Identity in the required trust tree * @throws NotInTrustTreeException if this identity is not in the required trust tree */ public synchronized Score getScore(final OwnIdentity truster, final Identity trustee) throws NotInTrustTreeException { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mID").constrain(new ScoreID(truster, trustee).toString()); final ObjectSet<Score> result = new Persistent.InitializingObjectSet<Score>(this, query); switch(result.size()) { case 1: final Score score = result.next(); assert(score.getTruster() == truster); assert(score.getTrustee() == trustee); return score; case 0: throw new NotInTrustTreeException(truster, trustee); default: throw new DuplicateScoreException(truster, trustee, result.size()); } } /** * Gets a list of all this Identity's Scores. * You have to synchronize on this WoT around the call to this function and the processing of the returned list! * * @return An {@link ObjectSet} containing all {@link Score} this Identity has. */ public ObjectSet<Score> getScores(final Identity identity) { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mTrustee").constrain(identity).identity(); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Get a list of all scores which the passed own identity has assigned to other identities. * * You have to synchronize on this WoT around the call to this function and the processing of the returned list! * @return An {@link ObjectSet} containing all {@link Score} this Identity has given. */ public ObjectSet<Score> getGivenScores(final OwnIdentity truster) { final Query query = mDB.query(); query.constrain(Score.class); query.descend("mTruster").constrain(truster).identity(); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Gets the best score this Identity has in existing trust trees. * * @return the best score this Identity has * @throws NotInTrustTreeException If the identity has no score in any trusttree. */ public synchronized int getBestScore(final Identity identity) throws NotInTrustTreeException { int bestScore = Integer.MIN_VALUE; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) throw new NotInTrustTreeException(identity); // TODO: Cache the best score of an identity as a member variable. 
for(final Score score : scores) bestScore = Math.max(score.getScore(), bestScore); return bestScore; } /** * Gets the best capacity this identity has in any trust tree. * @throws NotInTrustTreeException If the identity is not in any trust tree. Can be interpreted as capacity 0. */ public int getBestCapacity(final Identity identity) throws NotInTrustTreeException { int bestCapacity = 0; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) throw new NotInTrustTreeException(identity); // TODO: Cache the best score of an identity as a member variable. for(final Score score : scores) bestCapacity = Math.max(score.getCapacity(), bestCapacity); return bestCapacity; } /** * Get all scores in the database. * You have to synchronize on this WoT when calling the function and processing the returned list! */ public ObjectSet<Score> getAllScores() { final Query query = mDB.query(); query.constrain(Score.class); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Checks whether the given identity should be downloaded. * @return Returns true if the identity has any capacity > 0, any score >= 0 or if it is an own identity. */ public boolean shouldFetchIdentity(final Identity identity) { if(identity instanceof OwnIdentity) return true; int bestScore = Integer.MIN_VALUE; int bestCapacity = 0; final ObjectSet<Score> scores = getScores(identity); if(scores.size() == 0) return false; // TODO: Cache the best score of an identity as a member variable. for(Score score : scores) { bestCapacity = Math.max(score.getCapacity(), bestCapacity); bestScore = Math.max(score.getScore(), bestScore); if(bestCapacity > 0 || bestScore >= 0) return true; } return false; } /** * Gets non-own Identities matching a specified score criteria. * TODO: Rename to getNonOwnIdentitiesByScore. Or even better: Make it return own identities as well, this will speed up the database query and clients might be ok with it. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param truster The owner of the trust tree, null if you want the trusted identities of all owners. * @param select Score criteria, can be > zero, zero or negative. Greater than zero returns all identities with score >= 0, zero with score equal to 0 * and negative with score < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a trust value of 0. * @return an {@link ObjectSet} containing Scores of the identities that match the criteria */ public ObjectSet<Score> getIdentitiesByScore(final OwnIdentity truster, final int select) { final Query query = mDB.query(); query.constrain(Score.class); if(truster != null) query.descend("mTruster").constrain(truster).identity(); query.descend("mTrustee").constrain(OwnIdentity.class).not(); /* We include 0 in the list of identities with positive score because solving captchas gives no points to score */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Score>(this, query); } /** * Gets {@link Trust} from a specified truster to a specified trustee. 
* * @param truster The identity that gives trust to this Identity * @param trustee The identity which receives the trust * @return The trust given to the trustee by the specified truster * @throws NotTrustedException if the truster doesn't trust the trustee */ public synchronized Trust getTrust(final Identity truster, final Identity trustee) throws NotTrustedException, DuplicateTrustException { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mID").constrain(new TrustID(truster, trustee).toString()); final ObjectSet<Trust> result = new Persistent.InitializingObjectSet<Trust>(this, query); switch(result.size()) { case 1: final Trust trust = result.next(); assert(trust.getTruster() == truster); assert(trust.getTrustee() == trustee); return trust; case 0: throw new NotTrustedException(truster, trustee); default: throw new DuplicateTrustException(truster, trustee, result.size()); } } /** * Gets all trusts given by the given truster. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has given. */ public ObjectSet<Trust> getGivenTrusts(final Identity truster) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts given by the given truster. * The result is sorted descending by the time we last fetched the trusted identity. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has given. */ public ObjectSet<Trust> getGivenTrustsSortedDescendingByLastSeen(final Identity truster) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); query.descend("mTrustee").descend("mLastFetchedDate").orderDescending(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets given trust values of an identity matching a specified trust value criteria. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param truster The identity which given the trust values. * @param select Trust value criteria, can be > zero, zero or negative. Greater than zero returns all trust values >= 0, zero returns trust values equal to 0. * Negative returns trust values < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a value of 0. * @return an {@link ObjectSet} containing received trust values that match the criteria. */ public ObjectSet<Trust> getGivenTrusts(final Identity truster, final int select) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTruster").constrain(truster).identity(); /* We include 0 in the list of identities with positive trust because solving captchas gives 0 trust */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0 ) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts given by the given truster in a trust list with a different edition than the passed in one. * You have to synchronize on this WoT when calling the function and processing the returned list! 
*/ protected ObjectSet<Trust> getGivenTrustsOfDifferentEdition(final Identity truster, final long edition) { final Query q = mDB.query(); q.constrain(Trust.class); q.descend("mTruster").constrain(truster).identity(); q.descend("mTrusterTrustListEdition").constrain(edition).not(); return new Persistent.InitializingObjectSet<Trust>(this, q); } /** * Gets all trusts received by the given trustee. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has received. */ public ObjectSet<Trust> getReceivedTrusts(final Identity trustee) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTrustee").constrain(trustee).identity(); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets received trust values of an identity matching a specified trust value criteria. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @param trustee The identity which has received the trust values. * @param select Trust value criteria, can be > zero, zero or negative. Greater than zero returns all trust values >= 0, zero returns trust values equal to 0. * Negative returns trust values < 0. Zero is included in the positive range by convention because solving an introduction puzzle gives you a value of 0. * @return an {@link ObjectSet} containing received trust values that match the criteria. */ public ObjectSet<Trust> getReceivedTrusts(final Identity trustee, final int select) { final Query query = mDB.query(); query.constrain(Trust.class); query.descend("mTrustee").constrain(trustee).identity(); /* We include 0 in the list of identities with positive trust because solving captchas gives 0 trust */ if(select > 0) query.descend("mValue").constrain(0).smaller().not(); else if(select < 0 ) query.descend("mValue").constrain(0).smaller(); else query.descend("mValue").constrain(0); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gets all trusts. * You have to synchronize on this WoT when calling the function and processing the returned list! * * @return An {@link ObjectSet} containing all {@link Trust} the passed Identity has received. */ public ObjectSet<Trust> getAllTrusts() { final Query query = mDB.query(); query.constrain(Trust.class); return new Persistent.InitializingObjectSet<Trust>(this, query); } /** * Gives some {@link Trust} to another Identity. * It creates or updates an existing Trust object and make the trustee compute its {@link Score}. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(WebOfTrust.this) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... setTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * } * * @param truster The Identity that gives the trust * @param trustee The Identity that receives the trust * @param newValue Numeric value of the trust * @param newComment A comment to explain the given value * @throws InvalidParameterException if a given parameter isn't valid, see {@link Trust} for details on accepted values. 
*/ protected void setTrustWithoutCommit(Identity truster, Identity trustee, byte newValue, String newComment) throws InvalidParameterException { try { // Check if we are updating an existing trust value final Trust trust = getTrust(truster, trustee); final Trust oldTrust = trust.clone(); trust.trusterEditionUpdated(); trust.setComment(newComment); trust.storeWithoutCommit(); if(trust.getValue() != newValue) { trust.setValue(newValue); trust.storeWithoutCommit(); if(logDEBUG) Logger.debug(this, "Updated trust value ("+ trust +"), now updating Score."); updateScoresWithoutCommit(oldTrust, trust); } } catch (NotTrustedException e) { final Trust trust = new Trust(this, truster, trustee, newValue, newComment); trust.storeWithoutCommit(); if(logDEBUG) Logger.debug(this, "New trust value ("+ trust +"), now updating Score."); updateScoresWithoutCommit(null, trust); } truster.updated(); truster.storeWithoutCommit(); } /** * Only for being used by WoT internally and by unit tests! */ synchronized void setTrust(OwnIdentity truster, Identity trustee, byte newValue, String newComment) throws InvalidParameterException { synchronized(Persistent.transactionLock(mDB)) { try { setTrustWithoutCommit(truster, trustee, newValue, newComment); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } protected synchronized void removeTrust(OwnIdentity truster, Identity trustee) { synchronized(Persistent.transactionLock(mDB)) { try { removeTrustWithoutCommit(truster, trustee); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } /** * Deletes a trust object. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(this) { * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... removeTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * }}} * * @param truster * @param trustee */ protected void removeTrustWithoutCommit(OwnIdentity truster, Identity trustee) { try { try { removeTrustWithoutCommit(getTrust(truster, trustee)); } catch (NotTrustedException e) { Logger.error(this, "Cannot remove trust - there is none - from " + truster.getNickname() + " to " + trustee.getNickname()); } } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } /** * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(this) { * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... setTrustWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * }}} * */ protected void removeTrustWithoutCommit(Trust trust) { trust.deleteWithoutCommit(); updateScoresWithoutCommit(trust, null); } /** * Initializes this OwnIdentity's trust tree without commiting the transaction. * Meaning : It creates a Score object for this OwnIdentity in its own trust so it can give trust to other Identities. * * The score will have a rank of 0, a capacity of 100 (= 100 percent) and a score value of Integer.MAX_VALUE. * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(Persistent.transactionLock(mDB)) { * try { ... 
initTrustTreeWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } * } * * @throws DuplicateScoreException if there already is more than one Score for this identity (should never happen) */ private synchronized void initTrustTreeWithoutCommit(OwnIdentity identity) throws DuplicateScoreException { try { getScore(identity, identity); Logger.error(this, "initTrustTreeWithoutCommit called even though there is already one for " + identity); return; } catch (NotInTrustTreeException e) { final Score score = new Score(this, identity, identity, Integer.MAX_VALUE, 0, 100); score.storeWithoutCommit(); } } /** * Computes the trustee's Score value according to the trusts it has received and the capacity of its trusters in the specified * trust tree. * * @param truster The OwnIdentity that owns the trust tree * @param trustee The identity for which the score shall be computed. * @return The new Score of the identity. Integer.MAX_VALUE if the trustee is equal to the truster. * @throws DuplicateScoreException if there already exist more than one {@link Score} objects for the trustee (should never happen) */ private synchronized int computeScoreValue(OwnIdentity truster, Identity trustee) throws DuplicateScoreException { if(trustee == truster) return Integer.MAX_VALUE; int value = 0; try { return getTrust(truster, trustee).getValue(); } catch(NotTrustedException e) { } for(Trust trust : getReceivedTrusts(trustee)) { try { final Score trusterScore = getScore(truster, trust.getTruster()); value += ( trust.getValue() * trusterScore.getCapacity() ) / 100; } catch (NotInTrustTreeException e) {} } return value; } /** * Computes the trustees's rank in the trust tree of the truster. * It gets its best ranked non-zero-capacity truster's rank, plus one. * If it has only received negative trust values from identities which have a non-zero-capacity it gets a rank of Integer.MAX_VALUE (infinite). * If it has only received trust values from identities with rank of Integer.MAX_VALUE it gets a rank of -1. * * If the tree owner has assigned a trust value to the identity, the rank computation is only done from that value because the score decisions of the * tree owner are always absolute (if you distrust someone, the remote identities should not be allowed to overpower your decision). * * The purpose of differentiation between Integer.MAX_VALUE and -1 is: * Score objects of identities with rank Integer.MAX_VALUE are kept in the database because WoT will usually "hear" about those identities by seeing them * in the trust lists of trusted identities (with negative trust values). So it must store the trust values to those identities and have a way of telling the * user "this identity is not trusted" by keeping a score object of them. * Score objects of identities with rank -1 are deleted because they are the trustees of distrusted identities and we will not get to the point where we * hear about those identities because the only way of hearing about them is downloading a trust list of a identity with Integer.MAX_VALUE rank - and * we never download their trust lists. * * Notice that 0 is included in infinite rank to prevent identities which have only solved introduction puzzles from having a capacity. 
* * @param truster The OwnIdentity that owns the trust tree * @return The new Rank if this Identity * @throws DuplicateScoreException if there already exist more than one {@link Score} objects for the trustee (should never happen) */ private synchronized int computeRank(OwnIdentity truster, Identity trustee) throws DuplicateScoreException { if(trustee == truster) return 0; int rank = -1; try { Trust treeOwnerTrust = getTrust(truster, trustee); if(treeOwnerTrust.getValue() > 0) return 1; else return Integer.MAX_VALUE; } catch(NotTrustedException e) { } for(Trust trust : getReceivedTrusts(trustee)) { try { Score score = getScore(truster, trust.getTruster()); if(score.getCapacity() != 0) { // If the truster has no capacity, he can't give his rank // A truster only gives his rank to a trustee if he has assigned a strictly positive trust value if(trust.getValue() > 0 ) { // We give the rank to the trustee if it is better than its current rank or he has no rank yet. if(rank == -1 || score.getRank() < rank) rank = score.getRank(); } else { // If the trustee has no rank yet we give him an infinite rank. because he is distrusted by the truster. if(rank == -1) rank = Integer.MAX_VALUE; } } } catch (NotInTrustTreeException e) {} } if(rank == -1) return -1; else if(rank == Integer.MAX_VALUE) return Integer.MAX_VALUE; else return rank+1; } /** * Begins the import of a trust list. This sets a flag on this WoT which signals that the import of a trust list is in progress. * This speeds up setTrust/removeTrust as the score calculation is only performed when endTrustListImport is called. * * You MUST synchronize on this WoT around beginTrustListImport, abortTrustListImport and finishTrustListImport! * You MUST create a database transaction by synchronizing on Persistent.transactionLock(db). */ protected void beginTrustListImport() { if(logMINOR) Logger.minor(this, "beginTrustListImport()"); if(mTrustListImportInProgress) { abortTrustListImport(new RuntimeException("There was already a trust list import in progress!")); mFullScoreComputationNeeded = true; computeAllScoresWithoutCommit(); assert(mFullScoreComputationNeeded == false); } mTrustListImportInProgress = true; assert(!mFullScoreComputationNeeded); assert(computeAllScoresWithoutCommit()); // The database is intact before the import } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. * * Aborts the import of a trust list and rolls back the current transaction. * * @param e The exception which triggered the abort. Will be logged to the Freenet log file. * @param logLevel The {@link LogLevel} to use when logging the abort to the Freenet log file. */ protected void abortTrustListImport(Exception e, LogLevel logLevel) { if(logMINOR) Logger.minor(this, "abortTrustListImport()"); assert(mTrustListImportInProgress); mTrustListImportInProgress = false; mFullScoreComputationNeeded = false; Persistent.checkedRollback(mDB, this, e, logLevel); assert(computeAllScoresWithoutCommit()); // Test rollback. } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. * * Aborts the import of a trust list and rolls back the current transaction. * * @param e The exception which triggered the abort. Will be logged to the Freenet log file with log level {@link LogLevel.ERROR} */ protected void abortTrustListImport(Exception e) { abortTrustListImport(e, Logger.LogLevel.ERROR); } /** * See {@link beginTrustListImport} for an explanation of the purpose of this function. 
* * Finishes the import of the current trust list and clears the "trust list * * Does NOT commit the transaction, you must do this. */ protected void finishTrustListImport() { if(logMINOR) Logger.minor(this, "finishTrustListImport()"); if(!mTrustListImportInProgress) { Logger.error(this, "There was no trust list import in progress!"); return; } if(mFullScoreComputationNeeded) { computeAllScoresWithoutCommit(); assert(!mFullScoreComputationNeeded); // It properly clears the flag assert(computeAllScoresWithoutCommit()); // computeAllScoresWithoutCommit() is stable } else assert(computeAllScoresWithoutCommit()); // Verify whether updateScoresWithoutCommit worked. mTrustListImportInProgress = false; } /** * Updates all trust trees which are affected by the given modified score. * For understanding how score calculation works you should first read {@link computeAllScores * * This function does neither lock the database nor commit the transaction. You have to surround it with * synchronized(this) { * synchronized(mFetcher) { * synchronized(Persistent.transactionLock(mDB)) { * try { ... updateScoreWithoutCommit(...); Persistent.checkedCommit(mDB, this); } * catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e);; } * }}} */ private void updateScoresWithoutCommit(final Trust oldTrust, final Trust newTrust) { if(logMINOR) Logger.minor(this, "Doing an incremental computation of all Scores..."); final long beginTime = CurrentTimeUTC.getInMillis(); // We only include the time measurement if we actually do something. // If we figure out that a full score recomputation is needed just by looking at the initial parameters, the measurement won't be included. boolean includeMeasurement = false; final boolean trustWasCreated = (oldTrust == null); final boolean trustWasDeleted = (newTrust == null); final boolean trustWasModified = !trustWasCreated && !trustWasDeleted; if(trustWasCreated && trustWasDeleted) throw new NullPointerException("No old/new trust specified."); if(trustWasModified && oldTrust.getTruster() != newTrust.getTruster()) throw new IllegalArgumentException("oldTrust has different truster, oldTrust:" + oldTrust + "; newTrust: " + newTrust); if(trustWasModified && oldTrust.getTrustee() != newTrust.getTrustee()) throw new IllegalArgumentException("oldTrust has different trustee, oldTrust:" + oldTrust + "; newTrust: " + newTrust); // We cannot iteratively REMOVE an inherited rank from the trustees because we don't know whether there is a circle in the trust values // which would make the current identity get its old rank back via the circle: computeRank searches the trusters of an identity for the best // rank, if we remove the rank from an identity, all its trustees will have a better rank and if one of them trusts the original identity // then this function would run into an infinite loop. Decreasing or incrementing an existing rank is possible with this function because // the rank received from the trustees will always be higher (that is exactly 1 more) than this identities rank. if(trustWasDeleted) { mFullScoreComputationNeeded = true; } if(!mFullScoreComputationNeeded && (trustWasCreated || trustWasModified)) { includeMeasurement = true; for(OwnIdentity treeOwner : getAllOwnIdentities()) { try { // Throws to abort the update of the trustee's score: If the truster has no rank or capacity in the tree owner's view then we don't need to update the trustee's score. 
if(getScore(treeOwner, newTrust.getTruster()).getCapacity() == 0) continue; } catch(NotInTrustTreeException e) { continue; } // See explanation above "We cannot iteratively REMOVE an inherited rank..." if(trustWasModified && oldTrust.getValue() > 0 && newTrust.getValue() <= 0) { mFullScoreComputationNeeded = true; break; } final LinkedList<Trust> unprocessedEdges = new LinkedList<Trust>(); unprocessedEdges.add(newTrust); while(!unprocessedEdges.isEmpty()) { final Trust trust = unprocessedEdges.removeFirst(); final Identity trustee = trust.getTrustee(); if(trustee == treeOwner) continue; Score currentStoredTrusteeScore; try { currentStoredTrusteeScore = getScore(treeOwner, trustee); } catch(NotInTrustTreeException e) { currentStoredTrusteeScore = new Score(this, treeOwner, trustee, 0, -1, 0); } final Score oldScore = currentStoredTrusteeScore.clone(); boolean oldShouldFetch = shouldFetchIdentity(trustee); final int newScoreValue = computeScoreValue(treeOwner, trustee); final int newRank = computeRank(treeOwner, trustee); final int newCapacity = computeCapacity(treeOwner, trustee, newRank); final Score newScore = new Score(this, treeOwner, trustee, newScoreValue, newRank, newCapacity); // Normally we couldn't detect the following two cases due to circular trust values. However, if an own identity assigns a trust value, // the rank and capacity are always computed based on the trust value of the own identity so we must also check this here: if((oldScore.getRank() >= 0 && oldScore.getRank() < Integer.MAX_VALUE) // It had an inheritable rank && (newScore.getRank() == -1 || newScore.getRank() == Integer.MAX_VALUE)) { // It has no inheritable rank anymore mFullScoreComputationNeeded = true; break; } if(oldScore.getCapacity() > 0 && newScore.getCapacity() == 0) { mFullScoreComputationNeeded = true; break; } // We are OK to update it now. We must not update the values of the stored score object before determining whether we need // a full score computation - the full computation needs the old values of the object. currentStoredTrusteeScore.setValue(newScore.getScore()); currentStoredTrusteeScore.setRank(newScore.getRank()); currentStoredTrusteeScore.setCapacity(newScore.getCapacity()); // Identities should not get into the queue if they have no rank, see the large if() about 20 lines below assert(currentStoredTrusteeScore.getRank() >= 0); if(currentStoredTrusteeScore.getRank() >= 0) currentStoredTrusteeScore.storeWithoutCommit(); // If fetch status changed from false to true, we need to start fetching it // If the capacity changed from 0 to positive, we need to refetch the current edition: Identities with capacity 0 cannot // cause new identities to be imported from their trust list, capacity > 0 allows this. 
// If the fetch status changed from true to false, we need to stop fetching it if((!oldShouldFetch || (oldScore.getCapacity()== 0 && newScore.getCapacity() > 0)) && shouldFetchIdentity(trustee)) { if(!oldShouldFetch) if(logDEBUG) Logger.debug(this, "Fetch status changed from false to true, refetching " + trustee); else if(logDEBUG) Logger.debug(this, "Capacity changed from 0 to " + newScore.getCapacity() + ", refetching" + trustee); trustee.markForRefetch(); trustee.storeWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(trustee); } else if(oldShouldFetch && !shouldFetchIdentity(trustee)) { if(logDEBUG) Logger.debug(this, "Fetch status changed from true to false, aborting fetch of " + trustee); mFetcher.storeAbortFetchCommandWithoutCommit(trustee); } // If the rank or capacity changed then the trustees might be affected because the could have inherited theirs if(oldScore.getRank() != newScore.getRank() || oldScore.getCapacity() != newScore.getCapacity()) { // If this identity has no capacity or no rank then it cannot affect its trustees: // (- If it had none and it has none now then there is none which can be inherited, this is obvious) // - If it had one before and it was removed, this algorithm will have aborted already because a full computation is needed if(newScore.getCapacity() > 0 || (newScore.getRank() >= 0 && newScore.getRank() < Integer.MAX_VALUE)) { // We need to update the trustees of trustee for(Trust givenTrust : getGivenTrusts(trustee)) { unprocessedEdges.add(givenTrust); } } } } if(mFullScoreComputationNeeded) break; } } if(includeMeasurement) { ++mIncrementalScoreRecomputationCount; mIncrementalScoreRecomputationMilliseconds += CurrentTimeUTC.getInMillis() - beginTime; } if(logMINOR) { final String time = includeMeasurement ? ("Stats: Amount: " + mIncrementalScoreRecomputationCount + "; Avg Time:" + getAverageIncrementalScoreRecomputationTime() + "s") : ("Time not measured: Computation was aborted before doing anything."); if(!mFullScoreComputationNeeded) Logger.minor(this, "Incremental computation of all Scores finished. " + time); else Logger.minor(this, "Incremental computation of all Scores not possible, full computation is needed. " + time); } if(!mTrustListImportInProgress) { if(mFullScoreComputationNeeded) { // TODO: Optimization: This uses very much CPU and memory. Write a partial computation function... // TODO: Optimization: While we do not have a partial computation function, we could at least optimize computeAllScores to NOT // keep all objects in memory etc. computeAllScoresWithoutCommit(); assert(computeAllScoresWithoutCommit()); // computeAllScoresWithoutCommit is stable } else { assert(computeAllScoresWithoutCommit()); // This function worked correctly. 
} } else { // a trust list import is in progress // We not do the following here because it would cause too much CPU usage during debugging: Trust lists are large and therefore // updateScoresWithoutCommit is called often during import of a single trust list // assert(computeAllScoresWithoutCommit()); } } /* Client interface functions */ public synchronized Identity addIdentity(String requestURI) throws MalformedURLException, InvalidParameterException { try { getIdentityByURI(requestURI); throw new InvalidParameterException("We already have this identity"); } catch(UnknownIdentityException e) { final Identity identity = new Identity(this, requestURI, null, false); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Created identity " + identity); // The identity hasn't received a trust value. Therefore, there is no reason to fetch it and we don't notify the IdentityFetcher. // TODO: Document this function and the UI which uses is to warn the user that the identity won't be fetched without trust. return identity; } } public OwnIdentity createOwnIdentity(String nickName, boolean publishTrustList, String context) throws MalformedURLException, InvalidParameterException { FreenetURI[] keypair = getPluginRespirator().getHLSimpleClient().generateKeyPair(WOT_NAME); return createOwnIdentity(keypair[0], nickName, publishTrustList, context); } /** * @param context A context with which you want to use the identity. Null if you want to add it later. */ public synchronized OwnIdentity createOwnIdentity(FreenetURI insertURI, String nickName, boolean publishTrustList, String context) throws MalformedURLException, InvalidParameterException { synchronized(Persistent.transactionLock(mDB)) { OwnIdentity identity; try { identity = getOwnIdentityByURI(insertURI); throw new InvalidParameterException("The URI you specified is already used by the own identity " + identity.getNickname() + "."); } catch(UnknownIdentityException uie) { identity = new OwnIdentity(this, insertURI, nickName, publishTrustList); if(context != null) identity.addContext(context); if(publishTrustList) { identity.addContext(IntroductionPuzzle.INTRODUCTION_CONTEXT); /* TODO: make configureable */ identity.setProperty(IntroductionServer.PUZZLE_COUNT_PROPERTY, Integer.toString(IntroductionServer.DEFAULT_PUZZLE_COUNT)); } try { identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); beginTrustListImport(); // Incremental score computation has proven to be very very slow when creating identities so we just schedule a full computation. mFullScoreComputationNeeded = true; for(String seedURI : SEED_IDENTITIES) { try { setTrustWithoutCommit(identity, getIdentityByURI(seedURI), (byte)100, "Automatically assigned trust to a seed identity."); } catch(UnknownIdentityException e) { Logger.error(this, "SHOULD NOT HAPPEN: Seed identity not known: " + e); } } finishTrustListImport(); Persistent.checkedCommit(mDB, this); if(mIntroductionClient != null) mIntroductionClient.nextIteration(); // This will make it fetch more introduction puzzles. if(logDEBUG) Logger.debug(this, "Successfully created a new OwnIdentity (" + identity.getNickname() + ")"); return identity; } catch(RuntimeException e) { abortTrustListImport(e); // Rolls back for us throw e; // Satisfy the compiler } } } } /** * This "deletes" an {@link OwnIdentity} by replacing it with an {@link Identity}. 
* * The {@link OwnIdentity} is not deleted because this would be a security issue: * If other {@link OwnIdentity}s have assigned a trust value to it, the trust value would be gone if there is no {@link Identity} object to be the target * * @param id The {@link Identity.IdentityID} of the identity. * @throws UnknownIdentityException If there is no {@link OwnIdentity} with the given ID. Also thrown if a non-own identity exists with the given ID. */ public synchronized void deleteOwnIdentity(String id) throws UnknownIdentityException { Logger.normal(this, "deleteOwnIdentity(): Starting... "); synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { final OwnIdentity oldIdentity = getOwnIdentityByID(id); try { Logger.normal(this, "Deleting an OwnIdentity by converting it to a non-own Identity: " + oldIdentity); // We don't need any score computations to happen (explanation will follow below) so we don't need the following: /* beginTrustListImport(); */ // This function messes with the score graph manually so it is a good idea to check whether it is intact before and afterwards. assert(computeAllScoresWithoutCommit()); final Identity newIdentity; try { newIdentity = new Identity(this, oldIdentity.getRequestURI(), oldIdentity.getNickname(), oldIdentity.doesPublishTrustList()); } catch(MalformedURLException e) { // The data was taken from the OwnIdentity so this shouldn't happen throw new RuntimeException(e); } catch (InvalidParameterException e) { // The data was taken from the OwnIdentity so this shouldn't happen throw new RuntimeException(e); } newIdentity.setContexts(oldIdentity.getContexts()); newIdentity.setProperties(oldIdentity.getProperties()); try { newIdentity.setEdition(oldIdentity.getEdition()); } catch (InvalidParameterException e) { // The data was taken from old identity so this shouldn't happen throw new RuntimeException(e); } // In theory we do not need to re-fetch the current trust list edition: // The trust list of an own identity is always stored completely in the database, i.e. all trustees exist. // HOWEVER if the user had used the restoreOwnIdentity feature and then used this function, it might be the case that // the current edition of the old OwndIdentity was not fetched yet. // So we set the fetch state to FetchState.Fetched if the oldIdentity's fetch state was like that as well. if(oldIdentity.getCurrentEditionFetchState() == FetchState.Fetched) { newIdentity.onFetched(oldIdentity.getLastFetchedDate()); } // An else to set the fetch state to FetchState.NotFetched is not necessary, newIdentity.setEdition() did that already. newIdentity.storeWithoutCommit(); // Copy all received trusts. // We don't have to modify them because they are user-assigned values and the assignment // of the user does not change just because the type of the identity changes. for(Trust oldReceivedTrust : getReceivedTrusts(oldIdentity)) { Trust newReceivedTrust; try { newReceivedTrust = new Trust(this, oldReceivedTrust.getTruster(), newIdentity, oldReceivedTrust.getValue(), oldReceivedTrust.getComment()); } catch (InvalidParameterException e) { // The data was taken from the old Trust so this shouldn't happen throw new RuntimeException(e); } // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. 
/* assert(newReceivedTrust.equals(oldReceivedTrust)); */ oldReceivedTrust.deleteWithoutCommit(); newReceivedTrust.storeWithoutCommit(); } assert(getReceivedTrusts(oldIdentity).size() == 0); // Copy all received scores. // We don't have to modify them because the rating of the identity from the perspective of a // different own identity should NOT be dependent upon whether it is an own identity or not. for(Score oldScore : getScores(oldIdentity)) { Score newScore = new Score(this, oldScore.getTruster(), newIdentity, oldScore.getScore(), oldScore.getRank(), oldScore.getCapacity()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newScore.equals(oldScore)); */ oldScore.deleteWithoutCommit(); newScore.storeWithoutCommit(); } assert(getScores(oldIdentity).size() == 0); // Delete all given scores: // Non-own identities do not assign scores to other identities so we can just delete them. for(Score oldScore : getGivenScores(oldIdentity)) { final Identity trustee = oldScore.getTrustee(); final boolean oldShouldFetchTrustee = shouldFetchIdentity(trustee); oldScore.deleteWithoutCommit(); // If the OwnIdentity which we are converting was the only source of trust to the trustee // of this Score value, the should-fetch state of the trustee might change to false. if(oldShouldFetchTrustee && shouldFetchIdentity(trustee) == false) { mFetcher.storeAbortFetchCommandWithoutCommit(trustee); } } assert(getGivenScores(oldIdentity).size() == 0); // Copy all given trusts: // We don't have to use the removeTrust/setTrust functions because the score graph does not need updating: // - To the rating of the converted identity in the score graphs of other own identities it is irrelevant // whether it is an own identity or not. The rating should never depend on whether it is an own identity! // - Non-own identities do not have a score graph. So the score graph of the converted identity is deleted // completely and therefore it does not need to be updated. for(Trust oldGivenTrust : getGivenTrusts(oldIdentity)) { Trust newGivenTrust; try { newGivenTrust = new Trust(this, newIdentity, oldGivenTrust.getTrustee(), oldGivenTrust.getValue(), oldGivenTrust.getComment()); } catch (InvalidParameterException e) { // The data was taken from the old Trust so this shouldn't happen throw new RuntimeException(e); } // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newGivenTrust.equals(oldGivenTrust)); */ oldGivenTrust.deleteWithoutCommit(); newGivenTrust.storeWithoutCommit(); } mPuzzleStore.onIdentityDeletion(oldIdentity); mFetcher.storeAbortFetchCommandWithoutCommit(oldIdentity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. oldIdentity.deleteWithoutCommit(); mFetcher.storeStartFetchCommandWithoutCommit(newIdentity); // This function messes with the score graph manually so it is a good idea to check whether it is intact before and afterwards. 
assert(computeAllScoresWithoutCommit()); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "deleteOwnIdentity(): Finished."); } /** * NOTICE: When changing this function, please also take care of {@link OwnIdentity.isRestoreInProgress()} */ public synchronized void restoreOwnIdentity(FreenetURI insertFreenetURI) throws MalformedURLException, InvalidParameterException { Logger.normal(this, "restoreOwnIdentity(): Starting... "); OwnIdentity identity; synchronized(mPuzzleStore) { synchronized(mFetcher) { synchronized(Persistent.transactionLock(mDB)) { try { long edition = 0; try { edition = Math.max(edition, insertFreenetURI.getEdition()); } catch(IllegalStateException e) { // The user supplied URI did not have an edition specified } try { // Try replacing an existing non-own version of the identity with an OwnIdentity Identity oldIdentity = getIdentityByURI(insertFreenetURI); if(oldIdentity instanceof OwnIdentity) throw new InvalidParameterException("There is already an own identity with the given URI pair."); Logger.normal(this, "Restoring an already known identity from Freenet: " + oldIdentity); // Normally, one would expect beginTrustListImport() to happen close to the actual trust list changes later on in this function. // But beginTrustListImport() contains an assert(computeAllScoresWithoutCommit()) and that call to the score computation reference // implementation will fail if two identities with the same ID exist. // This would be the case later on - we cannot delete the non-own version of the OwnIdentity before we modified the trust graph // but we must also store the own version to be able to modify the trust graph. beginTrustListImport(); // We already have fetched this identity as a stranger's one. We need to update the database. identity = new OwnIdentity(this, insertFreenetURI, oldIdentity.getNickname(), oldIdentity.doesPublishTrustList()); /* We re-fetch the most recent edition to make sure all trustees are imported */ edition = Math.max(edition, oldIdentity.getEdition()); identity.restoreEdition(edition, oldIdentity.getLastFetchedDate()); identity.setContexts(oldIdentity.getContexts()); identity.setProperties(oldIdentity.getProperties()); identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); // Copy all received trusts. // We don't have to modify them because they are user-assigned values and the assignment // of the user does not change just because the type of the identity changes. for(Trust oldReceivedTrust : getReceivedTrusts(oldIdentity)) { Trust newReceivedTrust = new Trust(this, oldReceivedTrust.getTruster(), identity, oldReceivedTrust.getValue(), oldReceivedTrust.getComment()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newReceivedTrust.equals(oldReceivedTrust)); */ oldReceivedTrust.deleteWithoutCommit(); newReceivedTrust.storeWithoutCommit(); } assert(getReceivedTrusts(oldIdentity).size() == 0); // Copy all received scores. // We don't have to modify them because the rating of the identity from the perspective of a // different own identity should NOT be dependent upon whether it is an own identity or not. 
for(Score oldScore : getScores(oldIdentity)) { Score newScore = new Score(this, oldScore.getTruster(), identity, oldScore.getScore(), oldScore.getRank(), oldScore.getCapacity()); // The following assert() cannot be added because it would always fail: // It would implicitly trigger oldIdentity.equals(identity) which is not the case: // Certain member values such as the edition might not be equal. /* assert(newScore.equals(oldScore)); */ oldScore.deleteWithoutCommit(); newScore.storeWithoutCommit(); } assert(getScores(oldIdentity).size() == 0); // What we do NOT have to deal with is the given scores of the old identity: // Given scores do NOT exist for non-own identities, so there are no old ones to update. // Of cause there WILL be scores because it is an own identity now. // They will be created automatically when updating the given trusts // - so thats what we will do now. // Update all given trusts for(Trust givenTrust : getGivenTrusts(oldIdentity)) { // TODO: Instead of using the regular removeTrustWithoutCommit on all trust values, we could: // - manually delete the old Trust objects from the database // - manually store the new trust objects // - Realize that only the trust graph of the restored identity needs to be updated and write an optimized version // of setTrustWithoutCommit which deals with that. // But before we do that, we should first do the existing possible optimization of removeTrustWithoutCommit: // To get rid of removeTrustWithoutCommit always triggering a FULL score recomputation and instead make // it only update the parts of the trust graph which are affected. // Maybe the optimized version is fast enough that we don't have to do the optimization which this TODO suggests. removeTrustWithoutCommit(givenTrust); setTrustWithoutCommit(identity, givenTrust.getTrustee(), givenTrust.getValue(), givenTrust.getComment()); } // We do not call finishTrustListImport() now: It might trigger execution of computeAllScoresWithoutCommit // which would re-create scores of the old identity. We later call it AFTER deleting the old identity. /* finishTrustListImport(); */ mPuzzleStore.onIdentityDeletion(oldIdentity); mFetcher.storeAbortFetchCommandWithoutCommit(oldIdentity); // NOTICE: // If the fetcher did store a db4o object reference to the identity, we would have to trigger command processing // now to prevent leakage of the identity object. // But the fetcher does NOT store a db4o object reference to the given identity. It stores its ID as String only. // Therefore, it is OK that the fetcher does not immediately process the commands now. oldIdentity.deleteWithoutCommit(); finishTrustListImport(); } catch (UnknownIdentityException e) { // The identity did NOT exist as non-own identity yet so we can just create an OwnIdentity and store it. identity = new OwnIdentity(this, insertFreenetURI, null, false); Logger.normal(this, "Restoring not-yet-known identity from Freenet: " + identity); identity.restoreEdition(edition, null); // Store the new identity identity.storeWithoutCommit(); initTrustTreeWithoutCommit(identity); } mFetcher.storeStartFetchCommandWithoutCommit(identity); // This function messes with the trust graph manually so it is a good idea to check whether it is intact afterwards. 
assert(computeAllScoresWithoutCommit()); Persistent.checkedCommit(mDB, this); } catch(RuntimeException e) { abortTrustListImport(e); Persistent.checkedRollbackAndThrow(mDB, this, e); } } } } Logger.normal(this, "restoreOwnIdentity(): Finished."); } public synchronized void setTrust(String ownTrusterID, String trusteeID, byte value, String comment) throws UnknownIdentityException, NumberFormatException, InvalidParameterException { final OwnIdentity truster = getOwnIdentityByID(ownTrusterID); Identity trustee = getIdentityByID(trusteeID); setTrust(truster, trustee, value, comment); } public synchronized void removeTrust(String ownTrusterID, String trusteeID) throws UnknownIdentityException { final OwnIdentity truster = getOwnIdentityByID(ownTrusterID); final Identity trustee = getIdentityByID(trusteeID); removeTrust(truster, trustee); } public synchronized void addContext(String ownIdentityID, String newContext) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.addContext(newContext); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Added context '" + newContext + "' to identity '" + identity.getNickname() + "'"); } public synchronized void removeContext(String ownIdentityID, String context) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.removeContext(context); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Removed context '" + context + "' from identity '" + identity.getNickname() + "'"); } public synchronized String getProperty(String identityID, String property) throws InvalidParameterException, UnknownIdentityException { return getIdentityByID(identityID).getProperty(property); } public synchronized void setProperty(String ownIdentityID, String property, String value) throws UnknownIdentityException, InvalidParameterException { Identity identity = getOwnIdentityByID(ownIdentityID); identity.setProperty(property, value); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Added property '" + property + "=" + value + "' to identity '" + identity.getNickname() + "'"); } public synchronized void removeProperty(String ownIdentityID, String property) throws UnknownIdentityException, InvalidParameterException { final Identity identity = getOwnIdentityByID(ownIdentityID); identity.removeProperty(property); identity.storeAndCommit(); if(logDEBUG) Logger.debug(this, "Removed property '" + property + "' from identity '" + identity.getNickname() + "'"); } public String getVersion() { return Version.getMarketingVersion(); } public long getRealVersion() { return Version.getRealVersion(); } public String getString(String key) { return getBaseL10n().getString(key); } public void setLanguage(LANGUAGE newLanguage) { WebOfTrust.l10n = new PluginL10n(this, newLanguage); if(logDEBUG) Logger.debug(this, "Set LANGUAGE to: " + newLanguage.isoCode); } public PluginRespirator getPluginRespirator() { return mPR; } public ExtObjectContainer getDatabase() { return mDB; } public Configuration getConfig() { return mConfig; } public IdentityFetcher getIdentityFetcher() { return mFetcher; } public XMLTransformer getXMLTransformer() { return mXMLTransformer; } public IntroductionPuzzleStore getIntroductionPuzzleStore() { return mPuzzleStore; } public IntroductionClient getIntroductionClient() { return mIntroductionClient; } public RequestClient getRequestClient() { return mRequestClient; } /** * This is where our L10n files are 
stored. * @return Path of our L10n files. */ public String getL10nFilesBasePath() { return "plugins/WebOfTrust/l10n/"; } /** * This is the mask of our L10n files : lang_en.l10n, lang_de.10n, ... * @return Mask of the L10n files. */ public String getL10nFilesMask() { return "lang_${lang}.l10n"; } /** * Override L10n files are stored on the disk, their names should be explicit * we put here the plugin name, and the "override" indication. Plugin L10n * override is not implemented in the node yet. * @return Mask of the override L10n files. */ public String getL10nOverrideFilesMask() { return "WebOfTrust_lang_${lang}.override.l10n"; } /** * Get the ClassLoader of this plugin. This is necessary when getting * resources inside the plugin's Jar, for example L10n files. * @return ClassLoader object */ public ClassLoader getPluginClassLoader() { return WebOfTrust.class.getClassLoader(); } /** * Access to the current L10n data. * * @return L10n object. */ public BaseL10n getBaseL10n() { return WebOfTrust.l10n.getBase(); } public int getNumberOfFullScoreRecomputations() { return mFullScoreRecomputationCount; } public synchronized double getAverageFullScoreRecomputationTime() { return (double)mFullScoreRecomputationMilliseconds / ((mFullScoreRecomputationCount!= 0 ? mFullScoreRecomputationCount : 1) * 1000); } public int getNumberOfIncrementalScoreRecomputations() { return mIncrementalScoreRecomputationCount; } public synchronized double getAverageIncrementalScoreRecomputationTime() { return (double)mIncrementalScoreRecomputationMilliseconds / ((mIncrementalScoreRecomputationCount!= 0 ? mIncrementalScoreRecomputationCount : 1) * 1000); } /** * Tests whether two WoT are equal. * This is a complex operation in terms of execution time and memory usage and only intended for being used in unit tests. 
*/ public synchronized boolean equals(Object obj) { if(obj == this) return true; if(!(obj instanceof WebOfTrust)) return false; WebOfTrust other = (WebOfTrust)obj; synchronized(other) { { // Compare own identities final ObjectSet<OwnIdentity> allIdentities = getAllOwnIdentities(); if(allIdentities.size() != other.getAllOwnIdentities().size()) return false; for(OwnIdentity identity : allIdentities) { try { if(!identity.equals(other.getOwnIdentityByID(identity.getID()))) return false; } catch(UnknownIdentityException e) { return false; } } } { // Compare identities final ObjectSet<Identity> allIdentities = getAllIdentities(); if(allIdentities.size() != other.getAllIdentities().size()) return false; for(Identity identity : allIdentities) { try { if(!identity.equals(other.getIdentityByID(identity.getID()))) return false; } catch(UnknownIdentityException e) { return false; } } } { // Compare trusts final ObjectSet<Trust> allTrusts = getAllTrusts(); if(allTrusts.size() != other.getAllTrusts().size()) return false; for(Trust trust : allTrusts) { try { Identity otherTruster = other.getIdentityByID(trust.getTruster().getID()); Identity otherTrustee = other.getIdentityByID(trust.getTrustee().getID()); if(!trust.equals(other.getTrust(otherTruster, otherTrustee))) return false; } catch(UnknownIdentityException e) { return false; } catch(NotTrustedException e) { return false; } } } { // Compare scores final ObjectSet<Score> allScores = getAllScores(); if(allScores.size() != other.getAllScores().size()) return false; for(Score score : allScores) { try { OwnIdentity otherTruster = other.getOwnIdentityByID(score.getTruster().getID()); Identity otherTrustee = other.getIdentityByID(score.getTrustee().getID()); if(!score.equals(other.getScore(otherTruster, otherTrustee))) return false; } catch(UnknownIdentityException e) { return false; } catch(NotInTrustTreeException e) { return false; } } } } return true; } }
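Editorial note: the computeRank() javadoc in the record above spells out the rank rules in prose (rank 0 for the tree owner itself; rank 1 or Integer.MAX_VALUE when the owner has assigned a trust value directly; otherwise the best positively-trusting truster's rank plus one; Integer.MAX_VALUE when only distrusted; -1 when unreachable). The following standalone sketch restates those documented rules under the assumption that the trusters' ranks and capacities are already known; the class and method names are hypothetical and are not part of the WebOfTrust sources in this record.

import java.util.List;

final class RankRuleSketch {
    // One received trust edge: the truster's rank and capacity in the tree owner's
    // trust tree, and the trust value the truster assigned to the trustee.
    static final class ReceivedTrust {
        final int trusterRank;
        final int trusterCapacity;
        final byte value;
        ReceivedTrust(int trusterRank, int trusterCapacity, byte value) {
            this.trusterRank = trusterRank;
            this.trusterCapacity = trusterCapacity;
            this.value = value;
        }
    }

    // directTrust is the tree owner's own trust value for the trustee, or null if none.
    static int computeRank(Byte directTrust, List<ReceivedTrust> received) {
        // The tree owner's decision is absolute and overrides all remote trust values.
        if (directTrust != null)
            return directTrust > 0 ? 1 : Integer.MAX_VALUE;

        int bestTrusterRank = -1; // -1 means "no rank yet"
        for (ReceivedTrust t : received) {
            if (t.trusterCapacity == 0)
                continue; // trusters without capacity cannot pass their rank on
            if (t.value > 0) {
                // Inherit the best (lowest) rank among positively trusting trusters.
                if (bestTrusterRank == -1 || t.trusterRank < bestTrusterRank)
                    bestTrusterRank = t.trusterRank;
            } else if (bestTrusterRank == -1) {
                // Only distrusted so far: infinite rank, kept so the distrust stays visible.
                bestTrusterRank = Integer.MAX_VALUE;
            }
        }

        if (bestTrusterRank == -1) return -1;                               // unreachable from the tree owner
        if (bestTrusterRank == Integer.MAX_VALUE) return Integer.MAX_VALUE; // distrusted
        return bestTrusterRank + 1;                                         // one step below the best truster
    }
}

The sketch mirrors only the documented behaviour; the real method additionally operates on persisted Trust/Score objects and relies on the capacity-0 rule to keep identities with infinite rank from passing a rank on.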
diff --git a/openFaces/source/org/openfaces/renderkit/table/SelectAllCheckboxRenderer.java b/openFaces/source/org/openfaces/renderkit/table/SelectAllCheckboxRenderer.java index 4b4f60fbc..9c2893053 100644 --- a/openFaces/source/org/openfaces/renderkit/table/SelectAllCheckboxRenderer.java +++ b/openFaces/source/org/openfaces/renderkit/table/SelectAllCheckboxRenderer.java @@ -1,100 +1,100 @@ /* * OpenFaces - JSF Component Library 2.0 * Copyright (C) 2007-2009, TeamDev Ltd. * [email protected] * Unless agreed in writing the contents of this file are subject to * the GNU Lesser General Public License Version 2.1 (the "LGPL" License). * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * Please visit http://openfaces.org/licensing/ for more details. */ package org.openfaces.renderkit.table; import org.openfaces.component.table.AbstractTable; import org.openfaces.component.table.AbstractTableSelection; import org.openfaces.component.table.BaseColumn; import org.openfaces.component.table.CheckboxColumn; import org.openfaces.renderkit.RendererBase; import org.openfaces.renderkit.TableUtil; import org.openfaces.util.RenderingUtil; import org.openfaces.util.ResourceUtil; import org.openfaces.util.ScriptBuilder; import org.openfaces.util.StyleUtil; import javax.faces.component.UIComponent; import javax.faces.context.FacesContext; import javax.faces.context.ResponseWriter; import java.io.IOException; /** * @author Dmitry Pikhulya */ public class SelectAllCheckboxRenderer extends RendererBase { @Override public void encodeChildren(FacesContext context, UIComponent component) throws IOException { if (!component.isRendered()) return; AbstractTable table = getTable(component); if (table == null) throw new IllegalStateException("SelectionColumn must be nested inside a table"); BaseColumn col = getColumn(component); boolean checkBoxColHeader = col instanceof CheckboxColumn; AbstractTableSelection selection; if (!checkBoxColHeader) { selection = table.getSelection(); if (selection == null) throw new IllegalStateException("<o:selectAllCheckbox> can only be inserted into a DataTable or TreeTable with multiple selection. clientId = " + component.getClientId(context)); boolean multipleRowSelection = selection.isMultipleSelectionAllowed(); if (!multipleRowSelection) throw new IllegalStateException("<o:selectAllCheckbox> can only be inserted into a DataTable or TreeTable with multiple selection. 
clientId = " + component.getClientId(context)); } else selection = null; ResponseWriter writer = context.getResponseWriter(); writer.startElement("input", component); writeIdAttribute(context, component); if (!checkBoxColHeader) { if (!selection.isEnabled()) writer.writeAttribute("disabled", "disabled", null); } writer.writeAttribute("type", "checkbox", null); ScriptBuilder buf = new ScriptBuilder(); if (checkBoxColHeader) { - buf.initScript(context, component, "O$.Table._initCheckboxColHeader", col).semicolon(); + buf.initScript(context, component, "O$.Table._initCheckboxColHeader", table, col).semicolon(); } else { - buf.initScript(context, component, "O$.Table._initSelectionHeader").semicolon(); + buf.initScript(context, component, "O$.Table._initSelectionHeader", table).semicolon(); } RenderingUtil.renderInitScript(context, buf, new String[]{ ResourceUtil.getUtilJsURL(context), TableUtil.getTableUtilJsURL(context), AbstractTableRenderer.getTableJsURL(context) }); StyleUtil.renderStyleClasses(context, component); writer.endElement("input"); } @Override public boolean getRendersChildren() { return true; } private static AbstractTable getTable(UIComponent header) { for (UIComponent component = header.getParent(); component != null; component = component.getParent()) if (component instanceof AbstractTable) return (AbstractTable) component; return null; } private static BaseColumn getColumn(UIComponent header) { for (UIComponent component = header.getParent(); component != null; component = component.getParent()) if (component instanceof BaseColumn) return (BaseColumn) component; return null; } }
false
true
public void encodeChildren(FacesContext context, UIComponent component) throws IOException { if (!component.isRendered()) return; AbstractTable table = getTable(component); if (table == null) throw new IllegalStateException("SelectionColumn must be nested inside a table"); BaseColumn col = getColumn(component); boolean checkBoxColHeader = col instanceof CheckboxColumn; AbstractTableSelection selection; if (!checkBoxColHeader) { selection = table.getSelection(); if (selection == null) throw new IllegalStateException("<o:selectAllCheckbox> can only be inserted into a DataTable or TreeTable with multiple selection. clientId = " + component.getClientId(context)); boolean multipleRowSelection = selection.isMultipleSelectionAllowed(); if (!multipleRowSelection) throw new IllegalStateException("<o:selectAllCheckbox> can only be inserted into a DataTable or TreeTable with multiple selection. clientId = " + component.getClientId(context)); } else selection = null; ResponseWriter writer = context.getResponseWriter(); writer.startElement("input", component); writeIdAttribute(context, component); if (!checkBoxColHeader) { if (!selection.isEnabled()) writer.writeAttribute("disabled", "disabled", null); } writer.writeAttribute("type", "checkbox", null); ScriptBuilder buf = new ScriptBuilder(); if (checkBoxColHeader) { buf.initScript(context, component, "O$.Table._initCheckboxColHeader", col).semicolon(); } else { buf.initScript(context, component, "O$.Table._initSelectionHeader").semicolon(); } RenderingUtil.renderInitScript(context, buf, new String[]{ ResourceUtil.getUtilJsURL(context), TableUtil.getTableUtilJsURL(context), AbstractTableRenderer.getTableJsURL(context) }); StyleUtil.renderStyleClasses(context, component); writer.endElement("input"); }
public void encodeChildren(FacesContext context, UIComponent component) throws IOException { if (!component.isRendered()) return; AbstractTable table = getTable(component); if (table == null) throw new IllegalStateException("SelectionColumn must be nested inside a table"); BaseColumn col = getColumn(component); boolean checkBoxColHeader = col instanceof CheckboxColumn; AbstractTableSelection selection; if (!checkBoxColHeader) { selection = table.getSelection(); if (selection == null) throw new IllegalStateException("<o:selectAllCheckbox> can only be inserted into a DataTable or TreeTable with multiple selection. clientId = " + component.getClientId(context)); boolean multipleRowSelection = selection.isMultipleSelectionAllowed(); if (!multipleRowSelection) throw new IllegalStateException("<o:selectAllCheckbox> can only be inserted into a DataTable or TreeTable with multiple selection. clientId = " + component.getClientId(context)); } else selection = null; ResponseWriter writer = context.getResponseWriter(); writer.startElement("input", component); writeIdAttribute(context, component); if (!checkBoxColHeader) { if (!selection.isEnabled()) writer.writeAttribute("disabled", "disabled", null); } writer.writeAttribute("type", "checkbox", null); ScriptBuilder buf = new ScriptBuilder(); if (checkBoxColHeader) { buf.initScript(context, component, "O$.Table._initCheckboxColHeader", table, col).semicolon(); } else { buf.initScript(context, component, "O$.Table._initSelectionHeader", table).semicolon(); } RenderingUtil.renderInitScript(context, buf, new String[]{ ResourceUtil.getUtilJsURL(context), TableUtil.getTableUtilJsURL(context), AbstractTableRenderer.getTableJsURL(context) }); StyleUtil.renderStyleClasses(context, component); writer.endElement("input"); }
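Editorial note: the SelectAllCheckboxRenderer record above locates its owning table and column by walking up the JSF component tree (getTable()/getColumn()), and the fixed version then passes that table into the client-side init scripts. The parent-walk pattern can be expressed as one small generic helper; only the UIComponent.getParent() walk is taken from the record, and the helper name below is hypothetical.

import javax.faces.component.UIComponent;

final class AncestorLookup {
    // Walk up the component tree and return the first enclosing component of the
    // requested type, or null if the start component is not nested inside one.
    static <T> T findAncestor(UIComponent start, Class<T> type) {
        for (UIComponent c = start.getParent(); c != null; c = c.getParent()) {
            if (type.isInstance(c))
                return type.cast(c);
        }
        return null;
    }
}

With such a helper, getTable(component) would become findAncestor(component, AbstractTable.class) and getColumn(component) would become findAncestor(component, BaseColumn.class), leaving the "must be nested inside a table" error check in encodeChildren() unchanged.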
diff --git a/src/com/vaadin/terminal/gwt/client/ui/VScrollTable.java b/src/com/vaadin/terminal/gwt/client/ui/VScrollTable.java index 61e35a28b..430fd0e6a 100644 --- a/src/com/vaadin/terminal/gwt/client/ui/VScrollTable.java +++ b/src/com/vaadin/terminal/gwt/client/ui/VScrollTable.java @@ -1,3015 +1,3016 @@ /* @ITMillApache2LicenseForJavaFiles@ */ package com.vaadin.terminal.gwt.client.ui; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Set; import com.google.gwt.dom.client.Document; import com.google.gwt.dom.client.NodeList; import com.google.gwt.dom.client.TableCellElement; import com.google.gwt.dom.client.TableRowElement; import com.google.gwt.dom.client.TableSectionElement; import com.google.gwt.event.dom.client.ScrollEvent; import com.google.gwt.event.dom.client.ScrollHandler; import com.google.gwt.user.client.Command; import com.google.gwt.user.client.DOM; import com.google.gwt.user.client.DeferredCommand; import com.google.gwt.user.client.Element; import com.google.gwt.user.client.Event; import com.google.gwt.user.client.Timer; import com.google.gwt.user.client.Window; import com.google.gwt.user.client.ui.FlowPanel; import com.google.gwt.user.client.ui.Panel; import com.google.gwt.user.client.ui.RootPanel; import com.google.gwt.user.client.ui.ScrollPanel; import com.google.gwt.user.client.ui.Widget; import com.vaadin.terminal.gwt.client.ApplicationConnection; import com.vaadin.terminal.gwt.client.BrowserInfo; import com.vaadin.terminal.gwt.client.Container; import com.vaadin.terminal.gwt.client.MouseEventDetails; import com.vaadin.terminal.gwt.client.Paintable; import com.vaadin.terminal.gwt.client.RenderSpace; import com.vaadin.terminal.gwt.client.UIDL; import com.vaadin.terminal.gwt.client.Util; import com.vaadin.terminal.gwt.client.ui.VScrollTable.VScrollTableBody.VScrollTableRow; /** * VScrollTable * * VScrollTable is a FlowPanel having two widgets in it: * TableHead component * * ScrollPanel * * TableHead contains table's header and widgets + logic for resizing, * reordering and hiding columns. * * ScrollPanel contains VScrollTableBody object which handles content. To save * some bandwidth and to improve clients responsiveness with loads of data, in * VScrollTableBody all rows are not necessary rendered. There are "spacers" in * VScrollTableBody to use the exact same space as non-rendered rows would use. * This way we can use seamlessly traditional scrollbars and scrolling to fetch * more rows instead of "paging". * * In VScrollTable we listen to scroll events. On horizontal scrolling we also * update TableHeads scroll position which has its scrollbars hidden. 
On * vertical scroll events we will check if we are reaching the end of area where * we have rows rendered and * * TODO implement unregistering for child components in Cells */ public class VScrollTable extends FlowPanel implements Table, ScrollHandler { public static final String CLASSNAME = "v-table"; public static final String ITEM_CLICK_EVENT_ID = "itemClick"; private static final double CACHE_RATE_DEFAULT = 2; /** * multiple of pagelength which component will cache when requesting more * rows */ private double cache_rate = CACHE_RATE_DEFAULT; /** * fraction of pageLenght which can be scrolled without making new request */ private double cache_react_rate = 0.75 * cache_rate; public static final char ALIGN_CENTER = 'c'; public static final char ALIGN_LEFT = 'b'; public static final char ALIGN_RIGHT = 'e'; private int firstRowInViewPort = 0; private int pageLength = 15; private int lastRequestedFirstvisible = 0; // to detect "serverside scroll" private boolean showRowHeaders = false; private String[] columnOrder; private ApplicationConnection client; private String paintableId; private boolean immediate; private int selectMode = Table.SELECT_MODE_NONE; private final HashSet<String> selectedRowKeys = new HashSet<String>(); private boolean initializedAndAttached = false; /** * Flag to indicate if a column width recalculation is needed due update. */ private boolean headerChangedDuringUpdate = false; private final TableHead tHead = new TableHead(); private final ScrollPanel bodyContainer = new ScrollPanel(); private int totalRows; private Set<String> collapsedColumns; private final RowRequestHandler rowRequestHandler; private VScrollTableBody scrollBody; private int firstvisible = 0; private boolean sortAscending; private String sortColumn; private boolean columnReordering; /** * This map contains captions and icon urls for actions like: * "33_c" -> * "Edit" * "33_i" -> "http://dom.com/edit.png" */ private final HashMap<Object, String> actionMap = new HashMap<Object, String>(); private String[] visibleColOrder; private boolean initialContentReceived = false; private Element scrollPositionElement; private boolean enabled; private boolean showColHeaders; /** flag to indicate that table body has changed */ private boolean isNewBody = true; /* * Read from the "recalcWidths" -attribute. When it is true, the table will * recalculate the widths for columns - desirable in some cases. For #1983, * marked experimental. */ boolean recalcWidths = false; private final ArrayList<Panel> lazyUnregistryBag = new ArrayList<Panel>(); private String height; private String width = ""; private boolean rendering = false; public VScrollTable() { bodyContainer.addScrollHandler(this); bodyContainer.setStyleName(CLASSNAME + "-body"); setStyleName(CLASSNAME); add(tHead); add(bodyContainer); rowRequestHandler = new RowRequestHandler(); } @SuppressWarnings("unchecked") public void updateFromUIDL(UIDL uidl, ApplicationConnection client) { rendering = true; if (client.updateComponent(this, uidl, true)) { rendering = false; return; } // we may have pending cache row fetch, cancel it. 
See #2136 rowRequestHandler.cancel(); enabled = !uidl.hasAttribute("disabled"); this.client = client; paintableId = uidl.getStringAttribute("id"); immediate = uidl.getBooleanAttribute("immediate"); final int newTotalRows = uidl.getIntAttribute("totalrows"); if (newTotalRows != totalRows) { if (scrollBody != null) { if (totalRows == 0) { tHead.clear(); } initializedAndAttached = false; initialContentReceived = false; isNewBody = true; } totalRows = newTotalRows; } setCacheRate(uidl.hasAttribute("cr") ? uidl.getDoubleAttribute("cr") : CACHE_RATE_DEFAULT); recalcWidths = uidl.hasAttribute("recalcWidths"); if (uidl.hasAttribute("pagelength")) { pageLength = uidl.getIntAttribute("pagelength"); } else { // pagelenght is "0" meaning scrolling is turned off pageLength = totalRows; } firstvisible = uidl.hasVariable("firstvisible") ? uidl .getIntVariable("firstvisible") : 0; if (firstvisible != lastRequestedFirstvisible && scrollBody != null) { // received 'surprising' firstvisible from server: scroll there firstRowInViewPort = firstvisible; bodyContainer.setScrollPosition(firstvisible * scrollBody.getRowHeight()); } showRowHeaders = uidl.getBooleanAttribute("rowheaders"); showColHeaders = uidl.getBooleanAttribute("colheaders"); if (uidl.hasVariable("sortascending")) { sortAscending = uidl.getBooleanVariable("sortascending"); sortColumn = uidl.getStringVariable("sortcolumn"); } if (uidl.hasVariable("selected")) { final Set<String> selectedKeys = uidl .getStringArrayVariableAsSet("selected"); selectedRowKeys.clear(); for (String string : selectedKeys) { selectedRowKeys.add(string); } } if (uidl.hasAttribute("selectmode")) { if (uidl.getBooleanAttribute("readonly")) { selectMode = Table.SELECT_MODE_NONE; } else if (uidl.getStringAttribute("selectmode").equals("multi")) { selectMode = Table.SELECT_MODE_MULTI; } else if (uidl.getStringAttribute("selectmode").equals("single")) { selectMode = Table.SELECT_MODE_SINGLE; } else { selectMode = Table.SELECT_MODE_NONE; } } if (uidl.hasVariable("columnorder")) { columnReordering = true; columnOrder = uidl.getStringArrayVariable("columnorder"); } if (uidl.hasVariable("collapsedcolumns")) { tHead.setColumnCollapsingAllowed(true); collapsedColumns = uidl .getStringArrayVariableAsSet("collapsedcolumns"); } else { tHead.setColumnCollapsingAllowed(false); } UIDL rowData = null; for (final Iterator it = uidl.getChildIterator(); it.hasNext();) { final UIDL c = (UIDL) it.next(); if (c.getTag().equals("rows")) { rowData = c; } else if (c.getTag().equals("actions")) { updateActionMap(c); } else if (c.getTag().equals("visiblecolumns")) { tHead.updateCellsFromUIDL(c); } } updateHeader(uidl.getStringArrayAttribute("vcolorder")); if (!recalcWidths && initializedAndAttached) { updateBody(rowData, uidl.getIntAttribute("firstrow"), uidl .getIntAttribute("rows")); if (headerChangedDuringUpdate) { lazyAdjustColumnWidths.schedule(1); } else { // webkits may still bug with their disturbing scrollbar bug, // See #3457 // run overflow fix for scrollable area DeferredCommand.addCommand(new Command() { public void execute() { Util.runWebkitOverflowAutoFix(bodyContainer .getElement()); } }); } } else { if (scrollBody != null) { scrollBody.removeFromParent(); lazyUnregistryBag.add(scrollBody); } scrollBody = new VScrollTableBody(); scrollBody.renderInitialRows(rowData, uidl .getIntAttribute("firstrow"), uidl.getIntAttribute("rows")); bodyContainer.add(scrollBody); initialContentReceived = true; if (isAttached()) { sizeInit(); } scrollBody.restoreRowVisibility(); } if (selectMode == 
Table.SELECT_MODE_NONE) { scrollBody.addStyleName(CLASSNAME + "-body-noselection"); } else { scrollBody.removeStyleName(CLASSNAME + "-body-noselection"); } hideScrollPositionAnnotation(); purgeUnregistryBag(); rendering = false; headerChangedDuringUpdate = false; } private void setCacheRate(double d) { if (cache_rate != d) { cache_rate = d; cache_react_rate = 0.75 * d; } } /** * Unregisters Paintables in "trashed" HasWidgets (IScrollTableBodys or * IScrollTableRows). This is done lazily as Table must survive from * "subtreecaching" logic. */ private void purgeUnregistryBag() { for (Iterator<Panel> iterator = lazyUnregistryBag.iterator(); iterator .hasNext();) { client.unregisterChildPaintables(iterator.next()); } lazyUnregistryBag.clear(); } private void updateActionMap(UIDL c) { final Iterator<?> it = c.getChildIterator(); while (it.hasNext()) { final UIDL action = (UIDL) it.next(); final String key = action.getStringAttribute("key"); final String caption = action.getStringAttribute("caption"); actionMap.put(key + "_c", caption); if (action.hasAttribute("icon")) { // TODO need some uri handling ?? actionMap.put(key + "_i", client.translateVaadinUri(action .getStringAttribute("icon"))); } } } public String getActionCaption(String actionKey) { return actionMap.get(actionKey + "_c"); } public String getActionIcon(String actionKey) { return actionMap.get(actionKey + "_i"); } private void updateHeader(String[] strings) { if (strings == null) { return; } int visibleCols = strings.length; int colIndex = 0; if (showRowHeaders) { tHead.enableColumn("0", colIndex); visibleCols++; visibleColOrder = new String[visibleCols]; visibleColOrder[colIndex] = "0"; colIndex++; } else { visibleColOrder = new String[visibleCols]; tHead.removeCell("0"); } int i; for (i = 0; i < strings.length; i++) { final String cid = strings[i]; visibleColOrder[colIndex] = cid; tHead.enableColumn(cid, colIndex); colIndex++; } tHead.setVisible(showColHeaders); } /** * @param uidl * which contains row data * @param firstRow * first row in data set * @param reqRows * amount of rows in data set */ private void updateBody(UIDL uidl, int firstRow, int reqRows) { if (uidl == null || reqRows < 1) { // container is empty, remove possibly existing rows if (firstRow < 0) { while (scrollBody.getLastRendered() > scrollBody.firstRendered) { scrollBody.unlinkRow(false); } scrollBody.unlinkRow(false); } return; } scrollBody.renderRows(uidl, firstRow, reqRows); final int optimalFirstRow = (int) (firstRowInViewPort - pageLength * cache_rate); boolean cont = true; while (cont && scrollBody.getLastRendered() > optimalFirstRow && scrollBody.getFirstRendered() < optimalFirstRow) { // client.console.log("removing row from start"); cont = scrollBody.unlinkRow(true); } final int optimalLastRow = (int) (firstRowInViewPort + pageLength + pageLength * cache_rate); cont = true; while (cont && scrollBody.getLastRendered() > optimalLastRow) { // client.console.log("removing row from the end"); cont = scrollBody.unlinkRow(false); } scrollBody.fixSpacers(); scrollBody.restoreRowVisibility(); } /** * Gives correct column index for given column key ("cid" in UIDL). 
* * @param colKey * @return column index of visible columns, -1 if column not visible */ private int getColIndexByKey(String colKey) { // return 0 if asked for rowHeaders if ("0".equals(colKey)) { return 0; } for (int i = 0; i < visibleColOrder.length; i++) { if (visibleColOrder[i].equals(colKey)) { return i; } } return -1; } private boolean isCollapsedColumn(String colKey) { if (collapsedColumns == null) { return false; } if (collapsedColumns.contains(colKey)) { return true; } return false; } private String getColKeyByIndex(int index) { return tHead.getHeaderCell(index).getColKey(); } private void setColWidth(int colIndex, int w, boolean isDefinedWidth) { final HeaderCell cell = tHead.getHeaderCell(colIndex); cell.setWidth(w, isDefinedWidth); scrollBody.setColWidth(colIndex, w); } private int getColWidth(String colKey) { return tHead.getHeaderCell(colKey).getWidth(); } private VScrollTableRow getRenderedRowByKey(String key) { final Iterator<Widget> it = scrollBody.iterator(); VScrollTableRow r = null; while (it.hasNext()) { r = (VScrollTableRow) it.next(); if (r.getKey().equals(key)) { return r; } } return null; } private void reOrderColumn(String columnKey, int newIndex) { final int oldIndex = getColIndexByKey(columnKey); // Change header order tHead.moveCell(oldIndex, newIndex); // Change body order scrollBody.moveCol(oldIndex, newIndex); /* * Build new columnOrder and update it to server Note that columnOrder * also contains collapsed columns so we cannot directly build it from * cells vector Loop the old columnOrder and append in order to new * array unless on moved columnKey. On new index also put the moved key * i == index on columnOrder, j == index on newOrder */ final String oldKeyOnNewIndex = visibleColOrder[newIndex]; if (showRowHeaders) { newIndex--; // columnOrder don't have rowHeader } // add back hidden rows, for (int i = 0; i < columnOrder.length; i++) { if (columnOrder[i].equals(oldKeyOnNewIndex)) { break; // break loop at target } if (isCollapsedColumn(columnOrder[i])) { newIndex++; } } // finally we can build the new columnOrder for server final String[] newOrder = new String[columnOrder.length]; for (int i = 0, j = 0; j < newOrder.length; i++) { if (j == newIndex) { newOrder[j] = columnKey; j++; } if (i == columnOrder.length) { break; } if (columnOrder[i].equals(columnKey)) { continue; } newOrder[j] = columnOrder[i]; j++; } columnOrder = newOrder; // also update visibleColumnOrder int i = showRowHeaders ? 1 : 0; for (int j = 0; j < newOrder.length; j++) { final String cid = newOrder[j]; if (!isCollapsedColumn(cid)) { visibleColOrder[i++] = cid; } } client.updateVariable(paintableId, "columnorder", columnOrder, false); } @Override protected void onAttach() { super.onAttach(); if (initialContentReceived) { sizeInit(); } } @Override protected void onDetach() { rowRequestHandler.cancel(); super.onDetach(); // ensure that scrollPosElement will be detached if (scrollPositionElement != null) { final Element parent = DOM.getParent(scrollPositionElement); if (parent != null) { DOM.removeChild(parent, scrollPositionElement); } } } /** * Run only once when component is attached and received its initial * content. This function : * Syncs headers and bodys "natural widths and * saves the values. * Sets proper width and height * Makes deferred request * to get some cache rows */ private void sizeInit() { /* * We will use browsers table rendering algorithm to find proper column * widths. 
If content and header take less space than available, we will * divide extra space relatively to each column which has not width set. * * Overflow pixels are added to last column. */ Iterator<Widget> headCells = tHead.iterator(); int i = 0; int totalExplicitColumnsWidths = 0; int total = 0; float expandRatioDivider = 0; final int[] widths = new int[tHead.visibleCells.size()]; tHead.enableBrowserIntelligence(); // first loop: collect natural widths while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); int w = hCell.getWidth(); if (hCell.isDefinedWidth()) { // server has defined column width explicitly totalExplicitColumnsWidths += w; } else { if (hCell.getExpandRatio() > 0) { expandRatioDivider += hCell.getExpandRatio(); w = 0; } else { // get and store greater of header width and column width, // and // store it as a minimumn natural col width w = hCell.getNaturalColumnWidth(i); } hCell.setNaturalMinimumColumnWidth(w); } widths[i] = w; total += w; i++; } tHead.disableBrowserIntelligence(); boolean willHaveScrollbarz = willHaveScrollbars(); // fix "natural" width if width not set if (width == null || "".equals(width)) { int w = total; w += scrollBody.getCellExtraWidth() * visibleColOrder.length; if (willHaveScrollbarz) { w += Util.getNativeScrollbarSize(); } setContentWidth(w); } int availW = scrollBody.getAvailableWidth(); if (BrowserInfo.get().isIE()) { // Hey IE, are you really sure about this? availW = scrollBody.getAvailableWidth(); } availW -= scrollBody.getCellExtraWidth() * visibleColOrder.length; if (willHaveScrollbarz) { availW -= Util.getNativeScrollbarSize(); } // TODO refactor this code to be the same as in resize timer boolean needsReLayout = false; if (availW > total) { // natural size is smaller than available space final int extraSpace = availW - total; final int totalWidthR = total - totalExplicitColumnsWidths; needsReLayout = true; if (expandRatioDivider > 0) { // visible columns have some active expand ratios, excess // space is divided according to them headCells = tHead.iterator(); i = 0; while (headCells.hasNext()) { HeaderCell hCell = (HeaderCell) headCells.next(); if (hCell.getExpandRatio() > 0) { int w = widths[i]; final int newSpace = (int) (extraSpace * (hCell .getExpandRatio() / expandRatioDivider)); w += newSpace; widths[i] = w; } i++; } } else if (totalWidthR > 0) { // no expand ratios defined, we will share extra space // relatively to "natural widths" among those without // explicit width headCells = tHead.iterator(); i = 0; while (headCells.hasNext()) { HeaderCell hCell = (HeaderCell) headCells.next(); if (!hCell.isDefinedWidth()) { int w = widths[i]; final int newSpace = extraSpace * w / totalWidthR; w += newSpace; widths[i] = w; } i++; } } } else { // bodys size will be more than available and scrollbar will appear } // last loop: set possibly modified values or reset if new tBody i = 0; headCells = tHead.iterator(); while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); if (isNewBody || hCell.getWidth() == -1) { final int w = widths[i]; setColWidth(i, w, false); } i++; } initializedAndAttached = true; if (needsReLayout) { scrollBody.reLayoutComponents(); } updatePageLength(); /* * Fix "natural" height if height is not set. This must be after width * fixing so the components' widths have been adjusted. 
*/ if (height == null || "".equals(height)) { /* * We must force an update of the row height as this point as it * might have been (incorrectly) calculated earlier */ if (pageLength == totalRows) { /* * A hack to support variable height rows when paging is off. * Generally this is not supported by scrolltable. We want to * show all rows so the bodyHeight should be equal to the table * height. */ // int bodyHeight = scrollBody.getOffsetHeight(); int bodyHeight = scrollBody.getRequiredHeight(); bodyContainer.setHeight(bodyHeight + "px"); Util.runWebkitOverflowAutoFix(bodyContainer.getElement()); } else { int bodyHeight = (scrollBody.getRowHeight(true) * pageLength); bodyContainer.setHeight(bodyHeight + "px"); } } isNewBody = false; if (firstvisible > 0) { // Deferred due some Firefox oddities. IE & Safari could survive // without DeferredCommand.addCommand(new Command() { public void execute() { bodyContainer.setScrollPosition(firstvisible * scrollBody.getRowHeight()); firstRowInViewPort = firstvisible; } }); } if (enabled) { // Do we need cache rows if (scrollBody.getLastRendered() + 1 < firstRowInViewPort + pageLength + (int) cache_react_rate * pageLength) { if (totalRows - 1 > scrollBody.getLastRendered()) { // fetch cache rows int firstInNewSet = scrollBody.getLastRendered() + 1; rowRequestHandler.setReqFirstRow(firstInNewSet); int lastInNewSet = (int) (firstRowInViewPort + pageLength + cache_rate * pageLength); if (lastInNewSet > totalRows - 1) { lastInNewSet = totalRows - 1; } - rowRequestHandler.setReqRows(lastInNewSet - firstInNewSet); + rowRequestHandler.setReqRows(lastInNewSet - firstInNewSet + + 1); rowRequestHandler.deferRowFetch(1); } } } } /** * Note, this method is not official api although declared as protected. * Extend at you own risk. * * @return true if content area will have scrollbars visible. */ protected boolean willHaveScrollbars() { if (!(height != null && !height.equals(""))) { if (pageLength < totalRows) { return true; } } else { int fakeheight = scrollBody.getRowHeight() * totalRows; int availableHeight = bodyContainer.getElement().getPropertyInt( "clientHeight"); if (fakeheight > availableHeight) { return true; } } return false; } private void announceScrollPosition() { if (scrollPositionElement == null) { scrollPositionElement = DOM.createDiv(); DOM.setElementProperty(scrollPositionElement, "className", CLASSNAME + "-scrollposition"); DOM .setStyleAttribute(scrollPositionElement, "position", "absolute"); DOM.appendChild(getElement(), scrollPositionElement); } DOM.setStyleAttribute(scrollPositionElement, "marginLeft", (DOM .getElementPropertyInt(getElement(), "offsetWidth") / 2 - 80) + "px"); DOM.setStyleAttribute(scrollPositionElement, "marginTop", -(DOM .getElementPropertyInt(bodyContainer.getElement(), "offsetHeight")) + "px"); // indexes go from 1-totalRows, as rowheaders in index-mode indicate int last = (firstRowInViewPort + pageLength); if (last > totalRows) { last = totalRows; } DOM.setInnerHTML(scrollPositionElement, "<span>" + (firstRowInViewPort + 1) + " &ndash; " + (last) + "..." 
+ "</span>"); DOM.setStyleAttribute(scrollPositionElement, "display", "block"); } private void hideScrollPositionAnnotation() { if (scrollPositionElement != null) { DOM.setStyleAttribute(scrollPositionElement, "display", "none"); } } private class RowRequestHandler extends Timer { private int reqFirstRow = 0; private int reqRows = 0; public void deferRowFetch() { deferRowFetch(250); } public void deferRowFetch(int msec) { if (reqRows > 0 && reqFirstRow < totalRows) { schedule(msec); // tell scroll position to user if currently "visible" rows are // not rendered if ((firstRowInViewPort + pageLength > scrollBody .getLastRendered()) || (firstRowInViewPort < scrollBody.getFirstRendered())) { announceScrollPosition(); } else { hideScrollPositionAnnotation(); } } } public void setReqFirstRow(int reqFirstRow) { if (reqFirstRow < 0) { reqFirstRow = 0; } else if (reqFirstRow >= totalRows) { reqFirstRow = totalRows - 1; } this.reqFirstRow = reqFirstRow; } public void setReqRows(int reqRows) { this.reqRows = reqRows; } @Override public void run() { if (client.hasActiveRequest()) { // if client connection is busy, don't bother loading it more schedule(250); } else { int firstToBeRendered = scrollBody.firstRendered; if (reqFirstRow < firstToBeRendered) { firstToBeRendered = reqFirstRow; } else if (firstRowInViewPort - (int) (cache_rate * pageLength) > firstToBeRendered) { firstToBeRendered = firstRowInViewPort - (int) (cache_rate * pageLength); if (firstToBeRendered < 0) { firstToBeRendered = 0; } } int lastToBeRendered = scrollBody.lastRendered; if (reqFirstRow + reqRows - 1 > lastToBeRendered) { lastToBeRendered = reqFirstRow + reqRows - 1; } else if (firstRowInViewPort + pageLength + pageLength * cache_rate < lastToBeRendered) { lastToBeRendered = (firstRowInViewPort + pageLength + (int) (pageLength * cache_rate)); if (lastToBeRendered >= totalRows) { lastToBeRendered = totalRows - 1; } // due Safari 3.1 bug (see #2607), verify reqrows, original // problem unknown, but this should catch the issue if (reqFirstRow + reqRows - 1 > lastToBeRendered) { reqRows = lastToBeRendered - reqFirstRow; } } client.updateVariable(paintableId, "firstToBeRendered", firstToBeRendered, false); client.updateVariable(paintableId, "lastToBeRendered", lastToBeRendered, false); // remember which firstvisible we requested, in case the server // has // a differing opinion lastRequestedFirstvisible = firstRowInViewPort; client.updateVariable(paintableId, "firstvisible", firstRowInViewPort, false); client.updateVariable(paintableId, "reqfirstrow", reqFirstRow, false); client.updateVariable(paintableId, "reqrows", reqRows, true); } } public int getReqFirstRow() { return reqFirstRow; } public int getReqRows() { return reqRows; } /** * Sends request to refresh content at this position. 
*/ public void refreshContent() { int first = (int) (firstRowInViewPort - pageLength * cache_rate); int reqRows = (int) (2 * pageLength * cache_rate + pageLength); if (first < 0) { reqRows = reqRows + first; first = 0; } setReqFirstRow(first); setReqRows(reqRows); run(); } } public class HeaderCell extends Widget { Element td = DOM.createTD(); Element captionContainer = DOM.createDiv(); Element colResizeWidget = DOM.createDiv(); Element floatingCopyOfHeaderCell; private boolean sortable = false; private final String cid; private boolean dragging; private int dragStartX; private int colIndex; private int originalWidth; private boolean isResizing; private int headerX; private boolean moved; private int closestSlot; private int width = -1; private int naturalWidth = -1; private char align = ALIGN_LEFT; boolean definedWidth = false; private float expandRatio = 0; public void setSortable(boolean b) { sortable = b; } public void setNaturalMinimumColumnWidth(int w) { naturalWidth = w; } public HeaderCell(String colId, String headerText) { cid = colId; DOM.setElementProperty(colResizeWidget, "className", CLASSNAME + "-resizer"); DOM.sinkEvents(colResizeWidget, Event.MOUSEEVENTS); setText(headerText); DOM.appendChild(td, colResizeWidget); DOM.setElementProperty(captionContainer, "className", CLASSNAME + "-caption-container"); // ensure no clipping initially (problem on column additions) DOM.setStyleAttribute(captionContainer, "overflow", "visible"); DOM.sinkEvents(captionContainer, Event.MOUSEEVENTS); DOM.appendChild(td, captionContainer); DOM.sinkEvents(td, Event.MOUSEEVENTS); setElement(td); } public void setWidth(int w, boolean ensureDefinedWidth) { if (ensureDefinedWidth) { definedWidth = true; // on column resize expand ratio becomes zero expandRatio = 0; } if (width == w) { return; } if (width == -1) { // go to default mode, clip content if necessary DOM.setStyleAttribute(captionContainer, "overflow", ""); } width = w; if (w == -1) { DOM.setStyleAttribute(captionContainer, "width", ""); setWidth(""); } else { captionContainer.getStyle().setPropertyPx("width", w); /* * if we already have tBody, set the header width properly, if * not defer it. IE will fail with complex float in table header * unless TD width is not explicitly set. */ if (scrollBody != null) { int tdWidth = width + scrollBody.getCellExtraWidth(); setWidth(tdWidth + "px"); } else { DeferredCommand.addCommand(new Command() { public void execute() { int tdWidth = width + scrollBody.getCellExtraWidth(); setWidth(tdWidth + "px"); } }); } } } public void setUndefinedWidth() { definedWidth = false; setWidth(-1, false); } /** * Detects if width is fixed by developer on server side or resized to * current width by user. * * @return true if defined, false if "natural" width */ public boolean isDefinedWidth() { return definedWidth; } public int getWidth() { return width; } public void setText(String headerText) { DOM.setInnerHTML(captionContainer, headerText); } public String getColKey() { return cid; } private void setSorted(boolean sorted) { if (sorted) { if (sortAscending) { this.setStyleName(CLASSNAME + "-header-cell-asc"); } else { this.setStyleName(CLASSNAME + "-header-cell-desc"); } } else { this.setStyleName(CLASSNAME + "-header-cell"); } } /** * Handle column reordering. 
*/ @Override public void onBrowserEvent(Event event) { if (enabled && event != null) { if (isResizing || event.getTarget() == colResizeWidget) { onResizeEvent(event); } else { handleCaptionEvent(event); } } } private void createFloatingCopy() { floatingCopyOfHeaderCell = DOM.createDiv(); DOM.setInnerHTML(floatingCopyOfHeaderCell, DOM.getInnerHTML(td)); floatingCopyOfHeaderCell = DOM .getChild(floatingCopyOfHeaderCell, 1); DOM.setElementProperty(floatingCopyOfHeaderCell, "className", CLASSNAME + "-header-drag"); updateFloatingCopysPosition(DOM.getAbsoluteLeft(td), DOM .getAbsoluteTop(td)); DOM.appendChild(RootPanel.get().getElement(), floatingCopyOfHeaderCell); } private void updateFloatingCopysPosition(int x, int y) { x -= DOM.getElementPropertyInt(floatingCopyOfHeaderCell, "offsetWidth") / 2; DOM.setStyleAttribute(floatingCopyOfHeaderCell, "left", x + "px"); if (y > 0) { DOM.setStyleAttribute(floatingCopyOfHeaderCell, "top", (y + 7) + "px"); } } private void hideFloatingCopy() { DOM.removeChild(RootPanel.get().getElement(), floatingCopyOfHeaderCell); floatingCopyOfHeaderCell = null; } protected void handleCaptionEvent(Event event) { switch (DOM.eventGetType(event)) { case Event.ONMOUSEDOWN: if (columnReordering) { dragging = true; moved = false; colIndex = getColIndexByKey(cid); DOM.setCapture(getElement()); headerX = tHead.getAbsoluteLeft(); DOM.eventPreventDefault(event); // prevent selecting text } break; case Event.ONMOUSEUP: if (columnReordering) { dragging = false; DOM.releaseCapture(getElement()); if (moved) { hideFloatingCopy(); tHead.removeSlotFocus(); if (closestSlot != colIndex && closestSlot != (colIndex + 1)) { if (closestSlot > colIndex) { reOrderColumn(cid, closestSlot - 1); } else { reOrderColumn(cid, closestSlot); } } } } if (!moved) { // mouse event was a click to header -> sort column if (sortable) { if (sortColumn.equals(cid)) { // just toggle order client.updateVariable(paintableId, "sortascending", !sortAscending, false); } else { // set table scrolled by this column client.updateVariable(paintableId, "sortcolumn", cid, false); } // get also cache columns at the same request bodyContainer.setScrollPosition(0); firstvisible = 0; rowRequestHandler.setReqFirstRow(0); rowRequestHandler.setReqRows((int) (2 * pageLength * cache_rate + pageLength)); rowRequestHandler.deferRowFetch(); } break; } break; case Event.ONMOUSEMOVE: if (dragging) { if (!moved) { createFloatingCopy(); moved = true; } final int x = DOM.eventGetClientX(event) + DOM.getElementPropertyInt(tHead.hTableWrapper, "scrollLeft"); int slotX = headerX; closestSlot = colIndex; int closestDistance = -1; int start = 0; if (showRowHeaders) { start++; } final int visibleCellCount = tHead.getVisibleCellCount(); for (int i = start; i <= visibleCellCount; i++) { if (i > 0) { final String colKey = getColKeyByIndex(i - 1); slotX += getColWidth(colKey); } final int dist = Math.abs(x - slotX); if (closestDistance == -1 || dist < closestDistance) { closestDistance = dist; closestSlot = i; } } tHead.focusSlot(closestSlot); updateFloatingCopysPosition(DOM.eventGetClientX(event), -1); } break; default: break; } } private void onResizeEvent(Event event) { switch (DOM.eventGetType(event)) { case Event.ONMOUSEDOWN: isResizing = true; DOM.setCapture(getElement()); dragStartX = DOM.eventGetClientX(event); colIndex = getColIndexByKey(cid); originalWidth = getWidth(); DOM.eventPreventDefault(event); break; case Event.ONMOUSEUP: isResizing = false; DOM.releaseCapture(getElement()); // readjust undefined width columns 
lazyAdjustColumnWidths.cancel(); lazyAdjustColumnWidths.schedule(1); break; case Event.ONMOUSEMOVE: if (isResizing) { final int deltaX = DOM.eventGetClientX(event) - dragStartX; if (deltaX == 0) { return; } int newWidth = originalWidth + deltaX; if (newWidth < scrollBody.getCellExtraWidth()) { newWidth = scrollBody.getCellExtraWidth(); } setColWidth(colIndex, newWidth, true); } break; default: break; } } public String getCaption() { return DOM.getInnerText(captionContainer); } public boolean isEnabled() { return getParent() != null; } public void setAlign(char c) { if (align != c) { switch (c) { case ALIGN_CENTER: DOM.setStyleAttribute(captionContainer, "textAlign", "center"); break; case ALIGN_RIGHT: DOM.setStyleAttribute(captionContainer, "textAlign", "right"); break; default: DOM.setStyleAttribute(captionContainer, "textAlign", ""); break; } } align = c; } public char getAlign() { return align; } /** * Detects the natural minimum width for the column of this header cell. * If column is resized by user or the width is defined by server the * actual width is returned. Else the natural min width is returned. * * @param columnIndex * column index hint, if -1 (unknown) it will be detected * * @return */ public int getNaturalColumnWidth(int columnIndex) { if (isDefinedWidth()) { return width; } else { if (naturalWidth < 0) { // This is recently revealed column. Try to detect a proper // value (greater of header and data // cols) final int hw = ((Element) getElement().getLastChild()) .getOffsetWidth() + scrollBody.getCellExtraWidth(); if (columnIndex < 0) { columnIndex = 0; for (Iterator<Widget> it = tHead.iterator(); it .hasNext(); columnIndex++) { if (it.next() == this) { break; } } } final int cw = scrollBody.getColWidth(columnIndex); naturalWidth = (hw > cw ? hw : cw); } return naturalWidth; } } public void setExpandRatio(float floatAttribute) { expandRatio = floatAttribute; } public float getExpandRatio() { return expandRatio; } } /** * HeaderCell that is header cell for row headers. * * Reordering disabled and clicking on it resets sorting. 
*/ public class RowHeadersHeaderCell extends HeaderCell { RowHeadersHeaderCell() { super("0", ""); } @Override protected void handleCaptionEvent(Event event) { // NOP: RowHeaders cannot be reordered // TODO It'd be nice to reset sorting here } } public class TableHead extends Panel implements ActionOwner { private static final int WRAPPER_WIDTH = 9000; ArrayList<Widget> visibleCells = new ArrayList<Widget>(); HashMap<String, HeaderCell> availableCells = new HashMap<String, HeaderCell>(); Element div = DOM.createDiv(); Element hTableWrapper = DOM.createDiv(); Element hTableContainer = DOM.createDiv(); Element table = DOM.createTable(); Element headerTableBody = DOM.createTBody(); Element tr = DOM.createTR(); private final Element columnSelector = DOM.createDiv(); private int focusedSlot = -1; public TableHead() { if (BrowserInfo.get().isIE()) { table.setPropertyInt("cellSpacing", 0); } DOM.setStyleAttribute(hTableWrapper, "overflow", "hidden"); DOM.setElementProperty(hTableWrapper, "className", CLASSNAME + "-header"); // TODO move styles to CSS DOM.setElementProperty(columnSelector, "className", CLASSNAME + "-column-selector"); DOM.setStyleAttribute(columnSelector, "display", "none"); DOM.appendChild(table, headerTableBody); DOM.appendChild(headerTableBody, tr); DOM.appendChild(hTableContainer, table); DOM.appendChild(hTableWrapper, hTableContainer); DOM.appendChild(div, hTableWrapper); DOM.appendChild(div, columnSelector); setElement(div); setStyleName(CLASSNAME + "-header-wrap"); DOM.sinkEvents(columnSelector, Event.ONCLICK); availableCells.put("0", new RowHeadersHeaderCell()); } @Override public void clear() { for (String cid : availableCells.keySet()) { removeCell(cid); } availableCells.clear(); availableCells.put("0", new RowHeadersHeaderCell()); } public void updateCellsFromUIDL(UIDL uidl) { Iterator<?> it = uidl.getChildIterator(); HashSet<String> updated = new HashSet<String>(); updated.add("0"); while (it.hasNext()) { final UIDL col = (UIDL) it.next(); final String cid = col.getStringAttribute("cid"); updated.add(cid); String caption = buildCaptionHtmlSnippet(col); HeaderCell c = getHeaderCell(cid); if (c == null) { c = new HeaderCell(cid, caption); availableCells.put(cid, c); if (initializedAndAttached) { // we will need a column width recalculation initializedAndAttached = false; initialContentReceived = false; isNewBody = true; } } else { c.setText(caption); } if (col.hasAttribute("sortable")) { c.setSortable(true); if (cid.equals(sortColumn)) { c.setSorted(true); } else { c.setSorted(false); } } else { c.setSortable(false); } if (col.hasAttribute("align")) { c.setAlign(col.getStringAttribute("align").charAt(0)); } if (col.hasAttribute("width")) { final String width = col.getStringAttribute("width"); c.setWidth(Integer.parseInt(width), true); } else if (recalcWidths) { c.setUndefinedWidth(); } if (col.hasAttribute("er")) { c.setExpandRatio(col.getFloatAttribute("er")); } if (col.hasAttribute("collapsed")) { // ensure header is properly removed from parent (case when // collapsing happens via servers side api) if (c.isAttached()) { c.removeFromParent(); headerChangedDuringUpdate = true; } } } // check for orphaned header cells for (Iterator<String> cit = availableCells.keySet().iterator(); cit .hasNext();) { String cid = cit.next(); if (!updated.contains(cid)) { removeCell(cid); cit.remove(); } } } public void enableColumn(String cid, int index) { final HeaderCell c = getHeaderCell(cid); if (!c.isEnabled() || getHeaderCell(index) != c) { setHeaderCell(index, c); if 
(initializedAndAttached) { headerChangedDuringUpdate = true; } } } public int getVisibleCellCount() { return visibleCells.size(); } public void setHorizontalScrollPosition(int scrollLeft) { if (BrowserInfo.get().isIE6()) { hTableWrapper.getStyle().setProperty("position", "relative"); hTableWrapper.getStyle().setPropertyPx("left", -scrollLeft); } else { hTableWrapper.setScrollLeft(scrollLeft); } } public void setColumnCollapsingAllowed(boolean cc) { if (cc) { DOM.setStyleAttribute(columnSelector, "display", "block"); } else { DOM.setStyleAttribute(columnSelector, "display", "none"); } } public void disableBrowserIntelligence() { DOM.setStyleAttribute(hTableContainer, "width", WRAPPER_WIDTH + "px"); } public void enableBrowserIntelligence() { DOM.setStyleAttribute(hTableContainer, "width", ""); } public void setHeaderCell(int index, HeaderCell cell) { if (cell.isEnabled()) { // we're moving the cell DOM.removeChild(tr, cell.getElement()); orphan(cell); } if (index < visibleCells.size()) { // insert to right slot DOM.insertChild(tr, cell.getElement(), index); adopt(cell); visibleCells.add(index, cell); } else if (index == visibleCells.size()) { // simply append DOM.appendChild(tr, cell.getElement()); adopt(cell); visibleCells.add(cell); } else { throw new RuntimeException( "Header cells must be appended in order"); } } public HeaderCell getHeaderCell(int index) { if (index < visibleCells.size()) { return (HeaderCell) visibleCells.get(index); } else { return null; } } /** * Get's HeaderCell by it's column Key. * * Note that this returns HeaderCell even if it is currently collapsed. * * @param cid * Column key of accessed HeaderCell * @return HeaderCell */ public HeaderCell getHeaderCell(String cid) { return availableCells.get(cid); } public void moveCell(int oldIndex, int newIndex) { final HeaderCell hCell = getHeaderCell(oldIndex); final Element cell = hCell.getElement(); visibleCells.remove(oldIndex); DOM.removeChild(tr, cell); DOM.insertChild(tr, cell, newIndex); visibleCells.add(newIndex, hCell); } public Iterator<Widget> iterator() { return visibleCells.iterator(); } @Override public boolean remove(Widget w) { if (visibleCells.contains(w)) { visibleCells.remove(w); orphan(w); DOM.removeChild(DOM.getParent(w.getElement()), w.getElement()); return true; } return false; } public void removeCell(String colKey) { final HeaderCell c = getHeaderCell(colKey); remove(c); } private void focusSlot(int index) { removeSlotFocus(); if (index > 0) { DOM.setElementProperty(DOM.getFirstChild(DOM.getChild(tr, index - 1)), "className", CLASSNAME + "-resizer " + CLASSNAME + "-focus-slot-right"); } else { DOM.setElementProperty(DOM.getFirstChild(DOM .getChild(tr, index)), "className", CLASSNAME + "-resizer " + CLASSNAME + "-focus-slot-left"); } focusedSlot = index; } private void removeSlotFocus() { if (focusedSlot < 0) { return; } if (focusedSlot == 0) { DOM.setElementProperty(DOM.getFirstChild(DOM.getChild(tr, focusedSlot)), "className", CLASSNAME + "-resizer"); } else if (focusedSlot > 0) { DOM.setElementProperty(DOM.getFirstChild(DOM.getChild(tr, focusedSlot - 1)), "className", CLASSNAME + "-resizer"); } focusedSlot = -1; } @Override public void onBrowserEvent(Event event) { if (enabled) { if (event.getTarget() == columnSelector) { final int left = DOM.getAbsoluteLeft(columnSelector); final int top = DOM.getAbsoluteTop(columnSelector) + DOM.getElementPropertyInt(columnSelector, "offsetHeight"); client.getContextMenu().showAt(this, left, top); } } } class VisibleColumnAction extends Action { String colKey; 
private boolean collapsed; public VisibleColumnAction(String colKey) { super(VScrollTable.TableHead.this); this.colKey = colKey; caption = tHead.getHeaderCell(colKey).getCaption(); } @Override public void execute() { client.getContextMenu().hide(); // toggle selected column if (collapsedColumns.contains(colKey)) { collapsedColumns.remove(colKey); } else { tHead.removeCell(colKey); collapsedColumns.add(colKey); lazyAdjustColumnWidths.schedule(1); } // update variable to server client.updateVariable(paintableId, "collapsedcolumns", collapsedColumns.toArray(new String[collapsedColumns .size()]), false); // let rowRequestHandler determine proper rows rowRequestHandler.refreshContent(); } public void setCollapsed(boolean b) { collapsed = b; } /** * Override default method to distinguish on/off columns */ @Override public String getHTML() { final StringBuffer buf = new StringBuffer(); if (collapsed) { buf.append("<span class=\"v-off\">"); } else { buf.append("<span class=\"v-on\">"); } buf.append(super.getHTML()); buf.append("</span>"); return buf.toString(); } } /* * Returns columns as Action array for column select popup */ public Action[] getActions() { Object[] cols; if (columnReordering) { cols = columnOrder; } else { // if columnReordering is disabled, we need different way to get // all available columns cols = visibleColOrder; cols = new Object[visibleColOrder.length + collapsedColumns.size()]; int i; for (i = 0; i < visibleColOrder.length; i++) { cols[i] = visibleColOrder[i]; } for (final Iterator<String> it = collapsedColumns.iterator(); it .hasNext();) { cols[i++] = it.next(); } } final Action[] actions = new Action[cols.length]; for (int i = 0; i < cols.length; i++) { final String cid = (String) cols[i]; final HeaderCell c = getHeaderCell(cid); final VisibleColumnAction a = new VisibleColumnAction(c .getColKey()); a.setCaption(c.getCaption()); if (!c.isEnabled()) { a.setCollapsed(true); } actions[i] = a; } return actions; } public ApplicationConnection getClient() { return client; } public String getPaintableId() { return paintableId; } /** * Returns column alignments for visible columns */ public char[] getColumnAlignments() { final Iterator<Widget> it = visibleCells.iterator(); final char[] aligns = new char[visibleCells.size()]; int colIndex = 0; while (it.hasNext()) { aligns[colIndex++] = ((HeaderCell) it.next()).getAlign(); } return aligns; } } /** * This Panel can only contain VScrollTableRow type of widgets. This * "simulates" very large table, keeping spacers which take room of * unrendered rows. * */ public class VScrollTableBody extends Panel { public static final int DEFAULT_ROW_HEIGHT = 24; private int rowHeight = -1; private final List<Widget> renderedRows = new ArrayList<Widget>(); /** * Due some optimizations row height measuring is deferred and initial * set of rows is rendered detached. Flag set on when table body has * been attached in dom and rowheight has been measured. */ private boolean tBodyMeasurementsDone = false; Element preSpacer = DOM.createDiv(); Element postSpacer = DOM.createDiv(); Element container = DOM.createDiv(); TableSectionElement tBodyElement = Document.get().createTBodyElement(); Element table = DOM.createTable(); private int firstRendered; private int lastRendered; private char[] aligns; VScrollTableBody() { constructDOM(); setElement(container); } /** * @return the height of scrollable body, subpixels ceiled. 
*/ public int getRequiredHeight() { return preSpacer.getOffsetHeight() + postSpacer.getOffsetHeight() + Util.getRequiredHeight(table); } private void constructDOM() { DOM.setElementProperty(table, "className", CLASSNAME + "-table"); if (BrowserInfo.get().isIE()) { table.setPropertyInt("cellSpacing", 0); } DOM.setElementProperty(preSpacer, "className", CLASSNAME + "-row-spacer"); DOM.setElementProperty(postSpacer, "className", CLASSNAME + "-row-spacer"); table.appendChild(tBodyElement); DOM.appendChild(container, preSpacer); DOM.appendChild(container, table); DOM.appendChild(container, postSpacer); } public int getAvailableWidth() { int availW = bodyContainer.getOffsetWidth() - getBorderWidth(); return availW; } public void renderInitialRows(UIDL rowData, int firstIndex, int rows) { firstRendered = firstIndex; lastRendered = firstIndex + rows - 1; final Iterator<?> it = rowData.getChildIterator(); aligns = tHead.getColumnAlignments(); while (it.hasNext()) { final VScrollTableRow row = new VScrollTableRow((UIDL) it .next(), aligns); addRow(row); } if (isAttached()) { fixSpacers(); } } public void renderRows(UIDL rowData, int firstIndex, int rows) { // FIXME REVIEW aligns = tHead.getColumnAlignments(); final Iterator<?> it = rowData.getChildIterator(); if (firstIndex == lastRendered + 1) { while (it.hasNext()) { final VScrollTableRow row = createRow((UIDL) it.next()); addRow(row); lastRendered++; } fixSpacers(); } else if (firstIndex + rows == firstRendered) { final VScrollTableRow[] rowArray = new VScrollTableRow[rows]; int i = rows; while (it.hasNext()) { i--; rowArray[i] = createRow((UIDL) it.next()); } for (i = 0; i < rows; i++) { addRowBeforeFirstRendered(rowArray[i]); firstRendered--; } } else { // completely new set of rows while (lastRendered + 1 > firstRendered) { unlinkRow(false); } final VScrollTableRow row = createRow((UIDL) it.next()); firstRendered = firstIndex; lastRendered = firstIndex - 1; addRow(row); lastRendered++; setContainerHeight(); fixSpacers(); while (it.hasNext()) { addRow(createRow((UIDL) it.next())); lastRendered++; } fixSpacers(); } // this may be a new set of rows due content change, // ensure we have proper cache rows int reactFirstRow = (int) (firstRowInViewPort - pageLength * cache_react_rate); int reactLastRow = (int) (firstRowInViewPort + pageLength + pageLength * cache_react_rate); if (reactFirstRow < 0) { reactFirstRow = 0; } if (reactLastRow >= totalRows) { reactLastRow = totalRows - 1; } if (lastRendered < reactLastRow) { // get some cache rows below visible area rowRequestHandler.setReqFirstRow(lastRendered + 1); rowRequestHandler.setReqRows(reactLastRow - lastRendered - 1); rowRequestHandler.deferRowFetch(1); } else if (scrollBody.getFirstRendered() > reactFirstRow) { /* * Branch for fetching cache above visible area. * * If cache needed for both before and after visible area, this * will be rendered after-cache is reveived and rendered. So in * some rare situations table may take two cache visits to * server. */ rowRequestHandler.setReqFirstRow(reactFirstRow); rowRequestHandler.setReqRows(firstRendered - reactFirstRow); rowRequestHandler.deferRowFetch(1); } } /** * This method is used to instantiate new rows for this table. It * automatically sets correct widths to rows cells and assigns correct * client reference for child widgets. 
* * This method can be called only after table has been initialized * * @param uidl */ private VScrollTableRow createRow(UIDL uidl) { final VScrollTableRow row = new VScrollTableRow(uidl, aligns); final int cells = DOM.getChildCount(row.getElement()); for (int i = 0; i < cells; i++) { final Element cell = DOM.getChild(row.getElement(), i); int w = VScrollTable.this.getColWidth(getColKeyByIndex(i)); if (w < 0) { w = 0; } cell.getFirstChildElement().getStyle() .setPropertyPx("width", w); cell.getStyle().setPropertyPx("width", w); } return row; } private void addRowBeforeFirstRendered(VScrollTableRow row) { VScrollTableRow first = null; if (renderedRows.size() > 0) { first = (VScrollTableRow) renderedRows.get(0); } if (first != null && first.getStyleName().indexOf("-odd") == -1) { row.addStyleName(CLASSNAME + "-row-odd"); } else { row.addStyleName(CLASSNAME + "-row"); } if (row.isSelected()) { row.addStyleName("v-selected"); } tBodyElement.insertBefore(row.getElement(), tBodyElement .getFirstChild()); adopt(row); renderedRows.add(0, row); } private void addRow(VScrollTableRow row) { VScrollTableRow last = null; if (renderedRows.size() > 0) { last = (VScrollTableRow) renderedRows .get(renderedRows.size() - 1); } if (last != null && last.getStyleName().indexOf("-odd") == -1) { row.addStyleName(CLASSNAME + "-row-odd"); } else { row.addStyleName(CLASSNAME + "-row"); } if (row.isSelected()) { row.addStyleName("v-selected"); } tBodyElement.appendChild(row.getElement()); adopt(row); renderedRows.add(row); } public Iterator<Widget> iterator() { return renderedRows.iterator(); } /** * @return false if couldn't remove row */ public boolean unlinkRow(boolean fromBeginning) { if (lastRendered - firstRendered < 0) { return false; } int index; if (fromBeginning) { index = 0; firstRendered++; } else { index = renderedRows.size() - 1; lastRendered--; } if (index >= 0) { final VScrollTableRow toBeRemoved = (VScrollTableRow) renderedRows .get(index); lazyUnregistryBag.add(toBeRemoved); tBodyElement.removeChild(toBeRemoved.getElement()); orphan(toBeRemoved); renderedRows.remove(index); fixSpacers(); return true; } else { return false; } } @Override public boolean remove(Widget w) { throw new UnsupportedOperationException(); } @Override protected void onAttach() { super.onAttach(); setContainerHeight(); } /** * Fix container blocks height according to totalRows to avoid * "bouncing" when scrolling */ private void setContainerHeight() { fixSpacers(); DOM.setStyleAttribute(container, "height", totalRows * getRowHeight() + "px"); } private void fixSpacers() { int prepx = getRowHeight() * firstRendered; if (prepx < 0) { prepx = 0; } DOM.setStyleAttribute(preSpacer, "height", prepx + "px"); int postpx = getRowHeight() * (totalRows - 1 - lastRendered); if (postpx < 0) { postpx = 0; } DOM.setStyleAttribute(postSpacer, "height", postpx + "px"); } public int getRowHeight() { return getRowHeight(false); } public int getRowHeight(boolean forceUpdate) { if (tBodyMeasurementsDone && !forceUpdate) { return rowHeight; } else { if (tBodyElement.getRows().getLength() > 0) { int tableHeight = getTableHeight(); int rowCount = tBodyElement.getRows().getLength(); rowHeight = tableHeight / rowCount; } else { if (isAttached()) { // measure row height by adding a dummy row VScrollTableRow scrollTableRow = new VScrollTableRow(); tBodyElement.appendChild(scrollTableRow.getElement()); getRowHeight(forceUpdate); tBodyElement.removeChild(scrollTableRow.getElement()); } else { // TODO investigate if this can never happen anymore return 
DEFAULT_ROW_HEIGHT; } } tBodyMeasurementsDone = true; return rowHeight; } } public int getTableHeight() { return table.getOffsetHeight(); } /** * Returns the width available for column content. * * @param columnIndex * @return */ public int getColWidth(int columnIndex) { if (tBodyMeasurementsDone) { NodeList<TableRowElement> rows = tBodyElement.getRows(); if (rows.getLength() == 0) { // no rows yet rendered return 0; } else { com.google.gwt.dom.client.Element wrapperdiv = rows .getItem(0).getCells().getItem(columnIndex) .getFirstChildElement(); return wrapperdiv.getOffsetWidth(); } } else { return 0; } } /** * Sets the content width of a column. * * Due IE limitation, we must set the width to a wrapper elements inside * table cells (with overflow hidden, which does not work on td * elements). * * To get this work properly crossplatform, we will also set the width * of td. * * @param colIndex * @param w */ public void setColWidth(int colIndex, int w) { NodeList<TableRowElement> rows2 = tBodyElement.getRows(); final int rows = rows2.getLength(); for (int i = 0; i < rows; i++) { TableRowElement row = rows2.getItem(i); TableCellElement cell = row.getCells().getItem(colIndex); cell.getFirstChildElement().getStyle() .setPropertyPx("width", w); cell.getStyle().setPropertyPx("width", w); } } private int cellExtraWidth = -1; /** * Method to return the space used for cell paddings + border. */ private int getCellExtraWidth() { if (cellExtraWidth < 0) { detectExtrawidth(); } return cellExtraWidth; } private void detectExtrawidth() { NodeList<TableRowElement> rows = tBodyElement.getRows(); if (rows.getLength() == 0) { /* need to temporary add empty row and detect */ VScrollTableRow scrollTableRow = new VScrollTableRow(); tBodyElement.appendChild(scrollTableRow.getElement()); detectExtrawidth(); tBodyElement.removeChild(scrollTableRow.getElement()); } else { boolean noCells = false; TableRowElement item = rows.getItem(0); TableCellElement firstTD = item.getCells().getItem(0); if (firstTD == null) { // content is currently empty, we need to add a fake cell // for measuring noCells = true; VScrollTableRow next = (VScrollTableRow) iterator().next(); next.addCell("", ALIGN_LEFT, "", true); firstTD = item.getCells().getItem(0); } com.google.gwt.dom.client.Element wrapper = firstTD .getFirstChildElement(); cellExtraWidth = firstTD.getOffsetWidth() - wrapper.getOffsetWidth(); if (noCells) { firstTD.getParentElement().removeChild(firstTD); } } } private void reLayoutComponents() { for (Widget w : this) { VScrollTableRow r = (VScrollTableRow) w; for (Widget widget : r) { client.handleComponentRelativeSize(widget); } } } public int getLastRendered() { return lastRendered; } public int getFirstRendered() { return firstRendered; } public void moveCol(int oldIndex, int newIndex) { // loop all rows and move given index to its new place final Iterator<?> rows = iterator(); while (rows.hasNext()) { final VScrollTableRow row = (VScrollTableRow) rows.next(); final Element td = DOM.getChild(row.getElement(), oldIndex); DOM.removeChild(row.getElement(), td); DOM.insertChild(row.getElement(), td, newIndex); } } /** * Restore row visibility which is set to "none" when the row is * rendered (due a performance optimization). 
*/ private void restoreRowVisibility() { for (Widget row : renderedRows) { row.getElement().getStyle().setProperty("visibility", ""); } } public class VScrollTableRow extends Panel implements ActionOwner, Container { ArrayList<Widget> childWidgets = new ArrayList<Widget>(); private boolean selected = false; private final int rowKey; private List<UIDL> pendingComponentPaints; private String[] actionKeys = null; private final TableRowElement rowElement; private VScrollTableRow(int rowKey) { this.rowKey = rowKey; rowElement = Document.get().createTRElement(); setElement(rowElement); DOM.sinkEvents(getElement(), Event.ONMOUSEUP | Event.ONDBLCLICK | Event.ONCONTEXTMENU); } private void paintComponent(Paintable p, UIDL uidl) { if (isAttached()) { p.updateFromUIDL(uidl, client); } else { if (pendingComponentPaints == null) { pendingComponentPaints = new LinkedList<UIDL>(); } pendingComponentPaints.add(uidl); } } @Override protected void onAttach() { super.onAttach(); if (pendingComponentPaints != null) { for (UIDL uidl : pendingComponentPaints) { Paintable paintable = client.getPaintable(uidl); paintable.updateFromUIDL(uidl, client); } } } public String getKey() { return String.valueOf(rowKey); } public VScrollTableRow(UIDL uidl, char[] aligns) { this(uidl.getIntAttribute("key")); /* * Rendering the rows as hidden improves Firefox and Safari * performance drastically. */ getElement().getStyle().setProperty("visibility", "hidden"); String rowStyle = uidl.getStringAttribute("rowstyle"); if (rowStyle != null) { addStyleName(CLASSNAME + "-row-" + rowStyle); } tHead.getColumnAlignments(); int col = 0; int visibleColumnIndex = -1; // row header if (showRowHeaders) { addCell(buildCaptionHtmlSnippet(uidl), aligns[col++], "", true); } if (uidl.hasAttribute("al")) { actionKeys = uidl.getStringArrayAttribute("al"); } final Iterator<?> cells = uidl.getChildIterator(); while (cells.hasNext()) { final Object cell = cells.next(); visibleColumnIndex++; String columnId = visibleColOrder[visibleColumnIndex]; String style = ""; if (uidl.hasAttribute("style-" + columnId)) { style = uidl.getStringAttribute("style-" + columnId); } if (cell instanceof String) { addCell(cell.toString(), aligns[col++], style, false); } else { final Paintable cellContent = client .getPaintable((UIDL) cell); addCell((Widget) cellContent, aligns[col++], style); paintComponent(cellContent, (UIDL) cell); } } if (uidl.hasAttribute("selected") && !isSelected()) { toggleSelection(); } } /** * Add a dummy row, used for measurements if Table is empty. 
*/ public VScrollTableRow() { this(0); addStyleName(CLASSNAME + "-row"); addCell("_", 'b', "", true); } public void addCell(String text, char align, String style, boolean textIsHTML) { // String only content is optimized by not using Label widget final Element td = DOM.createTD(); final Element container = DOM.createDiv(); String className = CLASSNAME + "-cell-content"; if (style != null && !style.equals("")) { className += " " + CLASSNAME + "-cell-content-" + style; } td.setClassName(className); container.setClassName(CLASSNAME + "-cell-wrapper"); if (textIsHTML) { container.setInnerHTML(text); } else { container.setInnerText(text); } if (align != ALIGN_LEFT) { switch (align) { case ALIGN_CENTER: container.getStyle().setProperty("textAlign", "center"); break; case ALIGN_RIGHT: default: container.getStyle().setProperty("textAlign", "right"); break; } } td.appendChild(container); getElement().appendChild(td); } public void addCell(Widget w, char align, String style) { final Element td = DOM.createTD(); final Element container = DOM.createDiv(); String className = CLASSNAME + "-cell-content"; if (style != null && !style.equals("")) { className += " " + CLASSNAME + "-cell-content-" + style; } td.setClassName(className); container.setClassName(CLASSNAME + "-cell-wrapper"); // TODO most components work with this, but not all (e.g. // Select) // Old comment: make widget cells respect align. // text-align:center for IE, margin: auto for others if (align != ALIGN_LEFT) { switch (align) { case ALIGN_CENTER: container.getStyle().setProperty("textAlign", "center"); break; case ALIGN_RIGHT: default: container.getStyle().setProperty("textAlign", "right"); break; } } td.appendChild(container); getElement().appendChild(td); // ensure widget not attached to another element (possible tBody // change) w.removeFromParent(); container.appendChild(w.getElement()); adopt(w); childWidgets.add(w); } public Iterator<Widget> iterator() { return childWidgets.iterator(); } @Override public boolean remove(Widget w) { if (childWidgets.contains(w)) { orphan(w); DOM.removeChild(DOM.getParent(w.getElement()), w .getElement()); childWidgets.remove(w); return true; } else { return false; } } private void handleClickEvent(Event event, Element targetTdOrTr) { if (client.hasEventListeners(VScrollTable.this, ITEM_CLICK_EVENT_ID)) { boolean doubleClick = (DOM.eventGetType(event) == Event.ONDBLCLICK); /* This row was clicked */ client.updateVariable(paintableId, "clickedKey", "" + rowKey, false); if (getElement() == targetTdOrTr.getParentElement()) { /* A specific column was clicked */ int childIndex = DOM.getChildIndex(getElement(), targetTdOrTr); String colKey = null; colKey = tHead.getHeaderCell(childIndex).getColKey(); client.updateVariable(paintableId, "clickedColKey", colKey, false); } MouseEventDetails details = new MouseEventDetails(event); // Note: the 'immediate' logic would need to be more // involved (see #2104), but iscrolltable always sends // select event, even though nullselectionallowed wont let // the change trough. Will need to be updated if that is // changed. 
client .updateVariable( paintableId, "clickEvent", details.toString(), !(event.getButton() == Event.BUTTON_LEFT && !doubleClick && selectMode > Table.SELECT_MODE_NONE && immediate)); } } /* * React on click that occur on content cells only */ @Override public void onBrowserEvent(Event event) { if (enabled) { Element targetTdOrTr = getEventTargetTdOrTr(event); if (targetTdOrTr != null) { switch (DOM.eventGetType(event)) { case Event.ONDBLCLICK: handleClickEvent(event, targetTdOrTr); break; case Event.ONMOUSEUP: handleClickEvent(event, targetTdOrTr); if (event.getButton() == Event.BUTTON_LEFT && selectMode > Table.SELECT_MODE_NONE) { toggleSelection(); // Note: changing the immediateness of this // might // require changes to "clickEvent" immediateness // also. client .updateVariable( paintableId, "selected", selectedRowKeys .toArray(new String[selectedRowKeys .size()]), immediate); } break; case Event.ONCONTEXTMENU: showContextMenu(event); break; default: break; } } } super.onBrowserEvent(event); } /** * Finds the TD that the event interacts with. Returns null if the * target of the event should not be handled. If the event target is * the row directly this method returns the TR element instead of * the TD. * * @param event * @return TD or TR element that the event targets (the actual event * target is this element or a child of it) */ private Element getEventTargetTdOrTr(Event event) { Element targetTdOrTr = null; final Element eventTarget = DOM.eventGetTarget(event); final Element eventTargetParent = DOM.getParent(eventTarget); final Element eventTargetGrandParent = DOM .getParent(eventTargetParent); final Element thisTrElement = getElement(); if (eventTarget == thisTrElement) { // This was a click on the TR element targetTdOrTr = eventTarget; // rowTarget = true; } else if (thisTrElement == eventTargetParent) { // Target parent is the TR, so the actual target is the TD targetTdOrTr = eventTarget; } else if (thisTrElement == eventTargetGrandParent) { // Target grand parent is the TR, so the parent is the TD targetTdOrTr = eventTargetParent; } else { /* * This is a workaround to make Labels and Embedded in a * Table clickable (see #2688). It is really not a fix as it * does not work for a custom component (not extending * VLabel/VEmbedded) or for read only textfields etc. 
*/ Element tdElement = eventTargetParent; while (DOM.getParent(tdElement) != thisTrElement) { tdElement = DOM.getParent(tdElement); } Element componentElement = tdElement.getFirstChildElement() .getFirstChildElement().cast(); Widget widget = (Widget) client .getPaintable(componentElement); if (widget instanceof VLabel || widget instanceof VEmbedded) { targetTdOrTr = tdElement; } } return targetTdOrTr; } public void showContextMenu(Event event) { if (enabled && actionKeys != null) { int left = event.getClientX(); int top = event.getClientY(); top += Window.getScrollTop(); left += Window.getScrollLeft(); client.getContextMenu().showAt(this, left, top); } event.cancelBubble(true); event.preventDefault(); } public boolean isSelected() { return selected; } private void toggleSelection() { selected = !selected; if (selected) { if (selectMode == Table.SELECT_MODE_SINGLE) { deselectAll(); } selectedRowKeys.add(String.valueOf(rowKey)); addStyleName("v-selected"); } else { selectedRowKeys.remove(String.valueOf(rowKey)); removeStyleName("v-selected"); } } /* * (non-Javadoc) * * @see com.vaadin.terminal.gwt.client.ui.IActionOwner#getActions () */ public Action[] getActions() { if (actionKeys == null) { return new Action[] {}; } final Action[] actions = new Action[actionKeys.length]; for (int i = 0; i < actions.length; i++) { final String actionKey = actionKeys[i]; final TreeAction a = new TreeAction(this, String .valueOf(rowKey), actionKey); a.setCaption(getActionCaption(actionKey)); a.setIconUrl(getActionIcon(actionKey)); actions[i] = a; } return actions; } public ApplicationConnection getClient() { return client; } public String getPaintableId() { return paintableId; } public RenderSpace getAllocatedSpace(Widget child) { int w = 0; int i = getColIndexOf(child); HeaderCell headerCell = tHead.getHeaderCell(i); if (headerCell != null) { if (initializedAndAttached) { w = headerCell.getWidth(); } else { // header offset width is not absolutely correct value, // but a best guess (expecting similar content in all // columns -> // if one component is relative width so are others) w = headerCell.getOffsetWidth() - getCellExtraWidth(); } } return new RenderSpace(w, 0) { @Override public int getHeight() { return getRowHeight(); } }; } private int getColIndexOf(Widget child) { com.google.gwt.dom.client.Element widgetCell = child .getElement().getParentElement().getParentElement(); NodeList<TableCellElement> cells = rowElement.getCells(); for (int i = 0; i < cells.getLength(); i++) { if (cells.getItem(i) == widgetCell) { return i; } } return -1; } public boolean hasChildComponent(Widget component) { return childWidgets.contains(component); } public void replaceChildComponent(Widget oldComponent, Widget newComponent) { com.google.gwt.dom.client.Element parentElement = oldComponent .getElement().getParentElement(); int index = childWidgets.indexOf(oldComponent); oldComponent.removeFromParent(); parentElement.appendChild(newComponent.getElement()); childWidgets.add(index, newComponent); adopt(newComponent); } public boolean requestLayout(Set<Paintable> children) { // row size should never change and system wouldn't event // survive as this is a kind of fake paitable return true; } public void updateCaption(Paintable component, UIDL uidl) { // NOP, not rendered } public void updateFromUIDL(UIDL uidl, ApplicationConnection client) { // Should never be called, // Component container interface faked here to get layouts // render properly } } } public void deselectAll() { final Object[] keys = selectedRowKeys.toArray(); 
for (int i = 0; i < keys.length; i++) { final VScrollTableRow row = getRenderedRowByKey((String) keys[i]); if (row != null && row.isSelected()) { row.toggleSelection(); } } // still ensure all selects are removed from (not necessary rendered) selectedRowKeys.clear(); } /** * Determines the pagelength when the table height is fixed. */ public void updatePageLength() { if (scrollBody == null) { return; } if (height == null || height.equals("")) { return; } int rowHeight = scrollBody.getRowHeight(); int bodyH = bodyContainer.getOffsetHeight(); int rowsAtOnce = bodyH / rowHeight; boolean anotherPartlyVisible = ((bodyH % rowHeight) != 0); if (anotherPartlyVisible) { rowsAtOnce++; } if (pageLength != rowsAtOnce) { pageLength = rowsAtOnce; client.updateVariable(paintableId, "pagelength", pageLength, false); if (!rendering) { int currentlyVisible = scrollBody.lastRendered - scrollBody.firstRendered; if (currentlyVisible < pageLength && currentlyVisible < totalRows) { // shake scrollpanel to fill empty space bodyContainer.setScrollPosition(bodyContainer .getScrollPosition() + 1); bodyContainer.setScrollPosition(bodyContainer .getScrollPosition() - 1); } } } } @Override public void setWidth(String width) { if (this.width.equals(width)) { return; } this.width = width; if (width != null && !"".equals(width)) { super.setWidth(width); int innerPixels = getOffsetWidth() - getBorderWidth(); if (innerPixels < 0) { innerPixels = 0; } setContentWidth(innerPixels); if (!rendering) { // readjust undefined width columns lazyAdjustColumnWidths.cancel(); lazyAdjustColumnWidths.schedule(LAZY_COLUMN_ADJUST_TIMEOUT); } } else { super.setWidth(""); } } private static final int LAZY_COLUMN_ADJUST_TIMEOUT = 300; private final Timer lazyAdjustColumnWidths = new Timer() { /** * Check for column widths, and available width, to see if we can fix * column widths "optimally". Doing this lazily to avoid expensive * calculation when resizing is not yet finished. */ @Override public void run() { Iterator<Widget> headCells = tHead.iterator(); int usedMinimumWidth = 0; int totalExplicitColumnsWidths = 0; float expandRatioDivider = 0; int colIndex = 0; while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); if (hCell.isDefinedWidth()) { totalExplicitColumnsWidths += hCell.getWidth(); usedMinimumWidth += hCell.getWidth(); } else { usedMinimumWidth += hCell.getNaturalColumnWidth(colIndex); expandRatioDivider += hCell.getExpandRatio(); } colIndex++; } int availW = scrollBody.getAvailableWidth(); // Hey IE, are you really sure about this? 
availW = scrollBody.getAvailableWidth(); int visibleCellCount = tHead.getVisibleCellCount(); availW -= scrollBody.getCellExtraWidth() * visibleCellCount; if (willHaveScrollbars()) { availW -= Util.getNativeScrollbarSize(); } int extraSpace = availW - usedMinimumWidth; if (extraSpace < 0) { extraSpace = 0; } int totalUndefinedNaturaWidths = usedMinimumWidth - totalExplicitColumnsWidths; // we have some space that can be divided optimally HeaderCell hCell; colIndex = 0; headCells = tHead.iterator(); while (headCells.hasNext()) { hCell = (HeaderCell) headCells.next(); if (!hCell.isDefinedWidth()) { int w = hCell.getNaturalColumnWidth(colIndex); int newSpace; if (expandRatioDivider > 0) { // divide excess space by expand ratios newSpace = (int) (w + extraSpace * hCell.getExpandRatio() / expandRatioDivider); } else { if (totalUndefinedNaturaWidths != 0) { // divide relatively to natural column widths newSpace = w + extraSpace * w / totalUndefinedNaturaWidths; } else { newSpace = w; } } setColWidth(colIndex, newSpace, false); } colIndex++; } scrollBody.reLayoutComponents(); DeferredCommand.addCommand(new Command() { public void execute() { Util.runWebkitOverflowAutoFix(bodyContainer.getElement()); } }); } }; /** * helper to set pixel size of head and body part * * @param pixels */ private void setContentWidth(int pixels) { tHead.setWidth(pixels + "px"); bodyContainer.setWidth(pixels + "px"); } private int borderWidth = -1; /** * @return border left + border right */ private int getBorderWidth() { if (borderWidth < 0) { borderWidth = Util.measureHorizontalPaddingAndBorder(bodyContainer .getElement(), 2); if (borderWidth < 0) { borderWidth = 0; } } return borderWidth; } /** * Ensures scrollable area is properly sized. This method is used when fixed * size is used. */ private void setContainerHeight() { if (height != null && !"".equals(height)) { int contentH = getOffsetHeight() - tHead.getOffsetHeight(); contentH -= getContentAreaBorderHeight(); if (contentH < 0) { contentH = 0; } bodyContainer.setHeight(contentH + "px"); } } private int contentAreaBorderHeight = -1; /** * @return border top + border bottom of the scrollable area of table */ private int getContentAreaBorderHeight() { if (contentAreaBorderHeight < 0) { if (BrowserInfo.get().isIE7()) { contentAreaBorderHeight = Util .measureVerticalBorder(bodyContainer.getElement()); } else { DOM.setStyleAttribute(bodyContainer.getElement(), "overflow", "hidden"); int oh = bodyContainer.getOffsetHeight(); int ch = bodyContainer.getElement().getPropertyInt( "clientHeight"); contentAreaBorderHeight = oh - ch; DOM.setStyleAttribute(bodyContainer.getElement(), "overflow", "auto"); } } return contentAreaBorderHeight; } @Override public void setHeight(String height) { this.height = height; super.setHeight(height); setContainerHeight(); if (initializedAndAttached) { updatePageLength(); } } /* * Overridden due Table might not survive of visibility change (scroll pos * lost). Example ITabPanel just set contained components invisible and back * when changing tabs. 
*/ @Override public void setVisible(boolean visible) { if (isVisible() != visible) { super.setVisible(visible); if (initializedAndAttached) { if (visible) { DeferredCommand.addCommand(new Command() { public void execute() { bodyContainer.setScrollPosition(firstRowInViewPort * scrollBody.getRowHeight()); } }); } } } } /** * Helper function to build html snippet for column or row headers * * @param uidl * possibly with values caption and icon * @return html snippet containing possibly an icon + caption text */ private String buildCaptionHtmlSnippet(UIDL uidl) { String s = uidl.getStringAttribute("caption"); if (uidl.hasAttribute("icon")) { s = "<img src=\"" + client .translateVaadinUri(uidl.getStringAttribute("icon")) + "\" alt=\"icon\" class=\"v-icon\">" + s; } return s; } /** * This method has logic which rows needs to be requested from server when * user scrolls */ public void onScroll(ScrollEvent event) { int scrollLeft = bodyContainer.getElement().getScrollLeft(); int scrollTop = bodyContainer.getScrollPosition(); if (!initializedAndAttached) { return; } if (!enabled) { bodyContainer.setScrollPosition(firstRowInViewPort * scrollBody.getRowHeight()); return; } rowRequestHandler.cancel(); // fix headers horizontal scrolling tHead.setHorizontalScrollPosition(scrollLeft); firstRowInViewPort = (int) Math.ceil(scrollTop / (double) scrollBody.getRowHeight()); if (firstRowInViewPort > totalRows - pageLength) { firstRowInViewPort = totalRows - pageLength; } int postLimit = (int) (firstRowInViewPort + (pageLength - 1) + pageLength * cache_react_rate); if (postLimit > totalRows - 1) { postLimit = totalRows - 1; } int preLimit = (int) (firstRowInViewPort - pageLength * cache_react_rate); if (preLimit < 0) { preLimit = 0; } final int lastRendered = scrollBody.getLastRendered(); final int firstRendered = scrollBody.getFirstRendered(); if (postLimit <= lastRendered && preLimit >= firstRendered) { // remember which firstvisible we requested, in case the server has // a differing opinion lastRequestedFirstvisible = firstRowInViewPort; client.updateVariable(paintableId, "firstvisible", firstRowInViewPort, false); return; // scrolled withing "non-react area" } if (firstRowInViewPort - pageLength * cache_rate > lastRendered || firstRowInViewPort + pageLength + pageLength * cache_rate < firstRendered) { // need a totally new set rowRequestHandler .setReqFirstRow((firstRowInViewPort - (int) (pageLength * cache_rate))); int last = firstRowInViewPort + (int) (cache_rate * pageLength) + pageLength - 1; if (last >= totalRows) { last = totalRows - 1; } rowRequestHandler.setReqRows(last - rowRequestHandler.getReqFirstRow() + 1); rowRequestHandler.deferRowFetch(); return; } if (preLimit < firstRendered) { // need some rows to the beginning of the rendered area rowRequestHandler .setReqFirstRow((int) (firstRowInViewPort - pageLength * cache_rate)); rowRequestHandler.setReqRows(firstRendered - rowRequestHandler.getReqFirstRow()); rowRequestHandler.deferRowFetch(); return; } if (postLimit > lastRendered) { // need some rows to the end of the rendered area rowRequestHandler.setReqFirstRow(lastRendered + 1); rowRequestHandler.setReqRows((int) ((firstRowInViewPort + pageLength + pageLength * cache_rate) - lastRendered)); rowRequestHandler.deferRowFetch(); } } }
true
true
private void sizeInit() { /* * We will use browsers table rendering algorithm to find proper column * widths. If content and header take less space than available, we will * divide extra space relatively to each column which has not width set. * * Overflow pixels are added to last column. */ Iterator<Widget> headCells = tHead.iterator(); int i = 0; int totalExplicitColumnsWidths = 0; int total = 0; float expandRatioDivider = 0; final int[] widths = new int[tHead.visibleCells.size()]; tHead.enableBrowserIntelligence(); // first loop: collect natural widths while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); int w = hCell.getWidth(); if (hCell.isDefinedWidth()) { // server has defined column width explicitly totalExplicitColumnsWidths += w; } else { if (hCell.getExpandRatio() > 0) { expandRatioDivider += hCell.getExpandRatio(); w = 0; } else { // get and store greater of header width and column width, // and // store it as a minimumn natural col width w = hCell.getNaturalColumnWidth(i); } hCell.setNaturalMinimumColumnWidth(w); } widths[i] = w; total += w; i++; } tHead.disableBrowserIntelligence(); boolean willHaveScrollbarz = willHaveScrollbars(); // fix "natural" width if width not set if (width == null || "".equals(width)) { int w = total; w += scrollBody.getCellExtraWidth() * visibleColOrder.length; if (willHaveScrollbarz) { w += Util.getNativeScrollbarSize(); } setContentWidth(w); } int availW = scrollBody.getAvailableWidth(); if (BrowserInfo.get().isIE()) { // Hey IE, are you really sure about this? availW = scrollBody.getAvailableWidth(); } availW -= scrollBody.getCellExtraWidth() * visibleColOrder.length; if (willHaveScrollbarz) { availW -= Util.getNativeScrollbarSize(); } // TODO refactor this code to be the same as in resize timer boolean needsReLayout = false; if (availW > total) { // natural size is smaller than available space final int extraSpace = availW - total; final int totalWidthR = total - totalExplicitColumnsWidths; needsReLayout = true; if (expandRatioDivider > 0) { // visible columns have some active expand ratios, excess // space is divided according to them headCells = tHead.iterator(); i = 0; while (headCells.hasNext()) { HeaderCell hCell = (HeaderCell) headCells.next(); if (hCell.getExpandRatio() > 0) { int w = widths[i]; final int newSpace = (int) (extraSpace * (hCell .getExpandRatio() / expandRatioDivider)); w += newSpace; widths[i] = w; } i++; } } else if (totalWidthR > 0) { // no expand ratios defined, we will share extra space // relatively to "natural widths" among those without // explicit width headCells = tHead.iterator(); i = 0; while (headCells.hasNext()) { HeaderCell hCell = (HeaderCell) headCells.next(); if (!hCell.isDefinedWidth()) { int w = widths[i]; final int newSpace = extraSpace * w / totalWidthR; w += newSpace; widths[i] = w; } i++; } } } else { // bodys size will be more than available and scrollbar will appear } // last loop: set possibly modified values or reset if new tBody i = 0; headCells = tHead.iterator(); while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); if (isNewBody || hCell.getWidth() == -1) { final int w = widths[i]; setColWidth(i, w, false); } i++; } initializedAndAttached = true; if (needsReLayout) { scrollBody.reLayoutComponents(); } updatePageLength(); /* * Fix "natural" height if height is not set. This must be after width * fixing so the components' widths have been adjusted. 
*/ if (height == null || "".equals(height)) { /* * We must force an update of the row height as this point as it * might have been (incorrectly) calculated earlier */ if (pageLength == totalRows) { /* * A hack to support variable height rows when paging is off. * Generally this is not supported by scrolltable. We want to * show all rows so the bodyHeight should be equal to the table * height. */ // int bodyHeight = scrollBody.getOffsetHeight(); int bodyHeight = scrollBody.getRequiredHeight(); bodyContainer.setHeight(bodyHeight + "px"); Util.runWebkitOverflowAutoFix(bodyContainer.getElement()); } else { int bodyHeight = (scrollBody.getRowHeight(true) * pageLength); bodyContainer.setHeight(bodyHeight + "px"); } } isNewBody = false; if (firstvisible > 0) { // Deferred due some Firefox oddities. IE & Safari could survive // without DeferredCommand.addCommand(new Command() { public void execute() { bodyContainer.setScrollPosition(firstvisible * scrollBody.getRowHeight()); firstRowInViewPort = firstvisible; } }); } if (enabled) { // Do we need cache rows if (scrollBody.getLastRendered() + 1 < firstRowInViewPort + pageLength + (int) cache_react_rate * pageLength) { if (totalRows - 1 > scrollBody.getLastRendered()) { // fetch cache rows int firstInNewSet = scrollBody.getLastRendered() + 1; rowRequestHandler.setReqFirstRow(firstInNewSet); int lastInNewSet = (int) (firstRowInViewPort + pageLength + cache_rate * pageLength); if (lastInNewSet > totalRows - 1) { lastInNewSet = totalRows - 1; } rowRequestHandler.setReqRows(lastInNewSet - firstInNewSet); rowRequestHandler.deferRowFetch(1); } } } }
private void sizeInit() { /* * We will use browsers table rendering algorithm to find proper column * widths. If content and header take less space than available, we will * divide extra space relatively to each column which has not width set. * * Overflow pixels are added to last column. */ Iterator<Widget> headCells = tHead.iterator(); int i = 0; int totalExplicitColumnsWidths = 0; int total = 0; float expandRatioDivider = 0; final int[] widths = new int[tHead.visibleCells.size()]; tHead.enableBrowserIntelligence(); // first loop: collect natural widths while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); int w = hCell.getWidth(); if (hCell.isDefinedWidth()) { // server has defined column width explicitly totalExplicitColumnsWidths += w; } else { if (hCell.getExpandRatio() > 0) { expandRatioDivider += hCell.getExpandRatio(); w = 0; } else { // get and store greater of header width and column width, // and // store it as a minimumn natural col width w = hCell.getNaturalColumnWidth(i); } hCell.setNaturalMinimumColumnWidth(w); } widths[i] = w; total += w; i++; } tHead.disableBrowserIntelligence(); boolean willHaveScrollbarz = willHaveScrollbars(); // fix "natural" width if width not set if (width == null || "".equals(width)) { int w = total; w += scrollBody.getCellExtraWidth() * visibleColOrder.length; if (willHaveScrollbarz) { w += Util.getNativeScrollbarSize(); } setContentWidth(w); } int availW = scrollBody.getAvailableWidth(); if (BrowserInfo.get().isIE()) { // Hey IE, are you really sure about this? availW = scrollBody.getAvailableWidth(); } availW -= scrollBody.getCellExtraWidth() * visibleColOrder.length; if (willHaveScrollbarz) { availW -= Util.getNativeScrollbarSize(); } // TODO refactor this code to be the same as in resize timer boolean needsReLayout = false; if (availW > total) { // natural size is smaller than available space final int extraSpace = availW - total; final int totalWidthR = total - totalExplicitColumnsWidths; needsReLayout = true; if (expandRatioDivider > 0) { // visible columns have some active expand ratios, excess // space is divided according to them headCells = tHead.iterator(); i = 0; while (headCells.hasNext()) { HeaderCell hCell = (HeaderCell) headCells.next(); if (hCell.getExpandRatio() > 0) { int w = widths[i]; final int newSpace = (int) (extraSpace * (hCell .getExpandRatio() / expandRatioDivider)); w += newSpace; widths[i] = w; } i++; } } else if (totalWidthR > 0) { // no expand ratios defined, we will share extra space // relatively to "natural widths" among those without // explicit width headCells = tHead.iterator(); i = 0; while (headCells.hasNext()) { HeaderCell hCell = (HeaderCell) headCells.next(); if (!hCell.isDefinedWidth()) { int w = widths[i]; final int newSpace = extraSpace * w / totalWidthR; w += newSpace; widths[i] = w; } i++; } } } else { // bodys size will be more than available and scrollbar will appear } // last loop: set possibly modified values or reset if new tBody i = 0; headCells = tHead.iterator(); while (headCells.hasNext()) { final HeaderCell hCell = (HeaderCell) headCells.next(); if (isNewBody || hCell.getWidth() == -1) { final int w = widths[i]; setColWidth(i, w, false); } i++; } initializedAndAttached = true; if (needsReLayout) { scrollBody.reLayoutComponents(); } updatePageLength(); /* * Fix "natural" height if height is not set. This must be after width * fixing so the components' widths have been adjusted. 
*/ if (height == null || "".equals(height)) { /* * We must force an update of the row height as this point as it * might have been (incorrectly) calculated earlier */ if (pageLength == totalRows) { /* * A hack to support variable height rows when paging is off. * Generally this is not supported by scrolltable. We want to * show all rows so the bodyHeight should be equal to the table * height. */ // int bodyHeight = scrollBody.getOffsetHeight(); int bodyHeight = scrollBody.getRequiredHeight(); bodyContainer.setHeight(bodyHeight + "px"); Util.runWebkitOverflowAutoFix(bodyContainer.getElement()); } else { int bodyHeight = (scrollBody.getRowHeight(true) * pageLength); bodyContainer.setHeight(bodyHeight + "px"); } } isNewBody = false; if (firstvisible > 0) { // Deferred due some Firefox oddities. IE & Safari could survive // without DeferredCommand.addCommand(new Command() { public void execute() { bodyContainer.setScrollPosition(firstvisible * scrollBody.getRowHeight()); firstRowInViewPort = firstvisible; } }); } if (enabled) { // Do we need cache rows if (scrollBody.getLastRendered() + 1 < firstRowInViewPort + pageLength + (int) cache_react_rate * pageLength) { if (totalRows - 1 > scrollBody.getLastRendered()) { // fetch cache rows int firstInNewSet = scrollBody.getLastRendered() + 1; rowRequestHandler.setReqFirstRow(firstInNewSet); int lastInNewSet = (int) (firstRowInViewPort + pageLength + cache_rate * pageLength); if (lastInNewSet > totalRows - 1) { lastInNewSet = totalRows - 1; } rowRequestHandler.setReqRows(lastInNewSet - firstInNewSet + 1); rowRequestHandler.deferRowFetch(1); } } } }
diff --git a/src/org/nosco/Select.java b/src/org/nosco/Select.java index 1d4a485..54d9029 100644 --- a/src/org/nosco/Select.java +++ b/src/org/nosco/Select.java @@ -1,281 +1,288 @@ package org.nosco; import static org.nosco.Constants.DIRECTION.DESCENDING; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import org.nosco.Constants.DB_TYPE; import org.nosco.Constants.DIRECTION; import org.nosco.Field.FK; import org.nosco.util.Misc; import org.nosco.util.Tree.Callback; class Select<T extends Table> implements Iterable<T>, Iterator<T> { @Override protected void finalize() throws Throwable { super.finalize(); rs.close(); ps.close(); } private String sql; private QueryImpl<T> query; private PreparedStatement ps; private ResultSet rs; private T next; private Field<?>[] selectedFields; private Field<?>[] selectedBoundFields; private Object[] fieldValues; private Constructor<T> constructor; private Map<Field.FK,Constructor<T>> fkConstructors; private Map<Class<? extends Table>,Method> fkSetMethods = null; private Connection conn; @SuppressWarnings("unchecked") Select(QueryImpl<T> query) { this.query = query; try { constructor = (Constructor<T>) query.getType().getDeclaredConstructor( new Field[0].getClass(), new Object[0].getClass(), Integer.TYPE, Integer.TYPE); constructor.setAccessible(true); if (query.fks != null) { fkConstructors = new HashMap<Field.FK,Constructor<T>>(); fkSetMethods = new HashMap<Class<? extends Table>,Method>(); query.fks.visit(new Callback<Field.FK>() { public void call(FK fk, int depth, FK[] path) { if (fk==null && depth==0) return; try { Constructor<T> c = (Constructor<T>) fk.REFERENCED_FIELDS()[0].TABLE.getDeclaredConstructor( new Field[0].getClass(), new Object[0].getClass(), Integer.TYPE, Integer.TYPE); c.setAccessible(true); fkConstructors.put(fk, c); Class<? 
extends Table> reffedTable = fk.REFERENCED_FIELDS()[0].TABLE; Method setFKMethod = (Method) reffedTable.getDeclaredMethod("SET_FK", Field.FK.class, Object.class); setFKMethod.setAccessible(true); fkSetMethods.put(reffedTable, setFKMethod); } catch (SecurityException e) { e.printStackTrace(); } catch (NoSuchMethodException e) { e.printStackTrace(); } } }); Method setFKMethod = (Method) query.getType().getDeclaredMethod("SET_FK", Field.FK.class, Object.class); setFKMethod.setAccessible(true); fkSetMethods.put(query.getType(), setFKMethod); } } catch (SecurityException e) { e.printStackTrace(); } catch (NoSuchMethodException e) { e.printStackTrace(); } } protected String getSQL() { return getSQL(false); } protected String getSQL(boolean innerQuery) { if (sql==null) { selectedFields = query.getSelectFields(false); selectedBoundFields = query.getSelectFields(true); fieldValues = new Object[selectedFields.length]; StringBuffer sb = new StringBuffer(); sb.append("select "); if (query.distinct) sb.append("distinct "); if (query.getDBType()==DB_TYPE.SQLSERVER && query.top>0) { sb.append(" top ").append(query.top).append(" "); } if (query.globallyAppliedSelectFunction == null) { sb.append(Misc.join(", ", selectedBoundFields)); } else { String[] x = new String[selectedBoundFields.length]; for (int i=0; i < x.length; ++i) { x[i] = query.globallyAppliedSelectFunction + "("+ selectedBoundFields[i] +")"; } sb.append(Misc.join(", ", x)); } sb.append(" from "); sb.append(Misc.join(", ", query.getTableNameList())); sb.append(query.getWhereClauseAndSetBindings()); List<DIRECTION> directions = query.getOrderByDirections(); List<Field<?>> fields = query.getOrderByFields(); if (!innerQuery && directions!=null & fields!=null) { sb.append(" order by "); int x = Math.min(directions.size(), fields.size()); String[] tmp = new String[x]; for (int i=0; i<x; ++i) { DIRECTION direction = directions.get(i); tmp[i] = fields.get(i) + (direction==DESCENDING ? 
" DESC" : ""); } sb.append(Misc.join(", ", tmp)); } if (query.getDBType()!=DB_TYPE.SQLSERVER && query.top>0) { sb.append(" limit ").append(query.top); } if (innerQuery && selectedFields.length > 1) { Misc.log(sb.toString(), null); throw new RuntimeException("inner queries cannot have more than one selected" + "field - this query has "+ selectedFields.length); } sql = sb.toString(); } return sql; } protected List<Object> getSQLBindings() { return query.getSQLBindings(); } @Override public Iterator<T> iterator() { try { conn = query.getConnR(); ps = conn.prepareStatement(getSQL()); Misc.log(sql, query.bindings); query.setBindings(ps); ps.execute(); rs = ps.getResultSet(); //m = query.getType().getMethod("INSTANTIATE", Map.class); } catch (SQLException e) { e.printStackTrace(); } catch (SecurityException e) { e.printStackTrace(); } return this; } @Override public boolean hasNext() { if (next!=null) return true; try { if (!rs.next()) { cleanUp(); return false; } for (int i=0; i<selectedFields.length; ++i) { - if (selectedFields[i].TYPE == Long.class) fieldValues[i] = rs.getLong(i+1); else - if (selectedFields[i].TYPE == Double.class) fieldValues[i] = rs.getDouble(i+1); else - fieldValues[i] = rs.getObject(i+1); + if (selectedFields[i].TYPE == Long.class) fieldValues[i] = rs.getLong(i+1); + else if (selectedFields[i].TYPE == Double.class) { + fieldValues[i] = rs.getDouble(i+1); + if (rs.wasNull()) fieldValues[i] = null; + } + else if (selectedFields[i].TYPE == Character.class) { + String s = rs.getString(i+1); + if (s != null && s.length() > 0) fieldValues[i] = s.charAt(0); + } + else fieldValues[i] = rs.getObject(i+1); } Object[] objects = new Object[query.tableInfos.size()]; for (int i=0; i<query.tableInfos.size(); ++i) { QueryImpl.TableInfo ti = query.tableInfos.get(i); if (ti.path == null) { if (next != null) continue; //System.out.println(ti.start +" "+ ti.end); next = (T) constructor.newInstance(selectedFields, fieldValues, ti.start, ti.end); next.__NOSCO_GOT_FROM_DATABASE= true; objects[i] = next; } else { FK fk = ti.path[ti.path.length-1]; Table fkv = fkConstructors.get(fk).newInstance(selectedFields, fieldValues, ti.start, ti.end); objects[i] = fkv; } } for (int i=0; i<query.tableInfos.size(); ++i) { QueryImpl.TableInfo ti = query.tableInfos.get(i); for (int j=i+1; j<query.tableInfos.size(); ++j) { QueryImpl.TableInfo tj = query.tableInfos.get(j); if (tj.path == null) continue; if(startsWith(tj.path, ti.path)) { FK fk = tj.path[tj.path.length-1]; if (!fk.referencing.equals(objects[i].getClass())) continue; if (!fk.referenced.equals(objects[j].getClass())) continue; fkSetMethods.get(objects[i].getClass()).invoke(objects[i], fk, objects[j]); } } } /* if (fkConstructors != null) { Set<Table> createdObjects = new HashSet<Table>(); for (Entry<Field.FK, Constructor<T>> e : fkConstructors.entrySet()) { FK fk = e.getKey(); Table fkv = e.getValue().newInstance(selectedFields, fieldValues); Table refTable = (Table)fk.REFERENCING_FIELDS()[0].TABLE.newInstance(); if (next.sameTable(refTable)) { fkSetMethods.get(next.getClass()).invoke(next, fk, fkv); createdObjects.add(fkv); } else { for (Table t : createdObjects) { if (t.sameTable(refTable)) { fkSetMethods.get(t.getClass()).invoke(t, fk, fkv); createdObjects.add(fkv); } } } } }//*/ } catch (SQLException e) { e.printStackTrace(); } catch (IllegalArgumentException e) { e.printStackTrace(); } catch (IllegalAccessException e) { e.printStackTrace(); } catch (InvocationTargetException e) { e.printStackTrace(); } catch (InstantiationException e) { 
e.printStackTrace(); } boolean hasNext = next != null; if (!hasNext) cleanUp(); return hasNext; } private void cleanUp() { if (!ThreadContext.inTransaction(query.ds)) { try { conn.close(); } catch (SQLException e) { // TODO Auto-generated catch block e.printStackTrace(); } } } private boolean startsWith(FK[] path, FK[] path2) { if (path2 == null) return true; for (int i=0; i<path2.length; ++i) { if (path[i] != path2[i]) return false; } return true; } @Override public T next() { T t = next; next = null; return t; } @Override public void remove() { // TODO Auto-generated method stub } }
true
true
public boolean hasNext() {
    if (next!=null) return true;
    try {
        if (!rs.next()) {
            cleanUp();
            return false;
        }
        for (int i=0; i<selectedFields.length; ++i) {
            if (selectedFields[i].TYPE == Long.class) fieldValues[i] = rs.getLong(i+1); else
            if (selectedFields[i].TYPE == Double.class) fieldValues[i] = rs.getDouble(i+1); else
            fieldValues[i] = rs.getObject(i+1);
        }
        Object[] objects = new Object[query.tableInfos.size()];
        for (int i=0; i<query.tableInfos.size(); ++i) {
            QueryImpl.TableInfo ti = query.tableInfos.get(i);
            if (ti.path == null) {
                if (next != null) continue;
                //System.out.println(ti.start +" "+ ti.end);
                next = (T) constructor.newInstance(selectedFields, fieldValues, ti.start, ti.end);
                next.__NOSCO_GOT_FROM_DATABASE= true;
                objects[i] = next;
            } else {
                FK fk = ti.path[ti.path.length-1];
                Table fkv = fkConstructors.get(fk).newInstance(selectedFields, fieldValues, ti.start, ti.end);
                objects[i] = fkv;
            }
        }
        for (int i=0; i<query.tableInfos.size(); ++i) {
            QueryImpl.TableInfo ti = query.tableInfos.get(i);
            for (int j=i+1; j<query.tableInfos.size(); ++j) {
                QueryImpl.TableInfo tj = query.tableInfos.get(j);
                if (tj.path == null) continue;
                if(startsWith(tj.path, ti.path)) {
                    FK fk = tj.path[tj.path.length-1];
                    if (!fk.referencing.equals(objects[i].getClass())) continue;
                    if (!fk.referenced.equals(objects[j].getClass())) continue;
                    fkSetMethods.get(objects[i].getClass()).invoke(objects[i], fk, objects[j]);
                }
            }
        }
        /*
        if (fkConstructors != null) {
            Set<Table> createdObjects = new HashSet<Table>();
            for (Entry<Field.FK, Constructor<T>> e : fkConstructors.entrySet()) {
                FK fk = e.getKey();
                Table fkv = e.getValue().newInstance(selectedFields, fieldValues);
                Table refTable = (Table)fk.REFERENCING_FIELDS()[0].TABLE.newInstance();
                if (next.sameTable(refTable)) {
                    fkSetMethods.get(next.getClass()).invoke(next, fk, fkv);
                    createdObjects.add(fkv);
                } else {
                    for (Table t : createdObjects) {
                        if (t.sameTable(refTable)) {
                            fkSetMethods.get(t.getClass()).invoke(t, fk, fkv);
                            createdObjects.add(fkv);
                        }
                    }
                }
            }
        }//*/
    } catch (SQLException e) {
        e.printStackTrace();
    } catch (IllegalArgumentException e) {
        e.printStackTrace();
    } catch (IllegalAccessException e) {
        e.printStackTrace();
    } catch (InvocationTargetException e) {
        e.printStackTrace();
    } catch (InstantiationException e) {
        e.printStackTrace();
    }
    boolean hasNext = next != null;
    if (!hasNext) cleanUp();
    return hasNext;
}
public boolean hasNext() {
    if (next!=null) return true;
    try {
        if (!rs.next()) {
            cleanUp();
            return false;
        }
        for (int i=0; i<selectedFields.length; ++i) {
            if (selectedFields[i].TYPE == Long.class) fieldValues[i] = rs.getLong(i+1);
            else if (selectedFields[i].TYPE == Double.class) {
                fieldValues[i] = rs.getDouble(i+1);
                if (rs.wasNull()) fieldValues[i] = null;
            }
            else if (selectedFields[i].TYPE == Character.class) {
                String s = rs.getString(i+1);
                if (s != null && s.length() > 0) fieldValues[i] = s.charAt(0);
            }
            else fieldValues[i] = rs.getObject(i+1);
        }
        Object[] objects = new Object[query.tableInfos.size()];
        for (int i=0; i<query.tableInfos.size(); ++i) {
            QueryImpl.TableInfo ti = query.tableInfos.get(i);
            if (ti.path == null) {
                if (next != null) continue;
                //System.out.println(ti.start +" "+ ti.end);
                next = (T) constructor.newInstance(selectedFields, fieldValues, ti.start, ti.end);
                next.__NOSCO_GOT_FROM_DATABASE= true;
                objects[i] = next;
            } else {
                FK fk = ti.path[ti.path.length-1];
                Table fkv = fkConstructors.get(fk).newInstance(selectedFields, fieldValues, ti.start, ti.end);
                objects[i] = fkv;
            }
        }
        for (int i=0; i<query.tableInfos.size(); ++i) {
            QueryImpl.TableInfo ti = query.tableInfos.get(i);
            for (int j=i+1; j<query.tableInfos.size(); ++j) {
                QueryImpl.TableInfo tj = query.tableInfos.get(j);
                if (tj.path == null) continue;
                if(startsWith(tj.path, ti.path)) {
                    FK fk = tj.path[tj.path.length-1];
                    if (!fk.referencing.equals(objects[i].getClass())) continue;
                    if (!fk.referenced.equals(objects[j].getClass())) continue;
                    fkSetMethods.get(objects[i].getClass()).invoke(objects[i], fk, objects[j]);
                }
            }
        }
        /*
        if (fkConstructors != null) {
            Set<Table> createdObjects = new HashSet<Table>();
            for (Entry<Field.FK, Constructor<T>> e : fkConstructors.entrySet()) {
                FK fk = e.getKey();
                Table fkv = e.getValue().newInstance(selectedFields, fieldValues);
                Table refTable = (Table)fk.REFERENCING_FIELDS()[0].TABLE.newInstance();
                if (next.sameTable(refTable)) {
                    fkSetMethods.get(next.getClass()).invoke(next, fk, fkv);
                    createdObjects.add(fkv);
                } else {
                    for (Table t : createdObjects) {
                        if (t.sameTable(refTable)) {
                            fkSetMethods.get(t.getClass()).invoke(t, fk, fkv);
                            createdObjects.add(fkv);
                        }
                    }
                }
            }
        }//*/
    } catch (SQLException e) {
        e.printStackTrace();
    } catch (IllegalArgumentException e) {
        e.printStackTrace();
    } catch (IllegalAccessException e) {
        e.printStackTrace();
    } catch (InvocationTargetException e) {
        e.printStackTrace();
    } catch (InstantiationException e) {
        e.printStackTrace();
    }
    boolean hasNext = next != null;
    if (!hasNext) cleanUp();
    return hasNext;
}
diff --git a/src/org/jruby/RubyMatchData.java b/src/org/jruby/RubyMatchData.java index 425e04074..e86a7b01c 100644 --- a/src/org/jruby/RubyMatchData.java +++ b/src/org/jruby/RubyMatchData.java @@ -1,342 +1,342 @@ /***** BEGIN LICENSE BLOCK ***** * Version: CPL 1.0/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Common Public * License Version 1.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.eclipse.org/legal/cpl-v10.html * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Copyright (C) 2001 Alan Moore <[email protected]> * Copyright (C) 2001-2004 Jan Arne Petersen <[email protected]> * Copyright (C) 2002 Benoit Cerrina <[email protected]> * Copyright (C) 2002-2004 Anders Bengtsson <[email protected]> * Copyright (C) 2004 Thomas E Enebo <[email protected]> * Copyright (C) 2004 Charles O Nutter <[email protected]> * Copyright (C) 2004 Stefan Matthias Aust <[email protected]> * * Alternatively, the contents of this file may be used under the terms of * either of the GNU General Public License Version 2 or later (the "GPL"), * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the CPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the CPL, the GPL or the LGPL. ***** END LICENSE BLOCK *****/ package org.jruby; import org.joni.Regex; import org.joni.Region; import org.joni.exception.JOniException; import org.jruby.anno.JRubyMethod; import org.jruby.anno.JRubyClass; import org.jruby.runtime.Block; import org.jruby.runtime.ObjectAllocator; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; import org.jruby.util.ByteList; /** * @author olabini */ @JRubyClass(name="MatchData") public class RubyMatchData extends RubyObject { Region regs; // captures int begin; // begin and end are used when not groups defined int end; RubyString str; Regex pattern; public static RubyClass createMatchDataClass(Ruby runtime) { // TODO: Is NOT_ALLOCATABLE_ALLOCATOR ok here, since you can't actually instantiate MatchData directly? 
RubyClass matchDataClass = runtime.defineClass("MatchData", runtime.getObject(), ObjectAllocator.NOT_ALLOCATABLE_ALLOCATOR); runtime.setMatchData(matchDataClass); runtime.defineGlobalConstant("MatchingData", matchDataClass); matchDataClass.kindOf = new RubyModule.KindOf() { public boolean isKindOf(IRubyObject obj, RubyModule type) { return obj instanceof RubyMatchData; } }; matchDataClass.getMetaClass().undefineMethod("new"); matchDataClass.defineAnnotatedMethods(RubyMatchData.class); return matchDataClass; } public RubyMatchData(Ruby runtime) { super(runtime, runtime.getMatchData()); } public final static int MATCH_BUSY = USER2_F; // rb_match_busy public final void use() { flags |= MATCH_BUSY; } public final boolean used() { return (flags & MATCH_BUSY) != 0; } private RubyArray match_array(int start) { if (regs == null) { if (start != 0) return getRuntime().newEmptyArray(); if (begin == -1) { return getRuntime().newArray(getRuntime().getNil()); } else { RubyString ss = str.makeShared(begin, end - begin); if (isTaint()) ss.setTaint(true); return getRuntime().newArray(ss); } } else { RubyArray arr = getRuntime().newArray(regs.numRegs - start); for (int i=start; i<regs.numRegs; i++) { if (regs.beg[i] == -1) { arr.append(getRuntime().getNil()); } else { RubyString ss = str.makeShared(regs.beg[i], regs.end[i] - regs.beg[i]); if (isTaint()) ss.setTaint(true); arr.append(ss); } } return arr; } } public IRubyObject group(long n) { return RubyRegexp.nth_match((int)n, this); } public IRubyObject group(int n) { return RubyRegexp.nth_match(n, this); } @JRubyMethod(name = "inspect") public IRubyObject inspect() { return anyToString(); } /** match_to_a * */ @JRubyMethod(name = "to_a") public RubyArray to_a() { return match_array(0); } @JRubyMethod(name = "values_at", required = 1, rest = true) public IRubyObject values_at(IRubyObject[] args) { return to_a().values_at(args); } @JRubyMethod(name = "select", frame = true) public IRubyObject select(ThreadContext context, Block block) { return block.yield(context, to_a()); } /** match_captures * */ @JRubyMethod(name = "captures") public IRubyObject captures() { return match_array(1); } private int nameToBackrefNumber(RubyString str) { ByteList value = str.getByteList(); try { return pattern.nameToBackrefNumber(value.bytes, value.begin, value.begin + value.realSize, regs); } catch (JOniException je) { throw getRuntime().newIndexError(je.getMessage()); } } /** match_aref * */ @JRubyMethod(name = "[]", required = 1, optional = 1) public IRubyObject op_aref(IRubyObject[] args) { final IRubyObject rest = args.length == 2 ? 
args[1] : null; final IRubyObject idx = args[0]; if (rest == null || rest.isNil()) { if (idx instanceof RubyFixnum) { int num = RubyNumeric.fix2int(idx); if (num >= 0) return RubyRegexp.nth_match(num, this); } else { RubyString str; if (idx instanceof RubySymbol) { str = (RubyString)((RubySymbol)idx).id2name(); } else if (idx instanceof RubyString) { str = (RubyString)idx; } else { switch(args.length) { case 1: return ((RubyArray)to_a()).aref(args[0]); case 2: - return ((RubyArray)to_a()).aref(args[1]); + return ((RubyArray)to_a()).aref(args[0], args[1]); default: // Can't happen throw new IllegalArgumentException(); } } return RubyRegexp.nth_match(nameToBackrefNumber(str), this); } } switch(args.length) { case 1: return ((RubyArray)to_a()).aref(args[0]); case 2: - return ((RubyArray)to_a()).aref(args[1]); + return ((RubyArray)to_a()).aref(args[0], args[1]); default: // Can't happen throw new IllegalArgumentException(); } } /** match_size * */ @JRubyMethod(name = {"size", "length"}) public IRubyObject size() { return regs == null ? RubyFixnum.one(getRuntime()) : RubyFixnum.newFixnum(getRuntime(), regs.numRegs); } /** match_begin * */ @JRubyMethod(name = "begin", required = 1) public IRubyObject begin(IRubyObject index) { int i = RubyNumeric.num2int(index); if (regs == null) { if (i != 0) throw getRuntime().newIndexError("index " + i + " out of matches"); if (begin < 0) return getRuntime().getNil(); return RubyFixnum.newFixnum(getRuntime(), begin); } else { if (i < 0 || regs.numRegs <= i) throw getRuntime().newIndexError("index " + i + " out of matches"); if (regs.beg[i] < 0) return getRuntime().getNil(); return RubyFixnum.newFixnum(getRuntime(), regs.beg[i]); } } /** match_end * */ @JRubyMethod(name = "end", required = 1) public IRubyObject end(IRubyObject index) { int i = RubyNumeric.num2int(index); if (regs == null) { if (i != 0) throw getRuntime().newIndexError("index " + i + " out of matches"); if (end < 0) return getRuntime().getNil(); return RubyFixnum.newFixnum(getRuntime(), end); } else { if (i < 0 || regs.numRegs <= i) throw getRuntime().newIndexError("index " + i + " out of matches"); if (regs.end[i] < 0) return getRuntime().getNil(); return RubyFixnum.newFixnum(getRuntime(), regs.end[i]); } } /** match_offset * */ @JRubyMethod(name = "offset", required = 1) public IRubyObject offset(IRubyObject index) { int i = RubyNumeric.num2int(index); Ruby runtime = getRuntime(); if (regs == null) { if (i != 0) throw getRuntime().newIndexError("index " + i + " out of matches"); if (begin < 0) return runtime.newArray(runtime.getNil(), runtime.getNil()); return runtime.newArray(RubyFixnum.newFixnum(runtime, begin),RubyFixnum.newFixnum(runtime, end)); } else { if (i < 0 || regs.numRegs <= i) throw runtime.newIndexError("index " + i + " out of matches"); if (regs.beg[i] < 0) return runtime.newArray(runtime.getNil(), runtime.getNil()); return runtime.newArray(RubyFixnum.newFixnum(runtime, regs.beg[i]),RubyFixnum.newFixnum(runtime, regs.end[i])); } } /** match_pre_match * */ @JRubyMethod(name = "pre_match") public IRubyObject pre_match() { RubyString ss; if (regs == null) { if(begin == -1) return getRuntime().getNil(); ss = str.makeShared(0, begin); } else { if(regs.beg[0] == -1) return getRuntime().getNil(); ss = str.makeShared(0, regs.beg[0]); } if (isTaint()) ss.setTaint(true); return ss; } /** match_post_match * */ @JRubyMethod(name = "post_match") public IRubyObject post_match() { RubyString ss; if (regs == null) { if (begin == -1) return getRuntime().getNil(); ss = str.makeShared(end, 
str.getByteList().length() - end); } else { if (regs.beg[0] == -1) return getRuntime().getNil(); ss = str.makeShared(regs.end[0], str.getByteList().length() - regs.end[0]); } if(isTaint()) ss.setTaint(true); return ss; } /** match_to_s * */ @JRubyMethod(name = "to_s") public IRubyObject to_s() { IRubyObject ss = RubyRegexp.last_match(this); if (ss.isNil()) ss = RubyString.newEmptyString(getRuntime()); if (isTaint()) ss.setTaint(true); return ss; } /** match_string * */ @JRubyMethod(name = "string") public IRubyObject string() { return str; //str is frozen } @JRubyMethod(name = "initialize_copy", required = 1) public IRubyObject initialize_copy(IRubyObject original) { if (this == original) return this; if (!(getMetaClass() == original.getMetaClass())){ // MRI also does a pointer comparison here throw getRuntime().newTypeError("wrong argument class"); } RubyMatchData origMatchData = (RubyMatchData)original; str = origMatchData.str; regs = origMatchData.regs; return this; } }
false
true
public IRubyObject op_aref(IRubyObject[] args) {
    final IRubyObject rest = args.length == 2 ? args[1] : null;
    final IRubyObject idx = args[0];

    if (rest == null || rest.isNil()) {
        if (idx instanceof RubyFixnum) {
            int num = RubyNumeric.fix2int(idx);
            if (num >= 0) return RubyRegexp.nth_match(num, this);
        } else {
            RubyString str;
            if (idx instanceof RubySymbol) {
                str = (RubyString)((RubySymbol)idx).id2name();
            } else if (idx instanceof RubyString) {
                str = (RubyString)idx;
            } else {
                switch(args.length) {
                case 1:
                    return ((RubyArray)to_a()).aref(args[0]);
                case 2:
                    return ((RubyArray)to_a()).aref(args[1]);
                default:
                    // Can't happen
                    throw new IllegalArgumentException();
                }
            }
            return RubyRegexp.nth_match(nameToBackrefNumber(str), this);
        }
    }

    switch(args.length) {
    case 1:
        return ((RubyArray)to_a()).aref(args[0]);
    case 2:
        return ((RubyArray)to_a()).aref(args[1]);
    default:
        // Can't happen
        throw new IllegalArgumentException();
    }
}
public IRubyObject op_aref(IRubyObject[] args) {
    final IRubyObject rest = args.length == 2 ? args[1] : null;
    final IRubyObject idx = args[0];

    if (rest == null || rest.isNil()) {
        if (idx instanceof RubyFixnum) {
            int num = RubyNumeric.fix2int(idx);
            if (num >= 0) return RubyRegexp.nth_match(num, this);
        } else {
            RubyString str;
            if (idx instanceof RubySymbol) {
                str = (RubyString)((RubySymbol)idx).id2name();
            } else if (idx instanceof RubyString) {
                str = (RubyString)idx;
            } else {
                switch(args.length) {
                case 1:
                    return ((RubyArray)to_a()).aref(args[0]);
                case 2:
                    return ((RubyArray)to_a()).aref(args[0], args[1]);
                default:
                    // Can't happen
                    throw new IllegalArgumentException();
                }
            }
            return RubyRegexp.nth_match(nameToBackrefNumber(str), this);
        }
    }

    switch(args.length) {
    case 1:
        return ((RubyArray)to_a()).aref(args[0]);
    case 2:
        return ((RubyArray)to_a()).aref(args[0], args[1]);
    default:
        // Can't happen
        throw new IllegalArgumentException();
    }
}
diff --git a/src/test/java/eu/wisebed/wiseml/test/LoadWriteWiseML.java b/src/test/java/eu/wisebed/wiseml/test/LoadWriteWiseML.java index 887672b..0fc7c7e 100644 --- a/src/test/java/eu/wisebed/wiseml/test/LoadWriteWiseML.java +++ b/src/test/java/eu/wisebed/wiseml/test/LoadWriteWiseML.java @@ -1,125 +1,125 @@ package eu.wisebed.wiseml.test; import eu.wisebed.wiseml.controller.WiseMLController; import eu.wisebed.wiseml.model.WiseML; import eu.wisebed.wiseml.model.scenario.Timestamp; import eu.wisebed.wiseml.model.setup.Link; import eu.wisebed.wiseml.model.setup.Node; import eu.wisebed.wiseml.model.setup.Setup; import eu.wisebed.wiseml.model.trace.Trace; import org.jibx.runtime.BindingDirectory; import org.jibx.runtime.IBindingFactory; import org.jibx.runtime.IMarshallingContext; import org.jibx.runtime.JiBXException; import java.io.ByteArrayOutputStream; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.util.List; public class LoadWriteWiseML { public void testWriteToFile() { WiseML wiseml = new WiseML(); Setup setup = new Setup(); setup.setInterpolation("cubic"); setup.setDescription("this is an example WiseML with all the elements."); wiseml.setSetup(setup); try { // marshal object back out to file (with nice indentation, as UTF-8)... IBindingFactory bfact = BindingDirectory.getFactory(Setup.class); IMarshallingContext mctx = bfact.createMarshallingContext(); mctx.setIndent(5); FileOutputStream output = new FileOutputStream("telosB_short.wiseml"); mctx.setOutput(output, null); mctx.marshalDocument(wiseml.getSetup()); } catch (FileNotFoundException e) { e.printStackTrace(); System.exit(1); } catch (JiBXException e) { e.printStackTrace(); System.exit(1); } } public void testWriteToCharArray() { WiseML wiseml = new WiseML(); Setup setup = new Setup(); setup.setInterpolation("cubic"); setup.setDescription("this is an example WiseML with all the elements."); wiseml.setSetup(setup); try { // marshal object back out to file (with nice indentation, as UTF-8)... 
IBindingFactory bfact = BindingDirectory.getFactory(Setup.class); IMarshallingContext mctx = bfact.createMarshallingContext(); mctx.setIndent(5); // initialize the output stream final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); mctx.setOutput(buffer, null); mctx.marshalDocument(wiseml.getSetup()); System.out.println(buffer.toString()); } catch (Exception e) { e.printStackTrace(); System.exit(1); } } public void doAnotherTest() throws FileNotFoundException { FileInputStream fileML = null; try { - fileML = new FileInputStream("/home/evangelos/workspace/wiseml/src/test/resources/telosB_short.xml"); + fileML = new FileInputStream("C:\\wiseml\\telosB_short.wiseml"); } catch(Exception e){ System.err.println(e); } WiseMLController wmlcontroller = new WiseMLController(); WiseML wml = wmlcontroller.loadWiseMLFromFile(fileML); Trace theTrace = wml.getTrace(); List traceItems = theTrace.getChildren(); System.out.println("Timestamp Size:"+traceItems.size()); for(Object item : traceItems) { if (item.getClass().equals(Timestamp.class)){ Timestamp ts=(Timestamp) item; System.out.println("Timestamp"+ts.getValue()); }else if (item.getClass().equals(Node.class)){ Node nd=(Node) item; System.out.println("Node"+nd.getId()); }else if (item.getClass().equals(Link.class)){ Link ln=(Link) item; System.out.println("Link"+ln.getSource()+"-->"+ln.getTarget()); System.out.println("Link"+ln.getRssi().getValue()); } } } public static void main(String[] args) { LoadWriteWiseML testMe = new LoadWriteWiseML(); // testMe.testWriteToFile(); // testMe.testWriteToCharArray(); try { testMe.doAnotherTest(); } catch (FileNotFoundException e) { e.printStackTrace(); //To change body of catch statement use File | Settings | File Templates. } } }
true
true
public void doAnotherTest() throws FileNotFoundException {
    FileInputStream fileML = null;
    try {
        fileML = new FileInputStream("/home/evangelos/workspace/wiseml/src/test/resources/telosB_short.xml");
    } catch(Exception e){
        System.err.println(e);
    }

    WiseMLController wmlcontroller = new WiseMLController();
    WiseML wml = wmlcontroller.loadWiseMLFromFile(fileML);

    Trace theTrace = wml.getTrace();
    List traceItems = theTrace.getChildren();
    System.out.println("Timestamp Size:"+traceItems.size());
    for(Object item : traceItems) {
        if (item.getClass().equals(Timestamp.class)){
            Timestamp ts=(Timestamp) item;
            System.out.println("Timestamp"+ts.getValue());
        }else if (item.getClass().equals(Node.class)){
            Node nd=(Node) item;
            System.out.println("Node"+nd.getId());
        }else if (item.getClass().equals(Link.class)){
            Link ln=(Link) item;
            System.out.println("Link"+ln.getSource()+"-->"+ln.getTarget());
            System.out.println("Link"+ln.getRssi().getValue());
        }
    }
}
public void doAnotherTest() throws FileNotFoundException {
    FileInputStream fileML = null;
    try {
        fileML = new FileInputStream("C:\\wiseml\\telosB_short.wiseml");
    } catch(Exception e){
        System.err.println(e);
    }

    WiseMLController wmlcontroller = new WiseMLController();
    WiseML wml = wmlcontroller.loadWiseMLFromFile(fileML);

    Trace theTrace = wml.getTrace();
    List traceItems = theTrace.getChildren();
    System.out.println("Timestamp Size:"+traceItems.size());
    for(Object item : traceItems) {
        if (item.getClass().equals(Timestamp.class)){
            Timestamp ts=(Timestamp) item;
            System.out.println("Timestamp"+ts.getValue());
        }else if (item.getClass().equals(Node.class)){
            Node nd=(Node) item;
            System.out.println("Node"+nd.getId());
        }else if (item.getClass().equals(Link.class)){
            Link ln=(Link) item;
            System.out.println("Link"+ln.getSource()+"-->"+ln.getTarget());
            System.out.println("Link"+ln.getRssi().getValue());
        }
    }
}
diff --git a/component/webui/src/main/java/org/exoplatform/platform/webui/navigation/TreeNode.java b/component/webui/src/main/java/org/exoplatform/platform/webui/navigation/TreeNode.java index 4fc0c2fb53..8caebaf4ee 100644 --- a/component/webui/src/main/java/org/exoplatform/platform/webui/navigation/TreeNode.java +++ b/component/webui/src/main/java/org/exoplatform/platform/webui/navigation/TreeNode.java @@ -1,393 +1,394 @@ package org.exoplatform.platform.webui.navigation; import org.exoplatform.portal.config.model.LocalizedValue; import org.exoplatform.portal.mop.Described; import org.exoplatform.portal.mop.Described.State; import org.exoplatform.portal.mop.Visibility; import org.exoplatform.portal.mop.navigation.NodeChangeListener; import org.exoplatform.portal.mop.navigation.NodeState; import org.exoplatform.portal.mop.user.UserNavigation; import org.exoplatform.portal.mop.user.UserNode; import org.exoplatform.portal.webui.util.Util; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; /** * A wrapper class of {@link UserNode} for manipulation in WebUI part * * @author <a href="mailto:[email protected]">Trong Tran</a> * @version $Revision$ */ public class TreeNode implements NodeChangeListener<UserNode> { private Map<String, TreeNode> caches; private UserNavigation nav; private UserNode node; private TreeNode rootNode; private boolean deleteNode = false; private boolean cloneNode = false; private String id; private List<TreeNode> children; private Map<Locale, State> i18nizedLabels; public TreeNode(UserNavigation nav, UserNode node) { this(nav, node, null); this.rootNode = this; this.caches = new HashMap<String, TreeNode>(); addToCached(this); } private TreeNode(UserNavigation nav, UserNode node, TreeNode rootNode) { this.rootNode = rootNode; this.nav = nav; this.node = node; } public List<TreeNode> getChildren() { if (children == null) { children = new LinkedList<TreeNode>(); for (UserNode child : node.getChildren()) { String key = child.getId() == null ? String.valueOf(child.hashCode()) : child.getId(); TreeNode node = findNode(key); if (node == null) { throw new IllegalStateException("Can' find node " + child.getURI() + " in the cache"); } children.add(node); } } return children; } public TreeNode getChild(String name) { UserNode child = node.getChild(name); if (child == null) { return null; } return findNode(child.getId() == null ? String.valueOf(child.hashCode()) : child.getId()); } public boolean removeChild(TreeNode child) { children = null; if (child == null) { return false; } removeFromCached(child); return node.removeChild(child.getName()); } public TreeNode getParent() { UserNode parent = node.getParent(); if (parent == null) return null; return findNode(parent.getId() == null ? String.valueOf(parent.hashCode()) : parent.getId()); } public TreeNode getChild(int childIndex) throws IndexOutOfBoundsException { UserNode child = node.getChild(childIndex); if (child == null) { return null; } return findNode(child.getId() == null ? 
String.valueOf(child.hashCode()) : child.getId()); } public TreeNode addChild(String childName) { children = null; UserNode child = node.addChild(childName); return addToCached(new TreeNode(nav, child, this.rootNode)); } public void addChild(TreeNode child) { TreeNode oldParent = child.getParent(); if (oldParent != null) { oldParent.children = null; } children = null; this.node.addChild(child.getNode()); } public void addChild(int index, TreeNode child) { TreeNode oldParent = child.getParent(); if (oldParent != null) { oldParent.children = null; } children = null; node.addChild(index, child.getNode()); } public TreeNode findNode(String nodeID) { return this.rootNode.caches.get(nodeID); } public UserNode getNode() { return node; } public UserNavigation getPageNavigation() { return nav; } public boolean isDeleteNode() { return deleteNode; } public void setDeleteNode(boolean deleteNode) { this.deleteNode = deleteNode; } public boolean isCloneNode() { return cloneNode; } public void setCloneNode(boolean b) { cloneNode = b; } public String getPageRef() { return node.getPageRef(); } public String getId() { if (this.id == null) { this.id = node.getId() == null ? String.valueOf(node.hashCode()) : node.getId(); } return this.id; } public String getURI() { return node.getURI(); } public String getIcon() { return node.getIcon(); } public void setIcon(String icon) { node.setIcon(icon); } public String getEncodedResolvedLabel() { if (getLabel() == null) { if (i18nizedLabels != null) { Locale locale = Util.getPortalRequestContext().getLocale(); for (Locale key : i18nizedLabels.keySet()) { if (key.equals(locale)) { - return i18nizedLabels.get(key).getName(); + String encodedLabel = i18nizedLabels.get(key).getName(); + return encodedLabel == null ? getName() : encodedLabel; } } } } String encodedLabel = node.getEncodedResolvedLabel(); - return encodedLabel == null ? "" : encodedLabel; + return encodedLabel == null ? getName() : encodedLabel; } public String getName() { return node.getName(); } public void setName(String name) { node.setName(name); } public String getLabel() { return node.getLabel(); } public void setLabel(String label) { node.setLabel(label); } public Visibility getVisibility() { return node.getVisibility(); } public void setVisibility(Visibility visibility) { node.setVisibility(visibility); } public long getStartPublicationTime() { return node.getStartPublicationTime(); } public void setStartPublicationTime(long startPublicationTime) { node.setStartPublicationTime(startPublicationTime); } public long getEndPublicationTime() { return node.getEndPublicationTime(); } public void setEndPublicationTime(long endPublicationTime) { node.setEndPublicationTime(endPublicationTime); } public void setPageRef(String pageRef) { node.setPageRef(pageRef); } public String getResolvedLabel() { String resolvedLabel = node.getResolvedLabel(); return resolvedLabel == null ? 
"" : resolvedLabel; } public boolean hasChildrenRelationship() { return node.hasChildrenRelationship(); } public int getChildrenCount() { return node.getChildrenCount(); } private TreeNode addToCached(TreeNode node) { if (node == null) { return null; } if (findNode(node.getId()) != null) { return node; } this.rootNode.caches.put(node.getId(), node); for (UserNode child : node.getNode().getChildren()) { addToCached(new TreeNode(nav, child, this.rootNode)); } return node; } private TreeNode removeFromCached(TreeNode node) { if (node == null) { return null; } this.rootNode.caches.remove(node.getId()); if (node.hasChildrenRelationship()) { for (TreeNode child : node.getChildren()) { removeFromCached(child); } } return node; } @Override public void onAdd(UserNode target, UserNode parent, UserNode previous) { addToCached(new TreeNode(this.nav, target, this.rootNode)); findNode(parent.getId()).children = null; } @Override public void onCreate(UserNode target, UserNode parent, UserNode previous, String name) { } @Override public void onRemove(UserNode target, UserNode parent) { removeFromCached(findNode(target.getId())); findNode(parent.getId()).children = null; } @Override public void onDestroy(UserNode target, UserNode parent) { } @Override public void onRename(UserNode target, UserNode parent, String name) { } @Override public void onUpdate(UserNode target, NodeState state) { } @Override public void onMove(UserNode target, UserNode from, UserNode to, UserNode previous) { TreeNode fromTreeNode = findNode(from.getId()); TreeNode toTreeNode = findNode(to.getId()); fromTreeNode.children = null; toTreeNode.children = null; } public void setI18nizedLabels(Map<Locale, State> labels) { this.i18nizedLabels = labels; } public Map<Locale, State> getI18nizedLabels() { return i18nizedLabels; } }
false
true
public String getEncodedResolvedLabel() {
    if (getLabel() == null) {
        if (i18nizedLabels != null) {
            Locale locale = Util.getPortalRequestContext().getLocale();
            for (Locale key : i18nizedLabels.keySet()) {
                if (key.equals(locale)) {
                    return i18nizedLabels.get(key).getName();
                }
            }
        }
    }
    String encodedLabel = node.getEncodedResolvedLabel();
    return encodedLabel == null ? "" : encodedLabel;
}
public String getEncodedResolvedLabel() {
    if (getLabel() == null) {
        if (i18nizedLabels != null) {
            Locale locale = Util.getPortalRequestContext().getLocale();
            for (Locale key : i18nizedLabels.keySet()) {
                if (key.equals(locale)) {
                    String encodedLabel = i18nizedLabels.get(key).getName();
                    return encodedLabel == null ? getName() : encodedLabel;
                }
            }
        }
    }
    String encodedLabel = node.getEncodedResolvedLabel();
    return encodedLabel == null ? getName() : encodedLabel;
}
diff --git a/src/org/jagatoo/loaders/models/collada/stax/XMLFloat.java b/src/org/jagatoo/loaders/models/collada/stax/XMLFloat.java index b22e139..8912a25 100644 --- a/src/org/jagatoo/loaders/models/collada/stax/XMLFloat.java +++ b/src/org/jagatoo/loaders/models/collada/stax/XMLFloat.java @@ -1,67 +1,70 @@ /** * Copyright (c) 2007-2008, JAGaToo Project Group all rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the 'Xith3D Project Group' nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) A * RISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE */ package org.jagatoo.loaders.models.collada.stax; import javax.xml.stream.XMLStreamConstants; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; /** * A float element. * Used in ShadingParameters. * * @author Amos Wenger (aka BlueSky) * @author Joe LaFata (aka qbproger) */ public class XMLFloat { public float _float; public void parse( XMLStreamReader parser, String endTag ) throws XMLStreamException { for ( int event = parser.next(); event != XMLStreamConstants.END_DOCUMENT; event = parser.next() ) { switch ( event ) { - case XMLStreamConstants.CHARACTERS: + case XMLStreamConstants.START_ELEMENT: { - _float = Float.parseFloat( parser.getText() ); + if ( parser.getLocalName().equals( "float" ) ) + { + _float = Float.parseFloat( parser.getElementText() ); + } break; } case XMLStreamConstants.END_ELEMENT: { if ( parser.getLocalName().equals( endTag ) ) return; break; } } } } }
false
true
public void parse( XMLStreamReader parser, String endTag ) throws XMLStreamException
{
    for ( int event = parser.next(); event != XMLStreamConstants.END_DOCUMENT; event = parser.next() )
    {
        switch ( event )
        {
            case XMLStreamConstants.CHARACTERS:
            {
                _float = Float.parseFloat( parser.getText() );
                break;
            }
            case XMLStreamConstants.END_ELEMENT:
            {
                if ( parser.getLocalName().equals( endTag ) )
                    return;
                break;
            }
        }
    }
}
public void parse( XMLStreamReader parser, String endTag ) throws XMLStreamException
{
    for ( int event = parser.next(); event != XMLStreamConstants.END_DOCUMENT; event = parser.next() )
    {
        switch ( event )
        {
            case XMLStreamConstants.START_ELEMENT:
            {
                if ( parser.getLocalName().equals( "float" ) )
                {
                    _float = Float.parseFloat( parser.getElementText() );
                }
                break;
            }
            case XMLStreamConstants.END_ELEMENT:
            {
                if ( parser.getLocalName().equals( endTag ) )
                    return;
                break;
            }
        }
    }
}
diff --git a/src/Tests/unitTests/utils/TestSleeper.java b/src/Tests/unitTests/utils/TestSleeper.java
index b39fc52db..1e74d3a13 100644
--- a/src/Tests/unitTests/utils/TestSleeper.java
+++ b/src/Tests/unitTests/utils/TestSleeper.java
@@ -1,45 +1,48 @@
 package unitTests.utils;

 import junit.framework.Assert;

 import org.junit.Test;

 import org.objectweb.proactive.core.util.Sleeper;

 import unitTests.UnitTests;


 public class TestSleeper extends UnitTests {
     static final long SLEEP_TIME = 1000;

     @Test
     public void test() {
         Sleeper sleeper = new Sleeper(SLEEP_TIME);

         T t = new T(Thread.currentThread());
         Thread thread = new Thread(t);
         thread.setDaemon(true);
         thread.start();

         long before = System.currentTimeMillis();
         sleeper.sleep();
         long after = System.currentTimeMillis();

         logger.info("Spleeped " + (after - before) + " expected " + SLEEP_TIME);
-        Assert.assertTrue(after - before >= SLEEP_TIME);
+        // -1 is here because System.nanoTime() is more accurate
+        // than System.currentTimeMillis(). Rouding errors can leads to
+        // after - before == SLEEP_TIME - 1
+        Assert.assertTrue(after - before >= SLEEP_TIME - 1);
     }

     private class T implements Runnable {
         private Thread sleeper;

         public T(Thread sleeper) {
             this.sleeper = sleeper;
         }

         public void run() {
             while (true) {
                 this.sleeper.interrupt();
             }
         }
     }
 }
true
true
public void test() {
    Sleeper sleeper = new Sleeper(SLEEP_TIME);

    T t = new T(Thread.currentThread());
    Thread thread = new Thread(t);
    thread.setDaemon(true);
    thread.start();

    long before = System.currentTimeMillis();
    sleeper.sleep();
    long after = System.currentTimeMillis();

    logger.info("Spleeped " + (after - before) + " expected " + SLEEP_TIME);
    Assert.assertTrue(after - before >= SLEEP_TIME);
}
public void test() {
    Sleeper sleeper = new Sleeper(SLEEP_TIME);

    T t = new T(Thread.currentThread());
    Thread thread = new Thread(t);
    thread.setDaemon(true);
    thread.start();

    long before = System.currentTimeMillis();
    sleeper.sleep();
    long after = System.currentTimeMillis();

    logger.info("Spleeped " + (after - before) + " expected " + SLEEP_TIME);
    // -1 is here because System.nanoTime() is more accurate
    // than System.currentTimeMillis(). Rouding errors can leads to
    // after - before == SLEEP_TIME - 1
    Assert.assertTrue(after - before >= SLEEP_TIME - 1);
}
diff --git a/axis2/src/main/java/org/apache/ode/axis2/service/DeploymentWebService.java b/axis2/src/main/java/org/apache/ode/axis2/service/DeploymentWebService.java index 6232205d0..355f3fa9e 100644 --- a/axis2/src/main/java/org/apache/ode/axis2/service/DeploymentWebService.java +++ b/axis2/src/main/java/org/apache/ode/axis2/service/DeploymentWebService.java @@ -1,292 +1,292 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.ode.axis2.service; import org.apache.axiom.om.OMAbstractFactory; import org.apache.axiom.om.OMElement; import org.apache.axiom.om.OMNamespace; import org.apache.axiom.om.OMText; import org.apache.axiom.soap.SOAPEnvelope; import org.apache.axiom.soap.SOAPFactory; import org.apache.axis2.AxisFault; import org.apache.axis2.context.MessageContext; import org.apache.axis2.description.AxisService; import org.apache.axis2.engine.AxisConfiguration; import org.apache.axis2.engine.AxisEngine; import org.apache.axis2.receivers.AbstractMessageReceiver; import org.apache.axis2.util.Utils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.ode.axis2.OdeFault; import org.apache.ode.axis2.deploy.DeploymentPoller; import org.apache.ode.axis2.hooks.ODEAxisService; import org.apache.ode.axis2.util.OMUtils; import org.apache.ode.bpel.iapi.BpelServer; import org.apache.ode.bpel.iapi.ProcessConf; import org.apache.ode.bpel.iapi.ProcessStore; import org.apache.ode.utils.fs.FileUtils; import javax.activation.DataHandler; import javax.wsdl.Definition; import javax.wsdl.WSDLException; import javax.wsdl.factory.WSDLFactory; import javax.wsdl.xml.WSDLReader; import javax.xml.namespace.QName; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.Collection; import java.util.List; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; /** * Axis wrapper for process deployment. 
*/ public class DeploymentWebService { private static final Log __log = LogFactory.getLog(DeploymentWebService.class); private final OMNamespace _pmapi; private File _deployPath; private DeploymentPoller _poller; private ProcessStore _store; public DeploymentWebService() { _pmapi = OMAbstractFactory.getOMFactory().createOMNamespace("http://www.apache.org/ode/pmapi","pmapi"); } public void enableService(AxisConfiguration axisConfig, BpelServer server, ProcessStore store, DeploymentPoller poller, String rootpath, String workPath) { _deployPath = new File(workPath, "processes"); _store = store; Definition def; try { WSDLReader wsdlReader = WSDLFactory.newInstance().newWSDLReader(); wsdlReader.setFeature("javax.wsdl.verbose", false); File wsdlFile = new File(rootpath + "/deploy.wsdl"); def = wsdlReader.readWSDL(wsdlFile.toURI().toString()); AxisService deployService = ODEAxisService.createService( axisConfig, new QName("http://www.apache.org/ode/deployapi", "DeploymentService"), "DeploymentPort", "DeploymentService", def, new DeploymentMessageReceiver()); axisConfig.addService(deployService); _poller = poller; } catch (WSDLException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } } class DeploymentMessageReceiver extends AbstractMessageReceiver { public void invokeBusinessLogic(MessageContext messageContext) throws AxisFault { String operation = messageContext.getAxisOperation().getName().getLocalPart(); SOAPFactory factory = getSOAPFactory(messageContext); boolean unknown = false; try { if (operation.equals("deploy")) { OMElement namePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); OMElement zipPart = (OMElement) namePart.getNextOMSibling(); - OMElement zip = zipPart.getFirstElement(); - if (!zipPart.getQName().getLocalPart().equals("package") || - !zip.getQName().getLocalPart().equals("zip")) - throw new OdeFault("Your message should contain a part named 'package' with a zip element"); + OMElement zip = (zipPart == null) ? null : zipPart.getFirstElement(); + if (zip == null || !zipPart.getQName().getLocalPart().equals("package") + || !zip.getQName().getLocalPart().equals("zip")) + throw new OdeFault("Your message should contain an element named 'package' with a 'zip' element"); OMText binaryNode = (OMText) zip.getFirstOMChild(); if (binaryNode == null) { throw new OdeFault("Empty binary node under <zip> element"); } binaryNode.setOptimize(true); try { // We're going to create a directory under the deployment root and put // files in there. The poller shouldn't pick them up so we're asking // it to hold on for a while. 
_poller.hold(); File dest = new File(_deployPath, namePart.getText() + "-" + _store.getCurrentVersion()); dest.mkdir(); unzip(dest, (DataHandler) binaryNode.getDataHandler()); // Check that we have a deploy.xml File deployXml = new File(dest, "deploy.xml"); if (!deployXml.exists()) throw new OdeFault("The deployment doesn't appear to contain a deployment " + "descriptor in its root directory named deploy.xml, aborting."); Collection<QName> deployed = _store.deploy(dest); File deployedMarker = new File(_deployPath, dest.getName() + ".deployed"); deployedMarker.createNewFile(); // Telling the poller what we deployed so that it doesn't try to deploy it again _poller.markAsDeployed(dest); __log.info("Deployment of artifact " + dest.getName() + " successful."); OMElement response = factory.createOMElement("response", null); if (__log.isDebugEnabled()) __log.debug("Deployed package: "+dest.getName()); OMElement d = factory.createOMElement("name", null); d.setText(dest.getName()); response.addChild(d); for (QName pid : deployed) { if (__log.isDebugEnabled()) __log.debug("Deployed PID: "+pid); d = factory.createOMElement("id", null); d.setText(pid); response.addChild(d); } sendResponse(factory, messageContext, "deployResponse", response); } finally { _poller.release(); } } else if (operation.equals("undeploy")) { OMElement part = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); String pkg = part.getText(); File deploymentDir = new File(_deployPath, pkg); if (!deploymentDir.exists()) throw new OdeFault("Couldn't find deployment package " + pkg + " in directory " + _deployPath); try { // We're going to create a directory under the deployment root and put // files in there. The poller shouldn't pick them up so we're asking // it to hold on for a while. 
_poller.hold(); Collection<QName> undeployed = _store.undeploy(deploymentDir); File deployedMarker = new File(_deployPath, pkg + ".deployed"); deployedMarker.delete(); FileUtils.deepDelete(new File(_deployPath, pkg)); OMElement response = factory.createOMElement("response", null); response.setText("" + (undeployed.size() > 0)); sendResponse(factory, messageContext, "undeployResponse", response); _poller.markAsUndeployed(deploymentDir); } finally { _poller.release(); } } else if (operation.equals("listDeployedPackages")) { Collection<String> packageNames = _store.getPackages(); OMElement response = factory.createOMElement("deployedPackages", null); for (String name : packageNames) { OMElement nameElmt = factory.createOMElement("name", null); nameElmt.setText(name); response.addChild(nameElmt); } sendResponse(factory, messageContext, "listDeployedPackagesResponse", response); } else if (operation.equals("listProcesses")) { OMElement namePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); List<QName> processIds = _store.listProcesses(namePart.getText()); OMElement response = factory.createOMElement("processIds", null); for (QName qname : processIds) { OMElement nameElmt = factory.createOMElement("id", null); nameElmt.setText(qname); response.addChild(nameElmt); } sendResponse(factory, messageContext, "listProcessResponse", response); } else if (operation.equals("getProcessPackage")) { OMElement qnamePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); ProcessConf process = _store.getProcessConfiguration(OMUtils.getTextAsQName(qnamePart)); if (process == null) { throw new OdeFault("Could not find process: " + qnamePart.getTextAsQName()); } String packageName = _store.getProcessConfiguration(OMUtils.getTextAsQName(qnamePart)).getPackage(); OMElement response = factory.createOMElement("packageName", null); response.setText(packageName); sendResponse(factory, messageContext, "getProcessPackageResponse", response); } else unknown = true; } catch (Throwable t) { // Trying to extract a meaningful message Throwable source = t; while (source.getCause() != null && source.getCause() != source) source = source.getCause(); __log.warn("Invocation of operation " + operation + " failed", t); throw new OdeFault("Invocation of operation " + operation + " failed: " + source.toString(), t); } if (unknown) throw new OdeFault("Unknown operation: '" + messageContext.getAxisOperation().getName() + "'"); } private File buildUnusedDir(File deployPath, String dirName) { int v = 1; while (new File(deployPath, dirName + "-" + v).exists()) v++; return new File(deployPath, dirName + "-" + v); } private void unzip(File dest, DataHandler dataHandler) throws AxisFault { try { ZipInputStream zis = new ZipInputStream(dataHandler.getDataSource().getInputStream()); ZipEntry entry; // Processing the package while((entry = zis.getNextEntry()) != null) { if(entry.isDirectory()) { __log.debug("Extracting directory: " + entry.getName()); new File(dest, entry.getName()).mkdir(); continue; } __log.debug("Extracting file: " + entry.getName()); File destFile = new File(dest, entry.getName()); if (!destFile.getParentFile().exists()) destFile.getParentFile().mkdirs(); copyInputStream(zis, new BufferedOutputStream( new FileOutputStream(destFile))); } zis.close(); } catch (IOException e) { throw new OdeFault("An error occured on deployment.", e); } } private void sendResponse(SOAPFactory factory, MessageContext messageContext, String op, OMElement response) throws AxisFault { 
MessageContext outMsgContext = Utils.createOutMessageContext(messageContext); outMsgContext.getOperationContext().addMessageContext(outMsgContext); SOAPEnvelope envelope = factory.getDefaultEnvelope(); outMsgContext.setEnvelope(envelope); OMElement responseOp = factory.createOMElement(op, _pmapi); responseOp.addChild(response); envelope.getBody().addChild(response); AxisEngine engine = new AxisEngine( messageContext.getOperationContext().getServiceContext().getConfigurationContext()); engine.send(outMsgContext); } } private static void copyInputStream(InputStream in, OutputStream out) throws IOException { byte[] buffer = new byte[1024]; int len; while((len = in.read(buffer)) >= 0) out.write(buffer, 0, len); out.close(); } }
true
true
public void invokeBusinessLogic(MessageContext messageContext) throws AxisFault { String operation = messageContext.getAxisOperation().getName().getLocalPart(); SOAPFactory factory = getSOAPFactory(messageContext); boolean unknown = false; try { if (operation.equals("deploy")) { OMElement namePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); OMElement zipPart = (OMElement) namePart.getNextOMSibling(); OMElement zip = zipPart.getFirstElement(); if (!zipPart.getQName().getLocalPart().equals("package") || !zip.getQName().getLocalPart().equals("zip")) throw new OdeFault("Your message should contain a part named 'package' with a zip element"); OMText binaryNode = (OMText) zip.getFirstOMChild(); if (binaryNode == null) { throw new OdeFault("Empty binary node under <zip> element"); } binaryNode.setOptimize(true); try { // We're going to create a directory under the deployment root and put // files in there. The poller shouldn't pick them up so we're asking // it to hold on for a while. _poller.hold(); File dest = new File(_deployPath, namePart.getText() + "-" + _store.getCurrentVersion()); dest.mkdir(); unzip(dest, (DataHandler) binaryNode.getDataHandler()); // Check that we have a deploy.xml File deployXml = new File(dest, "deploy.xml"); if (!deployXml.exists()) throw new OdeFault("The deployment doesn't appear to contain a deployment " + "descriptor in its root directory named deploy.xml, aborting."); Collection<QName> deployed = _store.deploy(dest); File deployedMarker = new File(_deployPath, dest.getName() + ".deployed"); deployedMarker.createNewFile(); // Telling the poller what we deployed so that it doesn't try to deploy it again _poller.markAsDeployed(dest); __log.info("Deployment of artifact " + dest.getName() + " successful."); OMElement response = factory.createOMElement("response", null); if (__log.isDebugEnabled()) __log.debug("Deployed package: "+dest.getName()); OMElement d = factory.createOMElement("name", null); d.setText(dest.getName()); response.addChild(d); for (QName pid : deployed) { if (__log.isDebugEnabled()) __log.debug("Deployed PID: "+pid); d = factory.createOMElement("id", null); d.setText(pid); response.addChild(d); } sendResponse(factory, messageContext, "deployResponse", response); } finally { _poller.release(); } } else if (operation.equals("undeploy")) { OMElement part = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); String pkg = part.getText(); File deploymentDir = new File(_deployPath, pkg); if (!deploymentDir.exists()) throw new OdeFault("Couldn't find deployment package " + pkg + " in directory " + _deployPath); try { // We're going to create a directory under the deployment root and put // files in there. The poller shouldn't pick them up so we're asking // it to hold on for a while. 
_poller.hold(); Collection<QName> undeployed = _store.undeploy(deploymentDir); File deployedMarker = new File(_deployPath, pkg + ".deployed"); deployedMarker.delete(); FileUtils.deepDelete(new File(_deployPath, pkg)); OMElement response = factory.createOMElement("response", null); response.setText("" + (undeployed.size() > 0)); sendResponse(factory, messageContext, "undeployResponse", response); _poller.markAsUndeployed(deploymentDir); } finally { _poller.release(); } } else if (operation.equals("listDeployedPackages")) { Collection<String> packageNames = _store.getPackages(); OMElement response = factory.createOMElement("deployedPackages", null); for (String name : packageNames) { OMElement nameElmt = factory.createOMElement("name", null); nameElmt.setText(name); response.addChild(nameElmt); } sendResponse(factory, messageContext, "listDeployedPackagesResponse", response); } else if (operation.equals("listProcesses")) { OMElement namePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); List<QName> processIds = _store.listProcesses(namePart.getText()); OMElement response = factory.createOMElement("processIds", null); for (QName qname : processIds) { OMElement nameElmt = factory.createOMElement("id", null); nameElmt.setText(qname); response.addChild(nameElmt); } sendResponse(factory, messageContext, "listProcessResponse", response); } else if (operation.equals("getProcessPackage")) { OMElement qnamePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); ProcessConf process = _store.getProcessConfiguration(OMUtils.getTextAsQName(qnamePart)); if (process == null) { throw new OdeFault("Could not find process: " + qnamePart.getTextAsQName()); } String packageName = _store.getProcessConfiguration(OMUtils.getTextAsQName(qnamePart)).getPackage(); OMElement response = factory.createOMElement("packageName", null); response.setText(packageName); sendResponse(factory, messageContext, "getProcessPackageResponse", response); } else unknown = true; } catch (Throwable t) { // Trying to extract a meaningful message Throwable source = t; while (source.getCause() != null && source.getCause() != source) source = source.getCause(); __log.warn("Invocation of operation " + operation + " failed", t); throw new OdeFault("Invocation of operation " + operation + " failed: " + source.toString(), t); } if (unknown) throw new OdeFault("Unknown operation: '" + messageContext.getAxisOperation().getName() + "'"); }
public void invokeBusinessLogic(MessageContext messageContext) throws AxisFault { String operation = messageContext.getAxisOperation().getName().getLocalPart(); SOAPFactory factory = getSOAPFactory(messageContext); boolean unknown = false; try { if (operation.equals("deploy")) { OMElement namePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); OMElement zipPart = (OMElement) namePart.getNextOMSibling(); OMElement zip = (zipPart == null) ? null : zipPart.getFirstElement(); if (zip == null || !zipPart.getQName().getLocalPart().equals("package") || !zip.getQName().getLocalPart().equals("zip")) throw new OdeFault("Your message should contain an element named 'package' with a 'zip' element"); OMText binaryNode = (OMText) zip.getFirstOMChild(); if (binaryNode == null) { throw new OdeFault("Empty binary node under <zip> element"); } binaryNode.setOptimize(true); try { // We're going to create a directory under the deployment root and put // files in there. The poller shouldn't pick them up so we're asking // it to hold on for a while. _poller.hold(); File dest = new File(_deployPath, namePart.getText() + "-" + _store.getCurrentVersion()); dest.mkdir(); unzip(dest, (DataHandler) binaryNode.getDataHandler()); // Check that we have a deploy.xml File deployXml = new File(dest, "deploy.xml"); if (!deployXml.exists()) throw new OdeFault("The deployment doesn't appear to contain a deployment " + "descriptor in its root directory named deploy.xml, aborting."); Collection<QName> deployed = _store.deploy(dest); File deployedMarker = new File(_deployPath, dest.getName() + ".deployed"); deployedMarker.createNewFile(); // Telling the poller what we deployed so that it doesn't try to deploy it again _poller.markAsDeployed(dest); __log.info("Deployment of artifact " + dest.getName() + " successful."); OMElement response = factory.createOMElement("response", null); if (__log.isDebugEnabled()) __log.debug("Deployed package: "+dest.getName()); OMElement d = factory.createOMElement("name", null); d.setText(dest.getName()); response.addChild(d); for (QName pid : deployed) { if (__log.isDebugEnabled()) __log.debug("Deployed PID: "+pid); d = factory.createOMElement("id", null); d.setText(pid); response.addChild(d); } sendResponse(factory, messageContext, "deployResponse", response); } finally { _poller.release(); } } else if (operation.equals("undeploy")) { OMElement part = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); String pkg = part.getText(); File deploymentDir = new File(_deployPath, pkg); if (!deploymentDir.exists()) throw new OdeFault("Couldn't find deployment package " + pkg + " in directory " + _deployPath); try { // We're going to create a directory under the deployment root and put // files in there. The poller shouldn't pick them up so we're asking // it to hold on for a while. 
_poller.hold(); Collection<QName> undeployed = _store.undeploy(deploymentDir); File deployedMarker = new File(_deployPath, pkg + ".deployed"); deployedMarker.delete(); FileUtils.deepDelete(new File(_deployPath, pkg)); OMElement response = factory.createOMElement("response", null); response.setText("" + (undeployed.size() > 0)); sendResponse(factory, messageContext, "undeployResponse", response); _poller.markAsUndeployed(deploymentDir); } finally { _poller.release(); } } else if (operation.equals("listDeployedPackages")) { Collection<String> packageNames = _store.getPackages(); OMElement response = factory.createOMElement("deployedPackages", null); for (String name : packageNames) { OMElement nameElmt = factory.createOMElement("name", null); nameElmt.setText(name); response.addChild(nameElmt); } sendResponse(factory, messageContext, "listDeployedPackagesResponse", response); } else if (operation.equals("listProcesses")) { OMElement namePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); List<QName> processIds = _store.listProcesses(namePart.getText()); OMElement response = factory.createOMElement("processIds", null); for (QName qname : processIds) { OMElement nameElmt = factory.createOMElement("id", null); nameElmt.setText(qname); response.addChild(nameElmt); } sendResponse(factory, messageContext, "listProcessResponse", response); } else if (operation.equals("getProcessPackage")) { OMElement qnamePart = messageContext.getEnvelope().getBody().getFirstElement().getFirstElement(); ProcessConf process = _store.getProcessConfiguration(OMUtils.getTextAsQName(qnamePart)); if (process == null) { throw new OdeFault("Could not find process: " + qnamePart.getTextAsQName()); } String packageName = _store.getProcessConfiguration(OMUtils.getTextAsQName(qnamePart)).getPackage(); OMElement response = factory.createOMElement("packageName", null); response.setText(packageName); sendResponse(factory, messageContext, "getProcessPackageResponse", response); } else unknown = true; } catch (Throwable t) { // Trying to extract a meaningful message Throwable source = t; while (source.getCause() != null && source.getCause() != source) source = source.getCause(); __log.warn("Invocation of operation " + operation + " failed", t); throw new OdeFault("Invocation of operation " + operation + " failed: " + source.toString(), t); } if (unknown) throw new OdeFault("Unknown operation: '" + messageContext.getAxisOperation().getName() + "'"); }
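The core of this fix is the order of the checks in the deploy branch: the buggy version dereferences zipPart and zip before verifying they exist, so a request without a 'package' part fails with a NullPointerException; the fixed version tests for null first and relies on short-circuit evaluation of '||', so the later dereferences only run when the elements are present. A minimal, self-contained sketch of that guard pattern follows; the Element and GuardDemo classes are plain-Java stand-ins for illustration, not the Axiom OMElement API.

// Sketch of the short-circuit null guard introduced by the fix.
class Element {
    private final String name;
    private final Element child;
    Element(String name, Element child) { this.name = name; this.child = child; }
    Element firstChild() { return child; }
    String localName() { return name; }
}

public class GuardDemo {
    static void validate(Element zipPart) {
        // Null is checked before any dereference; because '||' short-circuits,
        // localName() is only called when zipPart and zip are known to exist.
        Element zip = (zipPart == null) ? null : zipPart.firstChild();
        if (zip == null || !zipPart.localName().equals("package")
                || !zip.localName().equals("zip")) {
            throw new IllegalArgumentException(
                    "message should contain an element named 'package' with a 'zip' element");
        }
    }

    public static void main(String[] args) {
        validate(new Element("package", new Element("zip", null))); // accepted
        try {
            validate(null); // rejected with a clear message instead of an NPE
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected cleanly: " + expected.getMessage());
        }
    }
}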
diff --git a/src/com/quicinc/fmradio/FMRadioService.java b/src/com/quicinc/fmradio/FMRadioService.java index 8fda43a..5734ce7 100755 --- a/src/com/quicinc/fmradio/FMRadioService.java +++ b/src/com/quicinc/fmradio/FMRadioService.java @@ -1,2434 +1,2438 @@ /* * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Code Aurora nor * the names of its contributors may be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.quicinc.fmradio; import java.io.File; import java.io.IOException; import java.lang.ref.WeakReference; import android.app.Notification; import android.app.NotificationManager; import android.app.PendingIntent; import android.app.Service; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.content.BroadcastReceiver; import android.media.AudioManager; import android.media.AudioManager.OnAudioFocusChangeListener; import android.media.AudioSystem; import android.media.MediaRecorder; import android.os.Environment; import android.os.Handler; import android.os.IBinder; import android.os.Message; import android.os.PowerManager; import android.os.PowerManager.WakeLock; import android.os.RemoteException; import android.telephony.PhoneStateListener; import android.telephony.TelephonyManager; import android.util.Log; import android.widget.RemoteViews; import android.widget.Toast; import android.view.KeyEvent; import android.os.SystemProperties; import android.hardware.fmradio.FmReceiver; import android.hardware.fmradio.FmRxEvCallbacksAdaptor; import android.hardware.fmradio.FmRxRdsData; import android.hardware.fmradio.FmConfig; import android.net.Uri; import android.content.res.Resources; import java.util.Date; import java.text.SimpleDateFormat; import android.provider.MediaStore; import android.content.ContentResolver; import android.content.ContentValues; import android.database.Cursor; import com.quicinc.utils.A2dpDeviceStatus; /** * Provides "background" FM Radio (that uses the hardware) capabilities, * allowing the user to switch between activities without stopping playback. 
*/ public class FMRadioService extends Service { public static final int RADIO_AUDIO_DEVICE_WIRED_HEADSET = 0; public static final int RADIO_AUDIO_DEVICE_SPEAKER = 1; private static final int FMRADIOSERVICE_STATUS = 101; private static final String FMRADIO_DEVICE_FD_STRING = "/dev/radio0"; private static final String LOGTAG = "FMService";//FMRadio.LOGTAG; private FmReceiver mReceiver; private BroadcastReceiver mHeadsetReceiver = null; private BroadcastReceiver mHeadsetHookListener = null; private BroadcastReceiver mSdcardUnmountReceiver = null; private BroadcastReceiver mMusicCommandListener = null; private boolean mOverA2DP = false; private IFMRadioServiceCallbacks mCallbacks; private static FmSharedPreferences mPrefs; private boolean mHeadsetPlugged = false; private boolean mInternalAntennaAvailable = false; private WakeLock mWakeLock; private int mServiceStartId = -1; private boolean mServiceInUse = false; private boolean mMuted = false; private boolean mResumeAfterCall = false; private static String mAudioDevice="headset"; MediaRecorder mRecorder = null; MediaRecorder mA2dp = null; private boolean mFMOn = false; private boolean mFmRecordingOn = false; private boolean mSpeakerPhoneOn = false; private static boolean mRadioState = true; private BroadcastReceiver mScreenOnOffReceiver = null; final Handler mHandler = new Handler(); private boolean misAnalogModeSupported = false; private boolean misAnalogPathEnabled = false; private boolean mA2dpDisconnected = false; //PhoneStateListener instances corresponding to each private FmRxRdsData mFMRxRDSData=null; // interval after which we stop the service when idle private static final int IDLE_DELAY = 60000; private File mA2DPSampleFile = null; //Track FM playback for reenter App usecases private boolean mPlaybackInProgress = false; private boolean mStoppedOnFocusLoss = false; private File mSampleFile = null; long mSampleStart = 0; // Messages handled in FM Service private static final int FM_STOP =1; private static final int RESET_NOTCH_FILTER =2; private static final int STOPSERVICE_ONSLEEP = 3; private static final int STOPRECORD_ONTIMEOUT = 4; private static final int FOCUSCHANGE = 5; //Track notch filter settings private boolean mNotchFilterSet = false; public static final int STOP_SERVICE = 0; public static final int STOP_RECORD = 1; // A2dp Device Status will be queried through this class A2dpDeviceStatus mA2dpDeviceState = null; //on shutdown not to send start Intent to AudioManager private boolean mAppShutdown = false; private boolean mSingleRecordingInstanceSupported = false; public FMRadioService() { } @Override public void onCreate() { super.onCreate(); mPrefs = new FmSharedPreferences(this); mCallbacks = null; TelephonyManager tmgr = (TelephonyManager) getSystemService(Context.TELEPHONY_SERVICE); tmgr.listen(mPhoneStateListener, PhoneStateListener.LISTEN_CALL_STATE | PhoneStateListener.LISTEN_DATA_ACTIVITY); PowerManager pm = (PowerManager)getSystemService(Context.POWER_SERVICE); mWakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, this.getClass().getName()); mWakeLock.setReferenceCounted(false); misAnalogModeSupported = SystemProperties.getBoolean("ro.fm.analogpath.supported",false); /* Register for Screen On/off broadcast notifications */ mA2dpDeviceState = new A2dpDeviceStatus(getApplicationContext()); registerScreenOnOffListener(); registerHeadsetListener(); registerExternalStorageListener(); // registering media button receiver seperately as we need to set // different priority for receiving media events 
registerMediaButtonReceiver(); if ( false == SystemProperties.getBoolean("ro.fm.mulinst.recording.support",true)) { mSingleRecordingInstanceSupported = true; } // Register for pause commands from other apps to stop FM registerMusicServiceCommandReceiver(); // If the service was idle, but got killed before it stopped itself, the // system will relaunch it. Make sure it gets stopped again in that case. Message msg = mDelayedStopHandler.obtainMessage(); msg.what = FM_STOP; mDelayedStopHandler.sendMessageDelayed(msg, IDLE_DELAY); } @Override public void onDestroy() { Log.d(LOGTAG, "onDestroy"); if (isFmOn()) { Log.e(LOGTAG, "Service being destroyed while still playing."); } // make sure there aren't any other messages coming mDelayedStopHandler.removeCallbacksAndMessages(null); //release the audio focus listener AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); audioManager.abandonAudioFocus(mAudioFocusListener); /* Remove the Screen On/off listener */ if (mScreenOnOffReceiver != null) { unregisterReceiver(mScreenOnOffReceiver); mScreenOnOffReceiver = null; } /* Unregister the headset Broadcase receiver */ if (mHeadsetReceiver != null) { unregisterReceiver(mHeadsetReceiver); mHeadsetReceiver = null; } if( mHeadsetHookListener != null ) { unregisterReceiver(mHeadsetHookListener); mHeadsetHookListener = null; } if( mSdcardUnmountReceiver != null ) { unregisterReceiver(mSdcardUnmountReceiver); mSdcardUnmountReceiver = null; } if( mMusicCommandListener != null ) { unregisterReceiver(mMusicCommandListener); mMusicCommandListener = null; } /* Since the service is closing, disable the receiver */ fmOff(); TelephonyManager tmgr = (TelephonyManager) getSystemService(Context.TELEPHONY_SERVICE); tmgr.listen(mPhoneStateListener, 0); Log.d(LOGTAG, "onDestroy: unbindFromService completed"); //unregisterReceiver(mIntentReceiver); mWakeLock.release(); super.onDestroy(); } /** * Registers an intent to listen for ACTION_MEDIA_UNMOUNTED notifications. * The intent will call closeExternalStorageFiles() if the external media * is going to be ejected, so applications can clean up. */ public void registerExternalStorageListener() { if (mSdcardUnmountReceiver == null) { mSdcardUnmountReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { String action = intent.getAction(); if ((action.equals(Intent.ACTION_MEDIA_UNMOUNTED)) || (action.equals(Intent.ACTION_MEDIA_EJECT))) { Log.d(LOGTAG, "ACTION_MEDIA_UNMOUNTED Intent received"); if (mFmRecordingOn == true) { try { if ((mServiceInUse) && (mCallbacks != null) ) { mCallbacks.onRecordingStopped(); } } catch (RemoteException e) { e.printStackTrace(); } } } } }; IntentFilter iFilter = new IntentFilter(); iFilter.addAction(Intent.ACTION_MEDIA_UNMOUNTED); iFilter.addAction(Intent.ACTION_MEDIA_EJECT); iFilter.addDataScheme("file"); registerReceiver(mSdcardUnmountReceiver, iFilter); } } /** * Registers an intent to listen for ACTION_HEADSET_PLUG * notifications. This intent is called to know if the headset * was plugged in/out */ public void registerHeadsetListener() { if (mHeadsetReceiver == null) { mHeadsetReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { String action = intent.getAction(); if (action.equals(Intent.ACTION_HEADSET_PLUG)) { Log.d(LOGTAG, "ACTION_HEADSET_PLUG Intent received"); // Listen for ACTION_HEADSET_PLUG broadcasts. 
Log.d(LOGTAG, "mReceiver: ACTION_HEADSET_PLUG"); Log.d(LOGTAG, "==> intent: " + intent); Log.d(LOGTAG, " state: " + intent.getIntExtra("state", 0)); Log.d(LOGTAG, " name: " + intent.getStringExtra("name")); mHeadsetPlugged = (intent.getIntExtra("state", 0) == 1); // if headset is plugged out it is required to disable // in minimal duration to avoid race conditions with // audio policy manager switch audio to speaker. if ((mHeadsetPlugged == false) && (mReceiver != null) && (mInternalAntennaAvailable == false) && (isFmRecordingOn() == false) && (mOverA2DP == false)) { mReceiver.disable(); mReceiver = null; } mHandler.post(mHeadsetPluginHandler); } else if(mA2dpDeviceState.isA2dpStateChange(action) ) { boolean bA2dpConnected = mA2dpDeviceState.isConnected(intent); if (!bA2dpConnected) { Log.d(LOGTAG, "A2DP device is dis-connected!"); mA2dpDisconnected = true; } if (isAnalogModeEnabled()) { Log.d(LOGTAG, "FM Audio Path is Analog Mode: FM Over BT not allowed"); return ; } //when playback is overA2Dp and A2dp disconnected //when playback is not overA2DP and A2DP Connected // In above two cases we need to Stop and Start FM which // will take care of audio routing if( (isFmOn()) && (true == ((bA2dpConnected)^(mOverA2DP))) && (false == mStoppedOnFocusLoss) && (!isSpeakerEnabled())) { stopFM(); startFM(); } } else if (action.equals("HDMI_CONNECTED")) { //FM should be off when HDMI is connected. fmOff(); try { /* Notify the UI/Activity, only if the service is "bound" by an activity and if Callbacks are registered */ if((mServiceInUse) && (mCallbacks != null) ) { mCallbacks.onDisabled(); } } catch (RemoteException e) { e.printStackTrace(); } } else if( action.equals(Intent.ACTION_SHUTDOWN)) { mAppShutdown = true; } } }; IntentFilter iFilter = new IntentFilter(); iFilter.addAction(Intent.ACTION_HEADSET_PLUG); iFilter.addAction(mA2dpDeviceState.getActionSinkStateChangedString()); iFilter.addAction("HDMI_CONNECTED"); iFilter.addAction(Intent.ACTION_SHUTDOWN); iFilter.addCategory(Intent.CATEGORY_DEFAULT); registerReceiver(mHeadsetReceiver, iFilter); } } public void registerMediaButtonReceiver() { if (mHeadsetHookListener == null) { mHeadsetHookListener = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { Log.d(LOGTAG, "ACTION_MEDIA_BUTTON Intent received"); String action = intent.getAction(); if (action.equals(Intent.ACTION_MEDIA_BUTTON)) { KeyEvent event = (KeyEvent) intent.getParcelableExtra(Intent.EXTRA_KEY_EVENT); if (event == null) { return; } int keycode = event.getKeyCode(); int key_action = event.getAction(); if((KeyEvent.KEYCODE_HEADSETHOOK == keycode) && (key_action == KeyEvent.ACTION_DOWN)) { if(isFmOn()){ //FM should be off when Headset hook pressed. 
fmOff(); if (isOrderedBroadcast()) { abortBroadcast(); } try { /* Notify the UI/Activity, only if the service is "bound" by an activity and if Callbacks are registered */ if((mServiceInUse) && (mCallbacks != null) ) { mCallbacks.onDisabled(); } } catch (RemoteException e) { e.printStackTrace(); } } else if( mServiceInUse ) { fmOn(); if (isOrderedBroadcast()) { abortBroadcast(); } try { /* Notify the UI/Activity, only if the service is "bound" by an activity and if Callbacks are registered */ if(mCallbacks != null ) { mCallbacks.onEnabled(); } } catch (RemoteException e) { e.printStackTrace(); } } } } } }; IntentFilter iFilter = new IntentFilter(); iFilter.addAction(Intent.ACTION_MEDIA_BUTTON); iFilter.setPriority(10000); // AudioService registers with 1000 and // consume the broadcast so our // priority to be higher registerReceiver(mHeadsetHookListener, iFilter); } } public void registerMusicServiceCommandReceiver() { if (mMusicCommandListener == null) { mMusicCommandListener = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { String action = intent.getAction(); if (action.equals("com.android.music.musicservicecommand")) { String cmd = intent.getStringExtra("command"); Log.d(LOGTAG, "Music Service command : "+cmd+ " received"); if (cmd != null && cmd.equals("pause")) { if (mA2dpDisconnected) { Log.d(LOGTAG, "not to pause,this is a2dp disconnected's pause"); mA2dpDisconnected = false; return; } if(isFmOn()){ fmOff(); if (isOrderedBroadcast()) { abortBroadcast(); } try { /* Notify the UI/Activity, only if the service is "bound" by an activity and if Callbacks are registered */ if((mServiceInUse) && (mCallbacks != null) ){ mCallbacks.onDisabled(); } } catch (RemoteException e) { e.printStackTrace(); } } } } } }; IntentFilter commandFilter = new IntentFilter(); commandFilter.addAction("com.android.music.musicservicecommand"); registerReceiver(mMusicCommandListener, commandFilter); } } final Runnable mHeadsetPluginHandler = new Runnable() { public void run() { /* Update the UI based on the state change of the headset/antenna*/ if(!isAntennaAvailable()) { /* Disable FM and let the UI know */ fmOff(); try { /* Notify the UI/Activity, only if the service is "bound" by an activity and if Callbacks are registered */ if((mServiceInUse) && (mCallbacks != null) ) { mCallbacks.onDisabled(); } } catch (RemoteException e) { e.printStackTrace(); } } else { /* headset is plugged back in, So turn on FM if: - FM is not already ON. 
- If the FM UI/Activity is in the foreground (the service is "bound" by an activity and if Callbacks are registered) */ if ( (!isFmOn()) && (mServiceInUse) && (mCallbacks != null)) { if (mRadioState) { if( true != fmOn() ) { return; } try { mCallbacks.onEnabled(); } catch (RemoteException e) { e.printStackTrace(); } } else { try { mCallbacks.onDisabled(); } catch (RemoteException e) { e.printStackTrace(); } } } } } }; @Override public IBinder onBind(Intent intent) { mDelayedStopHandler.removeCallbacksAndMessages(null); mServiceInUse = true; /* Application/UI is attached, so get out of lower power mode */ setLowPowerMode(false); Log.d(LOGTAG, "onBind"); return mBinder; } @Override public void onRebind(Intent intent) { mDelayedStopHandler.removeCallbacksAndMessages(null); mServiceInUse = true; /* Application/UI is attached, so get out of lower power mode */ setLowPowerMode(false); if(false == mPlaybackInProgress) startFM(); Log.d(LOGTAG, "onRebind"); } @Override public void onStart(Intent intent, int startId) { Log.d(LOGTAG, "onStart"); mServiceStartId = startId; // adding code for audio focus gain. AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); audioManager.requestAudioFocus(mAudioFocusListener, AudioManager.STREAM_FM, AudioManager.AUDIOFOCUS_GAIN_TRANSIENT); // make sure the service will shut down on its own if it was // just started but not bound to and nothing is playing mDelayedStopHandler.removeCallbacksAndMessages(null); Message msg = mDelayedStopHandler.obtainMessage(); msg.what = FM_STOP; mDelayedStopHandler.sendMessageDelayed(msg, IDLE_DELAY); } @Override public boolean onUnbind(Intent intent) { mServiceInUse = false; Log.d(LOGTAG, "onUnbind"); /* Application/UI is not attached, so go into lower power mode */ unregisterCallbacks(); setLowPowerMode(true); if (isFmOn()) { // something is currently playing, or will be playing once // an in-progress call ends, so don't stop the service now. return true; } stopSelf(mServiceStartId); return true; } private void startFM(){ Log.d(LOGTAG, "In startFM"); if(true == mAppShutdown) { // not to send intent to AudioManager in Shutdown return; } if (isCallActive()) { // when Call is active never let audio playback mResumeAfterCall = true; return; } if ( true == mPlaybackInProgress ) // no need to resend event return; AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); audioManager.requestAudioFocus(mAudioFocusListener, AudioManager.STREAM_FM, AudioManager.AUDIOFOCUS_GAIN_TRANSIENT); mStoppedOnFocusLoss = false; if ((true == mA2dpDeviceState.isDeviceAvailable()) && (!isSpeakerEnabled()) && !isAnalogModeEnabled() && (true == startA2dpPlayback())) { mOverA2DP=true; } else { Log.d(LOGTAG, "FMRadio: sending the intent"); //reason for resending the Speaker option is we are sending //ACTION_FM=1 to AudioManager, the previous state of Speaker we set //need not be retained by the Audio Manager. 
if (isSpeakerEnabled()) { mSpeakerPhoneOn = true; AudioSystem.setForceUse(AudioSystem.FOR_MEDIA, AudioSystem.FORCE_SPEAKER); } Intent intent = new Intent(Intent.ACTION_FM); intent.putExtra("state", 1); getApplicationContext().sendBroadcast(intent); } mPlaybackInProgress = true; } private void stopFM(){ Log.d(LOGTAG, "In stopFM"); if (mOverA2DP==true){ mOverA2DP=false; stopA2dpPlayback(); }else{ Log.d(LOGTAG, "FMRadio: sending the intent"); Intent intent = new Intent(Intent.ACTION_FM); intent.putExtra("state", 0); getApplicationContext().sendBroadcast(intent); } mPlaybackInProgress = false; } public boolean startRecording() { Log.d(LOGTAG, "In startRecording of Recorder"); if( (true == mSingleRecordingInstanceSupported) && (true == mOverA2DP )) { Toast.makeText( this, "playback on BT in progress,can't record now", Toast.LENGTH_SHORT).show(); return false; } stopRecording(); mSampleFile = null; File sampleDir = Environment.getExternalStorageDirectory(); if (!sampleDir.canWrite()) // Workaround for broken sdcard support on // the device. sampleDir = new File("/sdcard/sdcard"); try { mSampleFile = File .createTempFile("FMRecording", ".3gpp", sampleDir); } catch (IOException e) { Log.e(LOGTAG, "Not able to access SD Card"); Toast.makeText(this, "Not able to access SD Card", Toast.LENGTH_SHORT).show(); return false; } mRecorder = new MediaRecorder(); if (mRecorder == null) { Toast.makeText(this,"MediaRecorder failed to create an instance", Toast.LENGTH_SHORT).show(); return false; } try { mRecorder.setAudioSource(MediaRecorder.AudioSource.FM_RX); mRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP); mRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC); } catch (RuntimeException exception) { mRecorder.reset(); mRecorder.release(); mRecorder = null; return false; } mRecorder.setOutputFile(mSampleFile.getAbsolutePath()); try { mRecorder.prepare(); mRecorder.start(); } catch (IOException e) { mRecorder.reset(); mRecorder.release(); mRecorder = null; return false; } catch (RuntimeException e) { mRecorder.reset(); mRecorder.release(); mRecorder = null; return false; } mFmRecordingOn = true; mSampleStart = System.currentTimeMillis(); return true; } public boolean startA2dpPlayback() { Log.d(LOGTAG, "In startA2dpPlayback"); if( (true == mSingleRecordingInstanceSupported) && (true == mFmRecordingOn )) { Toast.makeText(this, "Recording already in progress,can't play on BT", Toast.LENGTH_SHORT).show(); return false; } stopA2dpPlayback(); mA2dp = new MediaRecorder(); if (mA2dp == null) { Toast.makeText(this,"A2dpPlayback failed to create an instance", Toast.LENGTH_SHORT).show(); return false; } try { mA2dp.setAudioSource(MediaRecorder.AudioSource.FM_RX_A2DP); mA2dp.setOutputFormat(MediaRecorder.OutputFormat.RAW_AMR); mA2dp.setAudioEncoder(MediaRecorder.OutputFormat.DEFAULT); File sampleDir = Environment.getExternalStorageDirectory(); if (!sampleDir.canWrite()) sampleDir = new File("/sdcard/sdcard"); try { mA2DPSampleFile = File .createTempFile("FMRecording", ".3gpp", sampleDir); } catch (IOException e) { Log.e(LOGTAG, "Not able to access SD Card"); Toast.makeText(this, "Not able to access SD Card", Toast.LENGTH_SHORT).show(); return false; } mA2dp.setOutputFile(mA2DPSampleFile.getAbsolutePath()); mA2dp.prepare(); mA2dp.start(); } catch (Exception exception) { mA2dp.reset(); mA2dp.release(); mA2dp = null; return false; } return true; } public void stopA2dpPlayback() { if (mA2dp == null) return; if(mA2DPSampleFile != null) { try { mA2DPSampleFile.delete(); } catch (Exception e) { Log.e(LOGTAG, 
"Not able to delete file"); } } try { mA2dp.stop(); mA2dp.reset(); mA2dp.release(); mA2dp = null; } catch (Exception exception ) { Log.e( LOGTAG, "Stop failed with exception"+ exception); } return; } public void stopRecording() { mFmRecordingOn = false; if (mRecorder == null) return; mRecorder.stop(); mRecorder.reset(); mRecorder.release(); mRecorder = null; int sampleLength = (int)((System.currentTimeMillis() - mSampleStart)/1000 ); if (sampleLength == 0) return; String state = Environment.getExternalStorageState(); Log.d(LOGTAG, "storage state is " + state); if (Environment.MEDIA_MOUNTED.equals(state)) { this.addToMediaDB(mSampleFile); } else{ Log.e(LOGTAG, "SD card must have removed during recording. "); Toast.makeText(this, "Recording aborted", Toast.LENGTH_SHORT).show(); } try { if((mServiceInUse) && (mCallbacks != null) ) { mCallbacks.onRecordingStopped(); } } catch (RemoteException e) { e.printStackTrace(); } return; } /* * Adds file and returns content uri. */ private Uri addToMediaDB(File file) { Log.d(LOGTAG, "In addToMediaDB"); Resources res = getResources(); ContentValues cv = new ContentValues(); long current = System.currentTimeMillis(); long modDate = file.lastModified(); Date date = new Date(current); SimpleDateFormat formatter = new SimpleDateFormat( res.getString(R.string.audio_db_title_format)); String title = formatter.format(date); // Lets label the recorded audio file as NON-MUSIC so that the file // won't be displayed automatically, except for in the playlist. cv.put(MediaStore.Audio.Media.IS_MUSIC, "1"); cv.put(MediaStore.Audio.Media.TITLE, title); cv.put(MediaStore.Audio.Media.DATA, file.getAbsolutePath()); cv.put(MediaStore.Audio.Media.DATE_ADDED, (int) (current / 1000)); cv.put(MediaStore.Audio.Media.DATE_MODIFIED, (int) (modDate / 1000)); cv.put(MediaStore.Audio.Media.MIME_TYPE, "AUDIO_AAC_MP4"); cv.put(MediaStore.Audio.Media.ARTIST, res.getString(R.string.audio_db_artist_name)); cv.put(MediaStore.Audio.Media.ALBUM, res.getString(R.string.audio_db_album_name)); Log.d(LOGTAG, "Inserting audio record: " + cv.toString()); ContentResolver resolver = getContentResolver(); Uri base = MediaStore.Audio.Media.EXTERNAL_CONTENT_URI; Log.d(LOGTAG, "ContentURI: " + base); Uri result = resolver.insert(base, cv); if (result == null) { Toast.makeText(this, "Unable to save recorded audio", Toast.LENGTH_SHORT).show(); return null; } if (getPlaylistId(res) == -1) { createPlaylist(res, resolver); } int audioId = Integer.valueOf(result.getLastPathSegment()); addToPlaylist(resolver, audioId, getPlaylistId(res)); // Notify those applications such as Music listening to the // scanner events that a recorded audio file just created. 
sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, result)); return result; } private int getPlaylistId(Resources res) { Uri uri = MediaStore.Audio.Playlists.getContentUri("external"); final String[] ids = new String[] { MediaStore.Audio.Playlists._ID }; final String where = MediaStore.Audio.Playlists.NAME + "=?"; final String[] args = new String[] { res.getString(R.string.audio_db_playlist_name) }; Cursor cursor = query(uri, ids, where, args, null); if (cursor == null) { Log.v(LOGTAG, "query returns null"); } int id = -1; if (cursor != null) { cursor.moveToFirst(); if (!cursor.isAfterLast()) { id = cursor.getInt(0); } cursor.close(); } return id; } private Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) { try { ContentResolver resolver = getContentResolver(); if (resolver == null) { return null; } return resolver.query(uri, projection, selection, selectionArgs, sortOrder); } catch (UnsupportedOperationException ex) { return null; } } private Uri createPlaylist(Resources res, ContentResolver resolver) { ContentValues cv = new ContentValues(); cv.put(MediaStore.Audio.Playlists.NAME, res.getString(R.string.audio_db_playlist_name)); Uri uri = resolver.insert(MediaStore.Audio.Playlists.getContentUri("external"), cv); if (uri == null) { Toast.makeText(this, "Unable to save recorded audio", Toast.LENGTH_SHORT).show(); } return uri; } private void addToPlaylist(ContentResolver resolver, int audioId, long playlistId) { String[] cols = new String[] { "count(*)" }; Uri uri = MediaStore.Audio.Playlists.Members.getContentUri("external", playlistId); Cursor cur = resolver.query(uri, cols, null, null, null); final int base; if (cur != null) { cur.moveToFirst(); base = cur.getInt(0); cur.close(); } else { base = 0; } ContentValues values = new ContentValues(); values.put(MediaStore.Audio.Playlists.Members.PLAY_ORDER, Integer.valueOf(base + audioId)); values.put(MediaStore.Audio.Playlists.Members.AUDIO_ID, audioId); resolver.insert(uri, values); } private void fmActionOnCallState( int state ) { //if Call Status is non IDLE we need to Mute FM as well stop recording if //any. Similarly once call is ended FM should be unmuted. 
AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); if((TelephonyManager.CALL_STATE_OFFHOOK == state)|| (TelephonyManager.CALL_STATE_RINGING == state)) { if (state == TelephonyManager.CALL_STATE_RINGING) { int ringvolume = audioManager.getStreamVolume(AudioManager.STREAM_RING); if (ringvolume == 0) { return; } } boolean bTempSpeaker = mSpeakerPhoneOn; //need to restore SpeakerPhone boolean bTempMute = mMuted;// need to restore Mute status fmOff(); try { // Notify the UI/Activity, only if the service is "bound" // by an activity and if Callbacks are registered if((mServiceInUse) && (mCallbacks != null) ) { mCallbacks.onDisabled(); } } catch (RemoteException e) { e.printStackTrace(); } mResumeAfterCall = true; mSpeakerPhoneOn = bTempSpeaker; mMuted = bTempMute; } else if (state == TelephonyManager.CALL_STATE_IDLE) { // start playing again if (mResumeAfterCall) { // resume playback only if FM Radio was playing // when the call was answered if ( (isAntennaAvailable()) && (!isFmOn()) && (mServiceInUse) && (mCallbacks != null)) { if (mRadioState) { Log.d(LOGTAG, "Resuming after call:" ); if( true != fmOn() ) { return; } mResumeAfterCall = false; try { mCallbacks.onEnabled(); } catch (RemoteException e) { e.printStackTrace(); } } } } }//idle } /* Handle Phone Call + FM Concurrency */ private PhoneStateListener mPhoneStateListener = new PhoneStateListener() { @Override public void onCallStateChanged(int state, String incomingNumber) { Log.d(LOGTAG, "onCallStateChanged: State - " + state ); Log.d(LOGTAG, "onCallStateChanged: incomingNumber - " + incomingNumber ); fmActionOnCallState(state ); } @Override public void onDataActivity (int direction) { Log.d(LOGTAG, "onDataActivity - " + direction ); if (direction == TelephonyManager.DATA_ACTIVITY_NONE || direction == TelephonyManager.DATA_ACTIVITY_DORMANT) { if (mReceiver != null) { Message msg = mDelayedStopHandler.obtainMessage(RESET_NOTCH_FILTER); mDelayedStopHandler.sendMessageDelayed(msg, 10000); } } else { if (mReceiver != null) { if( true == mNotchFilterSet ) { mDelayedStopHandler.removeMessages(RESET_NOTCH_FILTER); } else { mReceiver.setNotchFilter(true); mNotchFilterSet = true; } } } } }; private Handler mDelayedStopHandler = new Handler() { @Override public void handleMessage(Message msg) { switch (msg.what) { case FM_STOP: // Check again to make sure nothing is playing right now if (isFmOn() || mServiceInUse) { return; } Log.d(LOGTAG, "mDelayedStopHandler: stopSelf"); stopSelf(mServiceStartId); break; case RESET_NOTCH_FILTER: if (mReceiver != null) { mReceiver.setNotchFilter(false); mNotchFilterSet = false; } break; case STOPSERVICE_ONSLEEP: fmOff(); break; case STOPRECORD_ONTIMEOUT: stopRecording(); break; case FOCUSCHANGE: if( false == isFmOn() ) { Log.v(LOGTAG, "FM is not running, not handling change"); return; } switch (msg.arg1) { case AudioManager.AUDIOFOCUS_LOSS: Log.v(LOGTAG, "AudioFocus: received AUDIOFOCUS_LOSS"); //intentional fall through. 
case AudioManager.AUDIOFOCUS_LOSS_TRANSIENT: Log.v(LOGTAG, "AudioFocus: received AUDIOFOCUS_LOSS_TRANSIENT"); if (mSpeakerPhoneOn) { mSpeakerPhoneOn = false; AudioSystem.setForceUse(AudioSystem.FOR_MEDIA, AudioSystem.FORCE_NONE); } if(true == isFmRecordingOn()) stopRecording(); if(true == mPlaybackInProgress) stopFM(); mStoppedOnFocusLoss = true; break; case AudioManager.AUDIOFOCUS_GAIN: Log.v(LOGTAG, "AudioFocus: received AUDIOFOCUS_GAIN"); if(false == mPlaybackInProgress) startFM(); mStoppedOnFocusLoss = false; break; case AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK: default: Log.e(LOGTAG, "Unknown audio focus change code"+msg.arg1); } break; } } }; /** * Registers an intent to listen for * ACTION_SCREEN_ON/ACTION_SCREEN_OFF notifications. This intent * is called to know iwhen the screen is turned on/off. */ public void registerScreenOnOffListener() { if (mScreenOnOffReceiver == null) { mScreenOnOffReceiver = new BroadcastReceiver() { @Override public void onReceive(Context context, Intent intent) { String action = intent.getAction(); if (action.equals(Intent.ACTION_SCREEN_ON)) { Log.d(LOGTAG, "ACTION_SCREEN_ON Intent received"); //Screen turned on, set FM module into normal power mode mHandler.post(mScreenOnHandler); } else if (action.equals(Intent.ACTION_SCREEN_OFF)) { Log.d(LOGTAG, "ACTION_SCREEN_OFF Intent received"); //Screen turned on, set FM module into low power mode mHandler.post(mScreenOffHandler); } } }; IntentFilter iFilter = new IntentFilter(); iFilter.addAction(Intent.ACTION_SCREEN_ON); iFilter.addAction(Intent.ACTION_SCREEN_OFF); registerReceiver(mScreenOnOffReceiver, iFilter); } } /* Handle all the Screen On actions: Set FM Power mode to Normal */ final Runnable mScreenOnHandler = new Runnable() { public void run() { setLowPowerMode(false); } }; /* Handle all the Screen Off actions: Set FM Power mode to Low Power This will reduce all the interrupts coming up from the SoC, saving power */ final Runnable mScreenOffHandler = new Runnable() { public void run() { setLowPowerMode(true); } }; /* Show the FM Notification */ public void startNotification() { RemoteViews views = new RemoteViews(getPackageName(), R.layout.statusbar); views.setImageViewResource(R.id.icon, R.drawable.stat_notify_fm); if (isFmOn()) { views.setTextViewText(R.id.frequency, getTunedFrequencyString()); } else { views.setTextViewText(R.id.frequency, ""); } Notification status = new Notification(); status.contentView = views; status.flags |= Notification.FLAG_ONGOING_EVENT; status.icon = R.drawable.stat_notify_fm; status.contentIntent = PendingIntent.getActivity(this, 0, new Intent("com.quicinc.fmradio.FMRADIO_ACTIVITY"), 0); startForeground(FMRADIOSERVICE_STATUS, status); //NotificationManager nm = (NotificationManager) // getSystemService(Context.NOTIFICATION_SERVICE); //nm.notify(FMRADIOSERVICE_STATUS, status); //setForeground(true); mFMOn = true; } private void stop() { gotoIdleState(); mFMOn = false; } private void gotoIdleState() { mDelayedStopHandler.removeCallbacksAndMessages(null); Message msg = mDelayedStopHandler.obtainMessage(); msg.what = FM_STOP; mDelayedStopHandler.sendMessageDelayed(msg, IDLE_DELAY); //NotificationManager nm = //(NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE); //nm.cancel(FMRADIOSERVICE_STATUS); //setForeground(false); stopForeground(true); } /** Read's the internal Antenna available state from the FM * Device. 
*/ public void readInternalAntennaAvailable() { mInternalAntennaAvailable = false; if (mReceiver != null) { mInternalAntennaAvailable = mReceiver.getInternalAntenna(); Log.d(LOGTAG, "getInternalAntenna: " + mInternalAntennaAvailable); } } /* * By making this a static class with a WeakReference to the Service, we * ensure that the Service can be GCd even when the system process still * has a remote reference to the stub. */ static class ServiceStub extends IFMRadioService.Stub { WeakReference<FMRadioService> mService; ServiceStub(FMRadioService service) { mService = new WeakReference<FMRadioService>(service); } public boolean fmOn() throws RemoteException { mRadioState=true; return(mService.get().fmOn()); } public boolean fmOff() throws RemoteException { mRadioState=false; return(mService.get().fmOff()); } public boolean isFmOn() { return(mService.get().isFmOn()); } public boolean isAnalogModeEnabled() { return(mService.get().isAnalogModeEnabled()); } public boolean isFmRecordingOn() { return(mService.get().isFmRecordingOn()); } public boolean isSpeakerEnabled() { return(mService.get().isSpeakerEnabled()); } public boolean fmReconfigure() { return(mService.get().fmReconfigure()); } public void registerCallbacks(IFMRadioServiceCallbacks cb) throws RemoteException { mService.get().registerCallbacks(cb); } public void unregisterCallbacks() throws RemoteException { mService.get().unregisterCallbacks(); } public boolean routeAudio(int device) { return(mService.get().routeAudio(device)); } public boolean mute() { return(mService.get().mute()); } public boolean unMute() { return(mService.get().unMute()); } public boolean isMuted() { return(mService.get().isMuted()); } public boolean startRecording() { return(mService.get().startRecording()); } public void stopRecording() { mService.get().stopRecording(); } public boolean tune(int frequency) { return(mService.get().tune(frequency)); } public boolean seek(boolean up) { return(mService.get().seek(up)); } public void enableSpeaker(boolean speakerOn) { mService.get().enableSpeaker(speakerOn); } public boolean scan(int pty) { return(mService.get().scan(pty)); } public boolean seekPI(int piCode) { return(mService.get().seekPI(piCode)); } public boolean searchStrongStationList(int numStations) { return(mService.get().searchStrongStationList(numStations)); } public boolean cancelSearch() { return(mService.get().cancelSearch()); } public String getProgramService() { return(mService.get().getProgramService()); } public String getRadioText() { return(mService.get().getRadioText()); } public int getProgramType() { return(mService.get().getProgramType()); } public int getProgramID() { return(mService.get().getProgramID()); } public int[] getSearchList() { return(mService.get().getSearchList()); } public boolean setLowPowerMode(boolean enable) { return(mService.get().setLowPowerMode(enable)); } public int getPowerMode() { return(mService.get().getPowerMode()); } public boolean enableAutoAF(boolean bEnable) { return(mService.get().enableAutoAF(bEnable)); } public boolean enableStereo(boolean bEnable) { return(mService.get().enableStereo(bEnable)); } public boolean isAntennaAvailable() { return(mService.get().isAntennaAvailable()); } public boolean isWiredHeadsetAvailable() { return(mService.get().isWiredHeadsetAvailable()); } public boolean isCallActive() { return(mService.get().isCallActive()); } public int getRssi() { return (mService.get().getRssi()); } public int getIoC() { return (mService.get().getIoC()); } public int getMpxDcc() { return 
(mService.get().getMpxDcc()); } public int getIntDet() { return (mService.get().getIntDet()); } public void setHiLoInj(int inj) { mService.get().setHiLoInj(inj); } public void delayedStop(long duration, int nType) { mService.get().delayedStop(duration, nType); } public void cancelDelayedStop(int nType) { mService.get().cancelDelayedStop(nType); } public void requestFocus() { mService.get().requestFocus(); } } private final IBinder mBinder = new ServiceStub(this); private boolean setAudioPath(boolean analogMode) { if (mReceiver == null) { return false; } if (isAnalogModeEnabled() == analogMode) { Log.d(LOGTAG,"Analog Path already is set to "+analogMode); return false; } if (!isAnalogModeSupported()) { Log.d(LOGTAG,"Analog Path is not supported "); return false; } if (SystemProperties.getBoolean("hw.fm.digitalpath",false)) { return false; } boolean state = mReceiver.setAnalogMode(analogMode); if (false == state) { Log.d(LOGTAG, "Error in toggling analog/digital path " + analogMode); return false; } misAnalogPathEnabled = analogMode; return true; } /* * Turn ON FM: Powers up FM hardware, and initializes the FM module * . * @return true if fm Enable api was invoked successfully, false if the api failed. */ private boolean fmOn() { boolean bStatus=false; if ( TelephonyManager.CALL_STATE_IDLE != getCallState() ) { return bStatus; } if(mReceiver == null) { try { mReceiver = new FmReceiver(FMRADIO_DEVICE_FD_STRING, fmCallbacks); } catch (InstantiationException e) { throw new RuntimeException("FmReceiver service not available!"); } } if (mReceiver != null) { if (isFmOn()) { /* FM Is already on,*/ bStatus = true; Log.d(LOGTAG, "mReceiver.already enabled"); } else { // This sets up the FM radio device FmConfig config = FmSharedPreferences.getFMConfiguration(); Log.d(LOGTAG, "fmOn: RadioBand :"+ config.getRadioBand()); Log.d(LOGTAG, "fmOn: Emphasis :"+ config.getEmphasis()); Log.d(LOGTAG, "fmOn: ChSpacing :"+ config.getChSpacing()); Log.d(LOGTAG, "fmOn: RdsStd :"+ config.getRdsStd()); Log.d(LOGTAG, "fmOn: LowerLimit :"+ config.getLowerLimit()); Log.d(LOGTAG, "fmOn: UpperLimit :"+ config.getUpperLimit()); bStatus = mReceiver.enable(FmSharedPreferences.getFMConfiguration()); - setAudioPath(true); + if (isSpeakerEnabled()) { + setAudioPath(false); + } else { + setAudioPath(true); + } Log.d(LOGTAG, "mReceiver.enable done, Status :" + bStatus); } if (bStatus == true) { /* Put the hardware into normal mode */ bStatus = setLowPowerMode(false); Log.d(LOGTAG, "setLowPowerMode done, Status :" + bStatus); AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); if( (audioManager != null) &&(false == mPlaybackInProgress) ) { Log.d(LOGTAG, "mAudioManager.setFmRadioOn = true \n" ); //audioManager.setParameters("FMRadioOn="+mAudioDevice); int state = getCallState(); if ( TelephonyManager.CALL_STATE_IDLE != getCallState() ) { fmActionOnCallState(state); } else { startFM(); // enable FM Audio only when Call is IDLE } Log.d(LOGTAG, "mAudioManager.setFmRadioOn done \n" ); } if (mReceiver != null) { bStatus = mReceiver.registerRdsGroupProcessing(FmReceiver.FM_RX_RDS_GRP_RT_EBL| FmReceiver.FM_RX_RDS_GRP_PS_EBL| FmReceiver.FM_RX_RDS_GRP_AF_EBL| FmReceiver.FM_RX_RDS_GRP_PS_SIMPLE_EBL); Log.d(LOGTAG, "registerRdsGroupProcessing done, Status :" + bStatus); } bStatus = enableAutoAF(FmSharedPreferences.getAutoAFSwitch()); Log.d(LOGTAG, "enableAutoAF done, Status :" + bStatus); /* There is no internal Antenna*/ bStatus = mReceiver.setInternalAntenna(false); Log.d(LOGTAG, "setInternalAntenna done, 
Status :" + bStatus); /* Read back to verify the internal Antenna mode*/ readInternalAntennaAvailable(); startNotification(); bStatus = true; } else { mReceiver = null; // as enable failed no need to disable // failure of enable can be because handle // already open which gets effected if // we disable stop(); } } return(bStatus); } /* * Turn OFF FM: Disable the FM Host and hardware . * . * @return true if fm Disable api was invoked successfully, false if the api failed. */ private boolean fmOff() { boolean bStatus=false; if ( mSpeakerPhoneOn) { mSpeakerPhoneOn = false; AudioSystem.setForceUse(AudioSystem.FOR_MEDIA, AudioSystem.FORCE_NONE); } if (isFmRecordingOn()) { stopRecording(); } AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); if(audioManager != null) { Log.d(LOGTAG, "audioManager.setFmRadioOn = false \n" ); unMute(); stopFM(); //audioManager.setParameters("FMRadioOn=false"); Log.d(LOGTAG, "audioManager.setFmRadioOn false done \n" ); } if (isAnalogModeEnabled()) { SystemProperties.set("hw.fm.isAnalog","false"); misAnalogPathEnabled = false; } // This will disable the FM radio device if (mReceiver != null) { bStatus = mReceiver.disable(); mReceiver = null; } stop(); return(bStatus); } /* Returns whether FM hardware is ON. * * @return true if FM was tuned, searching. (at the end of * the search FM goes back to tuned). * */ public boolean isFmOn() { return mFMOn; } /* Returns true if Analog Path is enabled */ public boolean isAnalogModeEnabled() { return misAnalogPathEnabled; } public boolean isAnalogModeSupported() { return misAnalogModeSupported; } public boolean isFmRecordingOn() { return mFmRecordingOn; } public boolean isSpeakerEnabled() { return mSpeakerPhoneOn; } public void enableSpeaker(boolean speakerOn) { if(isCallActive()) return ; mSpeakerPhoneOn = speakerOn; boolean analogmode = isAnalogModeSupported(); if (false == speakerOn) { stopFM(); if (analogmode) { setAudioPath(true); } AudioSystem.setForceUse(AudioSystem.FOR_MEDIA, AudioSystem.FORCE_NONE); startFM(); } //Need to turn off BT path when Speaker is set on vice versa. if( !analogmode && true == mA2dpDeviceState.isDeviceAvailable()) { if( ((true == mOverA2DP) && (true == speakerOn)) || ((false == mOverA2DP) && (false == speakerOn)) ) { //disable A2DP playback for speaker option stopFM(); startFM(); } } if (speakerOn) { stopFM(); if (analogmode) { if (mMuted) { setAudioPath(false); } else { mute(); setAudioPath(false); unMute(); } } AudioSystem.setForceUse(AudioSystem.FOR_MEDIA, AudioSystem.FORCE_SPEAKER); startFM(); } } /* * ReConfigure the FM Setup parameters * - Band * - Channel Spacing (50/100/200 KHz) * - Emphasis (50/75) * - Frequency limits * - RDS/RBDS standard * * @return true if configure api was invoked successfully, false if the api failed. 
*/ public boolean fmReconfigure() { boolean bStatus=false; Log.d(LOGTAG, "fmReconfigure"); if (mReceiver != null) { // This sets up the FM radio device FmConfig config = FmSharedPreferences.getFMConfiguration(); Log.d(LOGTAG, "RadioBand :"+ config.getRadioBand()); Log.d(LOGTAG, "Emphasis :"+ config.getEmphasis()); Log.d(LOGTAG, "ChSpacing :"+ config.getChSpacing()); Log.d(LOGTAG, "RdsStd :"+ config.getRdsStd()); Log.d(LOGTAG, "LowerLimit :"+ config.getLowerLimit()); Log.d(LOGTAG, "UpperLimit :"+ config.getUpperLimit()); bStatus = mReceiver.configure(config); } return(bStatus); } /* * Register UI/Activity Callbacks */ public void registerCallbacks(IFMRadioServiceCallbacks cb) { mCallbacks = cb; } /* * unRegister UI/Activity Callbacks */ public void unregisterCallbacks() { mCallbacks=null; } /* * Route Audio to headset or speaker phone * @return true if routeAudio call succeeded, false if the route call failed. */ public boolean routeAudio(int audioDevice) { boolean bStatus=false; AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); //Log.d(LOGTAG, "routeAudio: " + audioDevice); switch (audioDevice) { case RADIO_AUDIO_DEVICE_WIRED_HEADSET: mAudioDevice = "headset"; break; case RADIO_AUDIO_DEVICE_SPEAKER: mAudioDevice = "speaker"; break; default: mAudioDevice = "headset"; break; } if (mReceiver != null) { //audioManager.setParameters("FMRadioOn=false"); //Log.d(LOGTAG, "mAudioManager.setFmRadioOn =" + mAudioDevice ); //audioManager.setParameters("FMRadioOn="+mAudioDevice); //Log.d(LOGTAG, "mAudioManager.setFmRadioOn done \n"); } return bStatus; } /* * Mute FM Hardware (SoC) * @return true if set mute mode api was invoked successfully, false if the api failed. */ public boolean mute() { boolean bCommandSent=true; if(isMuted()) return bCommandSent; if(isCallActive()) return false; AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); Log.d(LOGTAG, "mute:"); if (audioManager != null) { mMuted = true; audioManager.setStreamMute(AudioManager.STREAM_FM,true); } return bCommandSent; } /* * UnMute FM Hardware (SoC) * @return true if set mute mode api was invoked successfully, false if the api failed. */ public boolean unMute() { boolean bCommandSent=true; if(!isMuted()) return bCommandSent; if(isCallActive()) return false; Log.d(LOGTAG, "unMute:"); AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); if (audioManager != null) { mMuted = false; audioManager.setStreamMute(AudioManager.STREAM_FM,false); if (mResumeAfterCall) { //We are unmuting FM in a voice call. Need to enable FM audio routing. startFM(); } } return bCommandSent; } /* Returns whether FM Hardware(Soc) Audio is Muted. * * @return true if FM Audio is muted, false if not muted. * */ public boolean isMuted() { return mMuted; } /* Tunes to the specified frequency * * @return true if Tune command was invoked successfully, false if not muted. * Note: Callback FmRxEvRadioTuneStatus will be called when the tune * is complete */ public boolean tune(int frequency) { boolean bCommandSent=false; double doubleFrequency = frequency/1000.00; Log.d(LOGTAG, "tuneRadio: " + doubleFrequency); if (mReceiver != null) { mReceiver.setStation(frequency); bCommandSent = true; } return bCommandSent; } /* Seeks (Search for strong station) to the station in the direction specified * relative to the tuned station. * boolean up: true - Search in the forward direction. * false - Search in the backward direction. 
* @return true if Seek command was invoked successfully, false if not muted. * Note: 1. Callback FmRxEvSearchComplete will be called when the Search * is complete * 2. Callback FmRxEvRadioTuneStatus will also be called when tuned to a station * at the end of the Search or if the seach was cancelled. */ public boolean seek(boolean up) { boolean bCommandSent=false; if (mReceiver != null) { if (up == true) { Log.d(LOGTAG, "seek: Up"); mReceiver.searchStations(FmReceiver.FM_RX_SRCH_MODE_SEEK, FmReceiver.FM_RX_DWELL_PERIOD_1S, FmReceiver.FM_RX_SEARCHDIR_UP); } else { Log.d(LOGTAG, "seek: Down"); mReceiver.searchStations(FmReceiver.FM_RX_SRCH_MODE_SEEK, FmReceiver.FM_RX_DWELL_PERIOD_1S, FmReceiver.FM_RX_SEARCHDIR_DOWN); } bCommandSent = true; } return bCommandSent; } /* Scan (Search for station with a "preview" of "n" seconds) * FM Stations. It always scans in the forward direction relative to the * current tuned station. * int pty: 0 or a reserved PTY value- Perform a "strong" station search of all stations. * Valid/Known PTY - perform RDS Scan for that pty. * * @return true if Scan command was invoked successfully, false if not muted. * Note: 1. Callback FmRxEvRadioTuneStatus will be called when tuned to various stations * during the Scan. * 2. Callback FmRxEvSearchComplete will be called when the Search * is complete * 3. Callback FmRxEvRadioTuneStatus will also be called when tuned to a station * at the end of the Search or if the seach was cancelled. * */ public boolean scan(int pty) { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "scan: PTY: " + pty); if(FmSharedPreferences.isRBDSStd()) { /* RBDS : Validate PTY value?? */ if( ((pty > 0) && (pty <= 23)) || ((pty >= 29) && (pty <= 31)) ) { bCommandSent = mReceiver.searchStations(FmReceiver.FM_RX_SRCHRDS_MODE_SCAN_PTY, FmReceiver.FM_RX_DWELL_PERIOD_2S, FmReceiver.FM_RX_SEARCHDIR_UP, pty, 0); } else { bCommandSent = mReceiver.searchStations(FmReceiver.FM_RX_SRCH_MODE_SCAN, FmReceiver.FM_RX_DWELL_PERIOD_2S, FmReceiver.FM_RX_SEARCHDIR_UP); } } else { /* RDS : Validate PTY value?? */ if( (pty > 0) && (pty <= 31) ) { bCommandSent = mReceiver.searchStations(FmReceiver.FM_RX_SRCHRDS_MODE_SCAN_PTY, FmReceiver.FM_RX_DWELL_PERIOD_2S, FmReceiver.FM_RX_SEARCHDIR_UP, pty, 0); } else { bCommandSent = mReceiver.searchStations(FmReceiver.FM_RX_SRCH_MODE_SCAN, FmReceiver.FM_RX_DWELL_PERIOD_2S, FmReceiver.FM_RX_SEARCHDIR_UP); } } } return bCommandSent; } /* Search for the 'numStations' number of strong FM Stations. * * It searches in the forward direction relative to the current tuned station. * int numStations: maximum number of stations to search. * * @return true if Search command was invoked successfully, false if not muted. * Note: 1. Callback FmRxEvSearchListComplete will be called when the Search * is complete * 2. Callback FmRxEvRadioTuneStatus will also be called when tuned to * the previously tuned station. */ public boolean searchStrongStationList(int numStations) { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "searchStrongStationList: numStations: " + numStations); bCommandSent = mReceiver.searchStationList(FmReceiver.FM_RX_SRCHLIST_MODE_STRONG, FmReceiver.FM_RX_SEARCHDIR_UP, numStations, 0); } return bCommandSent; } /* Search for the FM Station that matches the RDS PI (Program Identifier) code. * It always scans in the forward direction relative to the current tuned station. * int piCode: PI Code of the station to search. * * @return true if Search command was invoked successfully, false if not muted. 
* Note: 1. Callback FmRxEvSearchComplete will be called when the Search * is complete * 2. Callback FmRxEvRadioTuneStatus will also be called when tuned to a station * at the end of the Search or if the seach was cancelled. */ public boolean seekPI(int piCode) { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "seekPI: piCode: " + piCode); bCommandSent = mReceiver.searchStations(FmReceiver.FM_RX_SRCHRDS_MODE_SEEK_PI, FmReceiver.FM_RX_DWELL_PERIOD_1S, FmReceiver.FM_RX_SEARCHDIR_UP, 0, piCode ); } return bCommandSent; } /* Cancel any ongoing Search (Seek/Scan/SearchStationList). * * @return true if Search command was invoked successfully, false if not muted. * Note: 1. Callback FmRxEvSearchComplete will be called when the Search * is complete/cancelled. * 2. Callback FmRxEvRadioTuneStatus will also be called when tuned to a station * at the end of the Search or if the seach was cancelled. */ public boolean cancelSearch() { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "cancelSearch"); bCommandSent = mReceiver.cancelSearch(); } return bCommandSent; } /* Retrieves the RDS Program Service (PS) String. * * @return String - RDS PS String. * Note: 1. This is a synchronous call that should typically called when * Callback FmRxEvRdsPsInfo is invoked. * 2. Since PS contains multiple fields, this Service reads all the fields and "caches" * the values and provides this helper routine for the Activity to get only the information it needs. * 3. The "cached" data fields are always "cleared" when the tune status changes. */ public String getProgramService() { String str = ""; if (mFMRxRDSData != null) { str = mFMRxRDSData.getPrgmServices(); if(str == null) { str= ""; } } Log.d(LOGTAG, "Program Service: [" + str + "]"); return str; } /* Retrieves the RDS Radio Text (RT) String. * * @return String - RDS RT String. * Note: 1. This is a synchronous call that should typically called when * Callback FmRxEvRdsRtInfo is invoked. * 2. Since RT contains multiple fields, this Service reads all the fields and "caches" * the values and provides this helper routine for the Activity to get only the information it needs. * 3. The "cached" data fields are always "cleared" when the tune status changes. */ public String getRadioText() { String str = ""; if (mFMRxRDSData != null) { str = mFMRxRDSData.getRadioText(); if(str == null) { str= ""; } } Log.d(LOGTAG, "Radio Text: [" + str + "]"); return str; } /* Retrieves the RDS Program Type (PTY) code. * * @return int - RDS PTY code. * Note: 1. This is a synchronous call that should typically called when * Callback FmRxEvRdsRtInfo and or FmRxEvRdsPsInfo is invoked. * 2. Since RT/PS contains multiple fields, this Service reads all the fields and "caches" * the values and provides this helper routine for the Activity to get only the information it needs. * 3. The "cached" data fields are always "cleared" when the tune status changes. */ public int getProgramType() { int pty = -1; if (mFMRxRDSData != null) { pty = mFMRxRDSData.getPrgmType(); } Log.d(LOGTAG, "PTY: [" + pty + "]"); return pty; } /* Retrieves the RDS Program Identifier (PI). * * @return int - RDS PI code. * Note: 1. This is a synchronous call that should typically called when * Callback FmRxEvRdsRtInfo and or FmRxEvRdsPsInfo is invoked. * 2. Since RT/PS contains multiple fields, this Service reads all the fields and "caches" * the values and provides this helper routine for the Activity to get only the information it needs. * 3. 
The "cached" data fields are always "cleared" when the tune status changes. */ public int getProgramID() { int pi = -1; if (mFMRxRDSData != null) { pi = mFMRxRDSData.getPrgmId(); } Log.d(LOGTAG, "PI: [" + pi + "]"); return pi; } /* Retrieves the station list from the SearchStationlist. * * @return Array of integers that represents the station frequencies. * Note: 1. This is a synchronous call that should typically called when * Callback onSearchListComplete. */ public int[] getSearchList() { int[] frequencyList = null; if (mReceiver != null) { Log.d(LOGTAG, "getSearchList: "); frequencyList = mReceiver.getStationList(); } return frequencyList; } /* Set the FM Power Mode on the FM hardware SoC. * Typically used when UI/Activity is in the background, so the Host is interrupted less often. * * boolean bLowPower: true: Enable Low Power mode on FM hardware. * false: Disable Low Power mode on FM hardware. (Put into normal power mode) * @return true if set power mode api was invoked successfully, false if the api failed. */ public boolean setLowPowerMode(boolean bLowPower) { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "setLowPowerMode: " + bLowPower); if(bLowPower) { bCommandSent = mReceiver.setPowerMode(FmReceiver.FM_RX_LOW_POWER_MODE); } else { bCommandSent = mReceiver.setPowerMode(FmReceiver.FM_RX_NORMAL_POWER_MODE); } } return bCommandSent; } /* Get the FM Power Mode on the FM hardware SoC. * * @return the device power mode. */ public int getPowerMode() { int powerMode=FmReceiver.FM_RX_NORMAL_POWER_MODE; if (mReceiver != null) { powerMode = mReceiver.getPowerMode(); Log.d(LOGTAG, "getLowPowerMode: " + powerMode); } return powerMode; } /* Set the FM module to auto switch to an Alternate Frequency for the * station if one the signal strength of that frequency is stronger than the * current tuned frequency. * * boolean bEnable: true: Auto switch to stronger alternate frequency. * false: Do not switch to alternate frequency. * * @return true if set Auto AF mode api was invoked successfully, false if the api failed. * Note: Callback FmRxEvRadioTuneStatus will be called when tune * is complete to a different frequency. */ public boolean enableAutoAF(boolean bEnable) { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "enableAutoAF: " + bEnable); bCommandSent = mReceiver.enableAFjump(bEnable); } return bCommandSent; } /* Set the FM module to Stereo Mode or always force it to Mono Mode. * Note: The stereo mode will be available only when the station is broadcasting * in Stereo mode. * * boolean bEnable: true: Enable Stereo Mode. * false: Always stay in Mono Mode. * * @return true if set Stereo mode api was invoked successfully, false if the api failed. */ public boolean enableStereo(boolean bEnable) { boolean bCommandSent=false; if (mReceiver != null) { Log.d(LOGTAG, "enableStereo: " + bEnable); bCommandSent = mReceiver.setStereoMode(bEnable); } return bCommandSent; } /** Determines if an internal Antenna is available. * Returns the cached value initialized on FMOn. * * @return true if internal antenna is available or wired * headset is plugged in, false if internal antenna is * not available and wired headset is not plugged in. */ public boolean isAntennaAvailable() { boolean bAvailable = false; if ((mInternalAntennaAvailable) || (mHeadsetPlugged) ) { bAvailable = true; } return bAvailable; } /** Determines if a Wired headset is plugged in. Returns the * cached value initialized on broadcast receiver * initialization. 
* * @return true if wired headset is plugged in, false if wired * headset is not plugged in. */ public boolean isWiredHeadsetAvailable() { return (mHeadsetPlugged); } public boolean isCallActive() { //Non-zero: Call state is RINGING or OFFHOOK on the available subscriptions //zero: Call state is IDLE on all the available subscriptions if(0 != getCallState()) return true; return false; } public int getCallState() { TelephonyManager tmgr = (TelephonyManager) getSystemService(Context.TELEPHONY_SERVICE); return tmgr.getCallState(); } /* Receiver callbacks back from the FM Stack */ FmRxEvCallbacksAdaptor fmCallbacks = new FmRxEvCallbacksAdaptor() { public void FmRxEvEnableReceiver() { Log.d(LOGTAG, "FmRxEvEnableReceiver"); } public void FmRxEvDisableReceiver() { Log.d(LOGTAG, "FmRxEvDisableReceiver"); } public void FmRxEvConfigReceiver() { Log.d(LOGTAG, "FmRxEvConfigReceiver"); } public void FmRxEvMuteModeSet() { Log.d(LOGTAG, "FmRxEvMuteModeSet"); } public void FmRxEvStereoModeSet() { Log.d(LOGTAG, "FmRxEvStereoModeSet"); } public void FmRxEvRadioStationSet() { Log.d(LOGTAG, "FmRxEvRadioStationSet"); } public void FmRxEvPowerModeSet() { Log.d(LOGTAG, "FmRxEvPowerModeSet"); } public void FmRxEvSetSignalThreshold() { Log.d(LOGTAG, "FmRxEvSetSignalThreshold"); } public void FmRxEvRadioTuneStatus(int frequency) { Log.d(LOGTAG, "FmRxEvRadioTuneStatus: Tuned Frequency: " +frequency); try { FmSharedPreferences.setTunedFrequency(frequency); mPrefs.Save(); //Log.d(LOGTAG, "Call mCallbacks.onTuneStatusChanged"); /* Since the Tuned Status changed, clear out the RDSData cached */ mFMRxRDSData = null; if(mCallbacks != null) { mCallbacks.onTuneStatusChanged(); } /* Update the frequency in the StatusBar's Notification */ startNotification(); } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvStationParameters() { Log.d(LOGTAG, "FmRxEvStationParameters"); } public void FmRxEvRdsLockStatus(boolean bRDSSupported) { Log.d(LOGTAG, "FmRxEvRdsLockStatus: " + bRDSSupported); try { if(mCallbacks != null) { mCallbacks.onStationRDSSupported(bRDSSupported); } } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvStereoStatus(boolean stereo) { Log.d(LOGTAG, "FmRxEvStereoStatus: " + stereo); try { if(mCallbacks != null) { mCallbacks.onAudioUpdate(stereo); } } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvServiceAvailable(boolean signal) { Log.d(LOGTAG, "FmRxEvServiceAvailable"); if(signal) { Log.d(LOGTAG, "FmRxEvServiceAvailable: Tuned frequency is above signal threshold level"); } else { Log.d(LOGTAG, "FmRxEvServiceAvailable: Tuned frequency is below signal threshold level"); } } public void FmRxEvGetSignalThreshold() { Log.d(LOGTAG, "FmRxEvGetSignalThreshold"); } public void FmRxEvSearchInProgress() { Log.d(LOGTAG, "FmRxEvSearchInProgress"); } public void FmRxEvSearchRdsInProgress() { Log.d(LOGTAG, "FmRxEvSearchRdsInProgress"); } public void FmRxEvSearchListInProgress() { Log.d(LOGTAG, "FmRxEvSearchListInProgress"); } public void FmRxEvSearchComplete(int frequency) { Log.d(LOGTAG, "FmRxEvSearchComplete: Tuned Frequency: " +frequency); try { FmSharedPreferences.setTunedFrequency(frequency); //Log.d(LOGTAG, "Call mCallbacks.onSearchComplete"); /* Since the Tuned Status changed, clear out the RDSData cached */ mFMRxRDSData = null; if(mCallbacks != null) { mCallbacks.onSearchComplete(); } /* Update the frequency in the StatusBar's Notification */ startNotification(); } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvSearchRdsComplete() 
{ Log.d(LOGTAG, "FmRxEvSearchRdsComplete"); } public void FmRxEvSearchListComplete() { Log.d(LOGTAG, "FmRxEvSearchListComplete"); try { if(mCallbacks != null) { mCallbacks.onSearchListComplete(); } } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvSearchCancelled() { Log.d(LOGTAG, "FmRxEvSearchCancelled: Cancelled the on-going search operation."); } public void FmRxEvRdsGroupData() { Log.d(LOGTAG, "FmRxEvRdsGroupData"); } public void FmRxEvRdsPsInfo() { Log.d(LOGTAG, "FmRxEvRdsPsInfo: "); try { if(mReceiver != null) { mFMRxRDSData = mReceiver.getPSInfo(); if(mFMRxRDSData != null) { Log.d(LOGTAG, "PI: [" + mFMRxRDSData.getPrgmId() + "]"); Log.d(LOGTAG, "PTY: [" + mFMRxRDSData.getPrgmType() + "]"); Log.d(LOGTAG, "PS: [" + mFMRxRDSData.getPrgmServices() + "]"); } if(mCallbacks != null) { mCallbacks.onProgramServiceChanged(); } } } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvRdsRtInfo() { Log.d(LOGTAG, "FmRxEvRdsRtInfo"); try { //Log.d(LOGTAG, "Call mCallbacks.onRadioTextChanged"); if(mReceiver != null) { mFMRxRDSData = mReceiver.getRTInfo(); if(mFMRxRDSData != null) { Log.d(LOGTAG, "PI: [" + mFMRxRDSData.getPrgmId() + "]"); Log.d(LOGTAG, "PTY: [" + mFMRxRDSData.getPrgmType() + "]"); Log.d(LOGTAG, "RT: [" + mFMRxRDSData.getRadioText() + "]"); } if(mCallbacks != null) { mCallbacks.onRadioTextChanged(); } } } catch (RemoteException e) { e.printStackTrace(); } } public void FmRxEvRdsAfInfo() { Log.d(LOGTAG, "FmRxEvRdsAfInfo"); } public void FmRxEvRdsPiMatchAvailable() { Log.d(LOGTAG, "FmRxEvRdsPiMatchAvailable"); } public void FmRxEvRdsGroupOptionsSet() { Log.d(LOGTAG, "FmRxEvRdsGroupOptionsSet"); } public void FmRxEvRdsProcRegDone() { Log.d(LOGTAG, "FmRxEvRdsProcRegDone"); } public void FmRxEvRdsPiMatchRegDone() { Log.d(LOGTAG, "FmRxEvRdsPiMatchRegDone"); } }; /* * Read the Tuned Frequency from the FM module. */ private String getTunedFrequencyString() { double frequency = FmSharedPreferences.getTunedFrequency() / 1000.0; String frequencyString = getString(R.string.stat_notif_frequency, (""+frequency)); return frequencyString; } public int getRssi() { return mReceiver.getRssi(); } public int getIoC(){ return mReceiver.getIoverc(); } public int getIntDet(){ return mReceiver.getIntDet(); } public int getMpxDcc(){ return mReceiver.getMpxDcc(); } public void setHiLoInj(int inj){ mReceiver.setHiLoInj(inj); } //handling the sleep and record stop when FM App not in focus private void delayedStop(long duration, int nType) { int whatId = (nType == STOP_SERVICE) ? STOPSERVICE_ONSLEEP: STOPRECORD_ONTIMEOUT ; Message finished = mDelayedStopHandler.obtainMessage(whatId); mDelayedStopHandler.sendMessageDelayed(finished,duration); } private void cancelDelayedStop(int nType) { int whatId = (nType == STOP_SERVICE) ? STOPSERVICE_ONSLEEP: STOPRECORD_ONTIMEOUT ; mDelayedStopHandler.removeMessages(whatId); } private void requestFocus() { if( (false == mPlaybackInProgress) && (true == mStoppedOnFocusLoss) ) { // adding code for audio focus gain. AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); audioManager.requestAudioFocus(mAudioFocusListener, AudioManager.STREAM_FM, AudioManager.AUDIOFOCUS_GAIN_TRANSIENT); startFM(); mStoppedOnFocusLoss = false; } } private OnAudioFocusChangeListener mAudioFocusListener = new OnAudioFocusChangeListener() { public void onAudioFocusChange(int focusChange) { mDelayedStopHandler.obtainMessage(FOCUSCHANGE, focusChange, 0).sendToTarget(); } }; }
true
true
private boolean fmOn() { boolean bStatus=false; if ( TelephonyManager.CALL_STATE_IDLE != getCallState() ) { return bStatus; } if(mReceiver == null) { try { mReceiver = new FmReceiver(FMRADIO_DEVICE_FD_STRING, fmCallbacks); } catch (InstantiationException e) { throw new RuntimeException("FmReceiver service not available!"); } } if (mReceiver != null) { if (isFmOn()) { /* FM Is already on,*/ bStatus = true; Log.d(LOGTAG, "mReceiver.already enabled"); } else { // This sets up the FM radio device FmConfig config = FmSharedPreferences.getFMConfiguration(); Log.d(LOGTAG, "fmOn: RadioBand :"+ config.getRadioBand()); Log.d(LOGTAG, "fmOn: Emphasis :"+ config.getEmphasis()); Log.d(LOGTAG, "fmOn: ChSpacing :"+ config.getChSpacing()); Log.d(LOGTAG, "fmOn: RdsStd :"+ config.getRdsStd()); Log.d(LOGTAG, "fmOn: LowerLimit :"+ config.getLowerLimit()); Log.d(LOGTAG, "fmOn: UpperLimit :"+ config.getUpperLimit()); bStatus = mReceiver.enable(FmSharedPreferences.getFMConfiguration()); setAudioPath(true); Log.d(LOGTAG, "mReceiver.enable done, Status :" + bStatus); } if (bStatus == true) { /* Put the hardware into normal mode */ bStatus = setLowPowerMode(false); Log.d(LOGTAG, "setLowPowerMode done, Status :" + bStatus); AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); if( (audioManager != null) &&(false == mPlaybackInProgress) ) { Log.d(LOGTAG, "mAudioManager.setFmRadioOn = true \n" ); //audioManager.setParameters("FMRadioOn="+mAudioDevice); int state = getCallState(); if ( TelephonyManager.CALL_STATE_IDLE != getCallState() ) { fmActionOnCallState(state); } else { startFM(); // enable FM Audio only when Call is IDLE } Log.d(LOGTAG, "mAudioManager.setFmRadioOn done \n" ); } if (mReceiver != null) { bStatus = mReceiver.registerRdsGroupProcessing(FmReceiver.FM_RX_RDS_GRP_RT_EBL| FmReceiver.FM_RX_RDS_GRP_PS_EBL| FmReceiver.FM_RX_RDS_GRP_AF_EBL| FmReceiver.FM_RX_RDS_GRP_PS_SIMPLE_EBL); Log.d(LOGTAG, "registerRdsGroupProcessing done, Status :" + bStatus); } bStatus = enableAutoAF(FmSharedPreferences.getAutoAFSwitch()); Log.d(LOGTAG, "enableAutoAF done, Status :" + bStatus); /* There is no internal Antenna*/ bStatus = mReceiver.setInternalAntenna(false); Log.d(LOGTAG, "setInternalAntenna done, Status :" + bStatus); /* Read back to verify the internal Antenna mode*/ readInternalAntennaAvailable(); startNotification(); bStatus = true; } else { mReceiver = null; // as enable failed no need to disable // failure of enable can be because handle // already open which gets effected if // we disable stop(); } } return(bStatus); }
private boolean fmOn() { boolean bStatus=false; if ( TelephonyManager.CALL_STATE_IDLE != getCallState() ) { return bStatus; } if(mReceiver == null) { try { mReceiver = new FmReceiver(FMRADIO_DEVICE_FD_STRING, fmCallbacks); } catch (InstantiationException e) { throw new RuntimeException("FmReceiver service not available!"); } } if (mReceiver != null) { if (isFmOn()) { /* FM Is already on,*/ bStatus = true; Log.d(LOGTAG, "mReceiver.already enabled"); } else { // This sets up the FM radio device FmConfig config = FmSharedPreferences.getFMConfiguration(); Log.d(LOGTAG, "fmOn: RadioBand :"+ config.getRadioBand()); Log.d(LOGTAG, "fmOn: Emphasis :"+ config.getEmphasis()); Log.d(LOGTAG, "fmOn: ChSpacing :"+ config.getChSpacing()); Log.d(LOGTAG, "fmOn: RdsStd :"+ config.getRdsStd()); Log.d(LOGTAG, "fmOn: LowerLimit :"+ config.getLowerLimit()); Log.d(LOGTAG, "fmOn: UpperLimit :"+ config.getUpperLimit()); bStatus = mReceiver.enable(FmSharedPreferences.getFMConfiguration()); if (isSpeakerEnabled()) { setAudioPath(false); } else { setAudioPath(true); } Log.d(LOGTAG, "mReceiver.enable done, Status :" + bStatus); } if (bStatus == true) { /* Put the hardware into normal mode */ bStatus = setLowPowerMode(false); Log.d(LOGTAG, "setLowPowerMode done, Status :" + bStatus); AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); if( (audioManager != null) &&(false == mPlaybackInProgress) ) { Log.d(LOGTAG, "mAudioManager.setFmRadioOn = true \n" ); //audioManager.setParameters("FMRadioOn="+mAudioDevice); int state = getCallState(); if ( TelephonyManager.CALL_STATE_IDLE != getCallState() ) { fmActionOnCallState(state); } else { startFM(); // enable FM Audio only when Call is IDLE } Log.d(LOGTAG, "mAudioManager.setFmRadioOn done \n" ); } if (mReceiver != null) { bStatus = mReceiver.registerRdsGroupProcessing(FmReceiver.FM_RX_RDS_GRP_RT_EBL| FmReceiver.FM_RX_RDS_GRP_PS_EBL| FmReceiver.FM_RX_RDS_GRP_AF_EBL| FmReceiver.FM_RX_RDS_GRP_PS_SIMPLE_EBL); Log.d(LOGTAG, "registerRdsGroupProcessing done, Status :" + bStatus); } bStatus = enableAutoAF(FmSharedPreferences.getAutoAFSwitch()); Log.d(LOGTAG, "enableAutoAF done, Status :" + bStatus); /* There is no internal Antenna*/ bStatus = mReceiver.setInternalAntenna(false); Log.d(LOGTAG, "setInternalAntenna done, Status :" + bStatus); /* Read back to verify the internal Antenna mode*/ readInternalAntennaAvailable(); startNotification(); bStatus = true; } else { mReceiver = null; // as enable failed no need to disable // failure of enable can be because handle // already open which gets effected if // we disable stop(); } } return(bStatus); }
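A short note on the record above, with a hedged illustration: the patch replaces the unconditional setAudioPath(true) during FM power-on with a check of the speaker state, so the analog/digital path follows the current output device. The sketch below is a minimal, hypothetical illustration of that decision only; the class and field names are invented for the example (only the isSpeakerEnabled/setAudioPath idea comes from the record), and the real receiver and audio-routing calls are stubbed out.

// Minimal sketch of the speaker-aware audio-path choice introduced by the fix above.
// Everything here is a stand-in; only the decision logic mirrors the record.
public class AudioPathSketch {
    private boolean speakerOn;          // stands in for mSpeakerPhoneOn
    private boolean analogPathEnabled;  // stands in for misAnalogPathEnabled

    public AudioPathSketch(boolean speakerOn) {
        this.speakerOn = speakerOn;
    }

    // Stand-in for FMRadioService.setAudioPath(boolean analogMode).
    private void setAudioPath(boolean analogMode) {
        analogPathEnabled = analogMode;
        System.out.println("audio path set to " + (analogMode ? "analog" : "digital"));
    }

    // The buggy version always chose the analog path; the fixed version asks first.
    public void onFmEnabled() {
        if (speakerOn) {
            setAudioPath(false);   // speaker output goes over the digital path
        } else {
            setAudioPath(true);    // headset output keeps the analog path
        }
    }

    public static void main(String[] args) {
        new AudioPathSketch(true).onFmEnabled();   // prints: audio path set to digital
        new AudioPathSketch(false).onFmEnabled();  // prints: audio path set to analog
    }
}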
diff --git a/src/jp/co/qsdn/android/iwashi3d/AtlantisService.java b/src/jp/co/qsdn/android/iwashi3d/AtlantisService.java index 0dd751a..6313248 100644 --- a/src/jp/co/qsdn/android/iwashi3d/AtlantisService.java +++ b/src/jp/co/qsdn/android/iwashi3d/AtlantisService.java @@ -1,556 +1,560 @@ /* * Copyright (C) 2011 QSDN,Inc. * Copyright (C) 2011 Atsushi Konno * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package jp.co.qsdn.android.iwashi3d; import android.content.Context; import android.content.Intent; import android.os.Bundle; import android.service.wallpaper.WallpaperService; import android.util.Log; import android.view.SurfaceHolder; import android.widget.Toast; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import javax.microedition.khronos.egl.EGL10; import javax.microedition.khronos.egl.EGL11; import javax.microedition.khronos.egl.EGLConfig; import javax.microedition.khronos.egl.EGLContext; import javax.microedition.khronos.egl.EGLDisplay; import javax.microedition.khronos.egl.EGLSurface; import javax.microedition.khronos.opengles.GL10; import jp.co.qsdn.android.iwashi3d.GLRenderer; import jp.co.qsdn.android.iwashi3d.util.MatrixTrackingGL; public class AtlantisService extends WallpaperService { private static final String TAG = AtlantisService.class.getName(); private static final boolean _debug = false; private static final int RETRY_COUNT = 3; private class AtlantisEngine extends Engine { private final String TAG = AtlantisEngine.class.getName(); private int width = 0; private int height = 0; private boolean binded = false; private boolean mInitialized = false; private long BASE_TICK = 45410157L; private MatrixTrackingGL gl10 = null; private EGL10 egl10 = null; private EGLContext eglContext = null; private EGLDisplay eglDisplay = null; private EGLSurface eglSurface = null; private GLRenderer glRenderer = null; private ExecutorService getExecutor() { if (executor == null) { executor = Executors.newSingleThreadExecutor(); } return executor; } private ExecutorService executor = null; private Runnable drawCommand = null; @Override public void onCreate(final SurfaceHolder holder) { if (_debug) Log.d(TAG, "start onCreate() [" + this + "]"); super.onCreate(holder); setTouchEventsEnabled(false); if (! isPreview()) { AtlantisNotification.putNotice(AtlantisService.this); } if (_debug) Log.d(TAG, "end onCreate() [" + this + "]"); } @Override public void onDestroy() { if (_debug) Log.d(TAG, "start onDestroy() [" + this + "]"); if (! 
isPreview()) { AtlantisNotification.removeNotice(getApplicationContext()); } else { } super.onDestroy(); System.gc(); if (_debug) Log.d(TAG, "end onDestroy() [" + this + "]"); } private void doExecute(Runnable command) { if (command == null) { return; } while(true) { try { getExecutor().execute(command); } catch (RejectedExecutionException e) { if (getExecutor().isShutdown()) { // ignore } else { Log.e(TAG, "command execute failure", e); waitNano(); System.gc(); continue; } } break; } } int[][] configSpec = { { // RGB565 color EGL10.EGL_RED_SIZE, 5, EGL10.EGL_GREEN_SIZE,6, EGL10.EGL_BLUE_SIZE, 5, EGL10.EGL_ALPHA_SIZE, EGL10.EGL_DONT_CARE, EGL10.EGL_DEPTH_SIZE, 24, EGL10.EGL_STENCIL_SIZE, EGL10.EGL_DONT_CARE, // window (and not a pixmap or a pbuffer) EGL10.EGL_SURFACE_TYPE, EGL10.EGL_WINDOW_BIT, EGL10.EGL_NONE , }, { // RGB565 color EGL10.EGL_RED_SIZE, 5, EGL10.EGL_GREEN_SIZE,6, EGL10.EGL_BLUE_SIZE,5, EGL10.EGL_ALPHA_SIZE, EGL10.EGL_DONT_CARE, EGL10.EGL_DEPTH_SIZE, 16, EGL10.EGL_STENCIL_SIZE, EGL10.EGL_DONT_CARE, // window (and not a pixmap or a pbuffer) EGL10.EGL_SURFACE_TYPE, EGL10.EGL_WINDOW_BIT, EGL10.EGL_NONE , }, { EGL10.EGL_NONE, } }; @Override public void onSurfaceCreated(final SurfaceHolder holder) { if (_debug) Log.d(TAG, "start onSurfaceCreated() [" + this + "]"); super.onSurfaceCreated(holder); Runnable surfaceCreatedCommand = new Runnable() { @Override public void run() { doSurfaceCreated(); } protected void doSurfaceCreated() { if (mInitialized) { if (_debug) Log.d(TAG, "already Initialized(surfaceCreatedCommand)"); return; } boolean ret; /* OpenGLの初期化 */ int counter = 0; int specCounter = 0; while(true) { if (_debug) Log.d(TAG, "start EGLContext.getEGL()"); exitEgl(); egl10 = (EGL10) EGLContext.getEGL(); if (_debug) Log.d(TAG, "end EGLContext.getEGL()"); if (_debug) Log.d(TAG, "start eglGetDisplay"); eglDisplay = egl10.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY); if (_debug) Log.d(TAG, "end eglGetDisplay"); if (eglDisplay == null || EGL10.EGL_NO_DISPLAY.equals(eglDisplay)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "eglGetDisplayがEGL_NO_DISPLAY [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateContextがEGL_NO_DISPLAY"); throw new RuntimeException("OpenGL Error(EGL_NO_DISPLAY) " + errStr + ": " ); } if (_debug) Log.d(TAG, "RETRY"); System.gc(); waitNano(); continue; } { egl10.eglMakeCurrent(eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT); } int[] version = new int[2]; if (! 
egl10.eglInitialize(eglDisplay, version)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglInitializeがfalse [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"egl10.eglInitializeがfalse"); throw new RuntimeException("OpenGL Error(eglInitialize) " + errStr + ": " ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } EGLConfig[] configs = new EGLConfig[1]; int[] numConfig = new int[1]; egl10.eglChooseConfig(eglDisplay, configSpec[specCounter++], configs, 1, numConfig); if (numConfig[0] == 0) { if (_debug) Log.d(TAG, "numConfig[0]=" + numConfig[0] + ""); String errStr = AtlantisService.getErrorString(egl10.eglGetError()); errStr += " eglChooseConfig numConfig == 0 "; errStr += " numConfig:[" + numConfig[0] + "]:"; errStr += " configSpec:[" + (specCounter - 1) + "]:"; Log.e(TAG,"eglChooseConfig失敗:" + errStr); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"eglChooseConfig失敗:" + errStr); throw new RuntimeException("OpenGL Error " + errStr + " :" ); } + if (_debug) Log.d(TAG,"RETRY"); + System.gc(); + waitNano(); + continue; } EGLConfig config = configs[0]; eglContext = egl10.eglCreateContext(eglDisplay, config, EGL10.EGL_NO_CONTEXT, null); if (eglContext == null || EGL10.EGL_NO_CONTEXT.equals(eglContext)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglCreateContext == EGL_NO_CONTEXT [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateContextがEGL_NO_CONTEXT"); throw new RuntimeException("OpenGL Error(EGL_NO_CONTEXT) " + errStr + " :" ); } if (_debug) Log.d(TAG, "RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglCreateContext done."); eglSurface = egl10.eglCreateWindowSurface(eglDisplay, config, holder, null); if (eglSurface == null || EGL10.EGL_NO_SURFACE.equals(eglSurface)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglCreateWindowSurface == EGL_NO_SURFACE [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateWindowSurfaceがEGL_NO_SURFACE"); throw new RuntimeException("OpenGL Error(EGL_NO_SURFACE) " + errStr + " :" ); } if (_debug) Log.e(TAG, "RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglCreateWindowSurface done."); if (! 
egl10.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglMakeCurrent == false [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"egl10.eglMakeCurrentがfalse"); throw new RuntimeException("OpenGL Error(eglMakeCurrent) " + errStr + " :" ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglMakeCurrent done."); if (_debug) Log.d(TAG, "now create gl10 object"); gl10 = new MatrixTrackingGL((GL10) (eglContext.getGL())); glRenderer = GLRenderer.getInstance(getApplicationContext()); synchronized (glRenderer) { glRenderer.onSurfaceCreated(gl10, config, getApplicationContext()); } if (_debug) Log.d(TAG, "EGL initalize done."); mInitialized = true; if (drawCommand == null) { drawCommand = new Runnable() { public void run() { doDrawCommand(); } protected void doDrawCommand() { if (mInitialized && glRenderer != null && gl10 != null) { synchronized (glRenderer) { long nowTime = System.nanoTime(); if (nowTime - glRenderer.prevTick < BASE_TICK) { try { TimeUnit.NANOSECONDS.sleep(nowTime - glRenderer.prevTick); } catch (InterruptedException e) { } } glRenderer.onDrawFrame(gl10); glRenderer.prevTick = nowTime; } egl10.eglSwapBuffers(eglDisplay, eglSurface); if (!getExecutor().isShutdown() && isVisible() && egl10.eglGetError() != EGL11.EGL_CONTEXT_LOST) { doExecute(drawCommand); } } } }; doExecute(drawCommand); } break; } Log.d(TAG, "selected config spec for opengl is No." + counter); } }; doExecute(surfaceCreatedCommand); if (_debug) Log.d(TAG, "end onSurfaceCreated() [" + this + "]"); } @Override public void onSurfaceDestroyed(final SurfaceHolder holder) { if (_debug) Log.d(TAG, "start onSurfaceDestroyed() [" + this + "]"); Runnable surfaceDestroyedCommand = new Runnable() { @Override public void run() { doSurfaceDestroyedCommand(); } private void doSurfaceDestroyedCommand() { synchronized (glRenderer) { glRenderer.onSurfaceDestroyed(gl10); } exitEgl(); gl10.shutdown(); gl10 = null; System.gc(); mInitialized = false; } }; doExecute(surfaceDestroyedCommand); getExecutor().shutdown(); try { if (!getExecutor().awaitTermination(60, TimeUnit.SECONDS)) { getExecutor().shutdownNow(); if (!getExecutor().awaitTermination(60, TimeUnit.SECONDS)) { if (_debug) Log.d(TAG,"ExecutorService did not terminate...."); getExecutor().shutdownNow(); Thread.currentThread().interrupt(); } } } catch (InterruptedException e) { executor.shutdownNow(); Thread.currentThread().interrupt(); } drawCommand = null; super.onSurfaceDestroyed(holder); if (_debug) Log.d(TAG, "end onSurfaceDestroyed() [" + this + "]"); } @Override public void onSurfaceChanged(final SurfaceHolder holder, final int format, final int width, final int height) { if (_debug) Log.d(TAG, "start onSurfaceChanged() [" + this + "]"); super.onSurfaceChanged(holder, format, width, height); this.width = width; this.height = height; Runnable surfaceChangedCommand = new Runnable() { public void run() { doSurfaceChanged(); } private void doSurfaceChanged() { if (glRenderer != null && gl10 != null && mInitialized) { synchronized (glRenderer) { glRenderer.onSurfaceChanged(gl10, width, height); } } }; }; doExecute(surfaceChangedCommand); if (_debug) Log.d(TAG, "end onSurfaceChanged() [" + this + "]"); } @Override public void onVisibilityChanged(final boolean visible) { if (_debug) Log.d(TAG, "start onVisibilityChanged()"); super.onVisibilityChanged(visible); if (visible && 
drawCommand != null && mInitialized) { if (glRenderer != null) { synchronized (glRenderer) { glRenderer.updateSetting(getApplicationContext()); } } doExecute(drawCommand); } if (_debug) Log.d(TAG, "end onVisibilityChanged()"); } @Override public void onOffsetsChanged(final float xOffset, final float yOffset, final float xOffsetStep, final float yOffsetStep, final int xPixelOffset, final int yPixelOffset) { if (_debug) Log.d(TAG, "start onOffsetsChanged()"); super.onOffsetsChanged(xOffset, yOffset, xOffsetStep, yOffsetStep, xPixelOffset, yPixelOffset); if (xOffsetStep == 0.0f && yOffsetStep == 0.0f) { if (_debug) Log.d(TAG, "end onOffsetChanged() no execute"); return; } Runnable offsetsChangedCommand = new Runnable() { public void run() { doOffsetsChanged(); } private void doOffsetsChanged() { if (mInitialized && glRenderer != null && gl10 != null) { synchronized (glRenderer) { glRenderer.onOffsetsChanged(gl10, xOffset, yOffset, xOffsetStep, yOffsetStep, xPixelOffset, yPixelOffset); } } }; }; doExecute(offsetsChangedCommand); if (_debug) Log.d(TAG, "end onOffsetChanged()"); } @Override public Bundle onCommand(final String action, final int x, final int y, final int z, final Bundle extras, final boolean resultRequested){ if (_debug) { Log.d(TAG, "start onCommand " + "action:[" + action + "]:" + "x:[" + x + "]:" + "y:[" + y + "]:" + "z:[" + z + "]:" + "extras:[" + extras + "]:" + "resultRequested:[" + resultRequested + "]:" ); } if (action.equals("android.wallpaper.tap")) { Runnable onCommandCommand = new Runnable() { public void run() { doCommandCommand(); } private void doCommandCommand() { if (mInitialized && glRenderer != null && gl10 != null) { synchronized (glRenderer) { glRenderer.onCommand(gl10, action, x, y, z, extras, resultRequested); } } } }; doExecute(onCommandCommand); } Bundle ret = super.onCommand(action, x, y, z, extras, resultRequested); if (_debug) Log.d(TAG, "end onCommand"); return ret; } public void exitEgl() { if (_debug) Log.d(TAG, "start exitEgl"); if (egl10 != null) { if (eglDisplay != null && ! eglDisplay.equals(EGL10.EGL_NO_DISPLAY)) { if (! egl10.eglMakeCurrent(eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT)) { Log.e(TAG, "eglMakeCurrentがfalse [" + AtlantisService.getErrorString(egl10.eglGetError()) + "]"); } if (eglSurface != null && ! eglSurface.equals(EGL10.EGL_NO_SURFACE)) { if (! egl10.eglDestroySurface(eglDisplay, eglSurface)) { Log.e(TAG, "eglDestroySurfaceがfalse [" + AtlantisService.getErrorString(egl10.eglGetError()) + "]"); } eglSurface = null; } if (eglContext != null && ! eglContext.equals(EGL10.EGL_NO_CONTEXT)) { if (! egl10.eglDestroyContext(eglDisplay, eglContext)) { Log.e(TAG, "eglDestroyContextがfalse [" + AtlantisService.getErrorString(egl10.eglGetError()) + "]"); } eglContext = null; } if (! 
egl10.eglTerminate(eglDisplay)) { Log.e(TAG, "eglTerminateがfalse [" + AtlantisService.getErrorString(egl10.eglGetError()) + "]"); } eglDisplay = null; } egl10 = null; } if (_debug) Log.d(TAG, "end exitEgl"); } } @Override public Engine onCreateEngine() { if (_debug) Log.d(TAG, "start onCreateEngine()"); AtlantisEngine engine = new AtlantisEngine(); if (_debug) Log.d(TAG, "engine:[" + engine + "]"); if (_debug) Log.d(TAG, "end onCreateEngine()"); return engine; } public void waitNano() { if (_debug) Log.d(TAG, "start waitNano"); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException e) { } if (_debug) Log.d(TAG, "end waitNano"); } public static String getErrorString(int err) { switch (err) { case EGL10.EGL_NOT_INITIALIZED: return "EGL_NOT_INITIALIZED"; case EGL10.EGL_BAD_ACCESS: return "EGL_BAD_ACCESS"; case EGL10.EGL_BAD_ALLOC: return "EGL_BAD_ALLOC"; case EGL10.EGL_BAD_ATTRIBUTE: return "EGL_BAD_ATTRIBUTE"; case EGL10.EGL_BAD_CONTEXT: return "EGL_BAD_CONTEXT"; case EGL10.EGL_BAD_CONFIG: return "EGL_BAD_CONFIG"; case EGL10.EGL_BAD_CURRENT_SURFACE: return "EGL_BAD_CURRENT_SURFACE"; case EGL10.EGL_BAD_DISPLAY: return "EGL_BAD_DISPLAY"; case EGL10.EGL_BAD_SURFACE: return "EGL_BAD_SURFACE"; case EGL10.EGL_BAD_MATCH: return "EGL_BAD_MATCH"; case EGL10.EGL_BAD_PARAMETER: return "EGL_BAD_PARAMETER"; case EGL10.EGL_BAD_NATIVE_PIXMAP: return "EGL_BAD_NATIVE_PIXMAP"; case EGL10.EGL_BAD_NATIVE_WINDOW: return "EGL_BAD_NATIVE_WINDOW"; case EGL11.EGL_CONTEXT_LOST: return "EGL_CONTEXT_LOST"; default: return "OTHER err:[" + err + "]"; } } }
true
true
public void onSurfaceCreated(final SurfaceHolder holder) { if (_debug) Log.d(TAG, "start onSurfaceCreated() [" + this + "]"); super.onSurfaceCreated(holder); Runnable surfaceCreatedCommand = new Runnable() { @Override public void run() { doSurfaceCreated(); } protected void doSurfaceCreated() { if (mInitialized) { if (_debug) Log.d(TAG, "already Initialized(surfaceCreatedCommand)"); return; } boolean ret; /* OpenGLの初期化 */ int counter = 0; int specCounter = 0; while(true) { if (_debug) Log.d(TAG, "start EGLContext.getEGL()"); exitEgl(); egl10 = (EGL10) EGLContext.getEGL(); if (_debug) Log.d(TAG, "end EGLContext.getEGL()"); if (_debug) Log.d(TAG, "start eglGetDisplay"); eglDisplay = egl10.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY); if (_debug) Log.d(TAG, "end eglGetDisplay"); if (eglDisplay == null || EGL10.EGL_NO_DISPLAY.equals(eglDisplay)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "eglGetDisplayがEGL_NO_DISPLAY [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateContextがEGL_NO_DISPLAY"); throw new RuntimeException("OpenGL Error(EGL_NO_DISPLAY) " + errStr + ": " ); } if (_debug) Log.d(TAG, "RETRY"); System.gc(); waitNano(); continue; } { egl10.eglMakeCurrent(eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT); } int[] version = new int[2]; if (! egl10.eglInitialize(eglDisplay, version)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglInitializeがfalse [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"egl10.eglInitializeがfalse"); throw new RuntimeException("OpenGL Error(eglInitialize) " + errStr + ": " ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } EGLConfig[] configs = new EGLConfig[1]; int[] numConfig = new int[1]; egl10.eglChooseConfig(eglDisplay, configSpec[specCounter++], configs, 1, numConfig); if (numConfig[0] == 0) { if (_debug) Log.d(TAG, "numConfig[0]=" + numConfig[0] + ""); String errStr = AtlantisService.getErrorString(egl10.eglGetError()); errStr += " eglChooseConfig numConfig == 0 "; errStr += " numConfig:[" + numConfig[0] + "]:"; errStr += " configSpec:[" + (specCounter - 1) + "]:"; Log.e(TAG,"eglChooseConfig失敗:" + errStr); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"eglChooseConfig失敗:" + errStr); throw new RuntimeException("OpenGL Error " + errStr + " :" ); } } EGLConfig config = configs[0]; eglContext = egl10.eglCreateContext(eglDisplay, config, EGL10.EGL_NO_CONTEXT, null); if (eglContext == null || EGL10.EGL_NO_CONTEXT.equals(eglContext)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglCreateContext == EGL_NO_CONTEXT [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateContextがEGL_NO_CONTEXT"); throw new RuntimeException("OpenGL Error(EGL_NO_CONTEXT) " + errStr + " :" ); } if (_debug) Log.d(TAG, "RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglCreateContext done."); eglSurface = egl10.eglCreateWindowSurface(eglDisplay, config, holder, null); if (eglSurface == null || EGL10.EGL_NO_SURFACE.equals(eglSurface)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglCreateWindowSurface == EGL_NO_SURFACE [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, 
"egl10.eglCreateWindowSurfaceがEGL_NO_SURFACE"); throw new RuntimeException("OpenGL Error(EGL_NO_SURFACE) " + errStr + " :" ); } if (_debug) Log.e(TAG, "RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglCreateWindowSurface done."); if (! egl10.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglMakeCurrent == false [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"egl10.eglMakeCurrentがfalse"); throw new RuntimeException("OpenGL Error(eglMakeCurrent) " + errStr + " :" ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglMakeCurrent done."); if (_debug) Log.d(TAG, "now create gl10 object"); gl10 = new MatrixTrackingGL((GL10) (eglContext.getGL())); glRenderer = GLRenderer.getInstance(getApplicationContext()); synchronized (glRenderer) { glRenderer.onSurfaceCreated(gl10, config, getApplicationContext()); } if (_debug) Log.d(TAG, "EGL initalize done."); mInitialized = true; if (drawCommand == null) { drawCommand = new Runnable() { public void run() { doDrawCommand(); } protected void doDrawCommand() { if (mInitialized && glRenderer != null && gl10 != null) { synchronized (glRenderer) { long nowTime = System.nanoTime(); if (nowTime - glRenderer.prevTick < BASE_TICK) { try { TimeUnit.NANOSECONDS.sleep(nowTime - glRenderer.prevTick); } catch (InterruptedException e) { } } glRenderer.onDrawFrame(gl10); glRenderer.prevTick = nowTime; } egl10.eglSwapBuffers(eglDisplay, eglSurface); if (!getExecutor().isShutdown() && isVisible() && egl10.eglGetError() != EGL11.EGL_CONTEXT_LOST) { doExecute(drawCommand); } } } }; doExecute(drawCommand); } break; } Log.d(TAG, "selected config spec for opengl is No." + counter); } }; doExecute(surfaceCreatedCommand); if (_debug) Log.d(TAG, "end onSurfaceCreated() [" + this + "]"); }
public void onSurfaceCreated(final SurfaceHolder holder) { if (_debug) Log.d(TAG, "start onSurfaceCreated() [" + this + "]"); super.onSurfaceCreated(holder); Runnable surfaceCreatedCommand = new Runnable() { @Override public void run() { doSurfaceCreated(); } protected void doSurfaceCreated() { if (mInitialized) { if (_debug) Log.d(TAG, "already Initialized(surfaceCreatedCommand)"); return; } boolean ret; /* OpenGLの初期化 */ int counter = 0; int specCounter = 0; while(true) { if (_debug) Log.d(TAG, "start EGLContext.getEGL()"); exitEgl(); egl10 = (EGL10) EGLContext.getEGL(); if (_debug) Log.d(TAG, "end EGLContext.getEGL()"); if (_debug) Log.d(TAG, "start eglGetDisplay"); eglDisplay = egl10.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY); if (_debug) Log.d(TAG, "end eglGetDisplay"); if (eglDisplay == null || EGL10.EGL_NO_DISPLAY.equals(eglDisplay)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "eglGetDisplayがEGL_NO_DISPLAY [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateContextがEGL_NO_DISPLAY"); throw new RuntimeException("OpenGL Error(EGL_NO_DISPLAY) " + errStr + ": " ); } if (_debug) Log.d(TAG, "RETRY"); System.gc(); waitNano(); continue; } { egl10.eglMakeCurrent(eglDisplay, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_SURFACE, EGL10.EGL_NO_CONTEXT); } int[] version = new int[2]; if (! egl10.eglInitialize(eglDisplay, version)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglInitializeがfalse [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"egl10.eglInitializeがfalse"); throw new RuntimeException("OpenGL Error(eglInitialize) " + errStr + ": " ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } EGLConfig[] configs = new EGLConfig[1]; int[] numConfig = new int[1]; egl10.eglChooseConfig(eglDisplay, configSpec[specCounter++], configs, 1, numConfig); if (numConfig[0] == 0) { if (_debug) Log.d(TAG, "numConfig[0]=" + numConfig[0] + ""); String errStr = AtlantisService.getErrorString(egl10.eglGetError()); errStr += " eglChooseConfig numConfig == 0 "; errStr += " numConfig:[" + numConfig[0] + "]:"; errStr += " configSpec:[" + (specCounter - 1) + "]:"; Log.e(TAG,"eglChooseConfig失敗:" + errStr); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"eglChooseConfig失敗:" + errStr); throw new RuntimeException("OpenGL Error " + errStr + " :" ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } EGLConfig config = configs[0]; eglContext = egl10.eglCreateContext(eglDisplay, config, EGL10.EGL_NO_CONTEXT, null); if (eglContext == null || EGL10.EGL_NO_CONTEXT.equals(eglContext)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglCreateContext == EGL_NO_CONTEXT [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateContextがEGL_NO_CONTEXT"); throw new RuntimeException("OpenGL Error(EGL_NO_CONTEXT) " + errStr + " :" ); } if (_debug) Log.d(TAG, "RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglCreateContext done."); eglSurface = egl10.eglCreateWindowSurface(eglDisplay, config, holder, null); if (eglSurface == null || EGL10.EGL_NO_SURFACE.equals(eglSurface)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglCreateWindowSurface == EGL_NO_SURFACE [" + errStr + "]"); exitEgl(); if (++counter >= 
AtlantisService.RETRY_COUNT) { Log.e(TAG, "egl10.eglCreateWindowSurfaceがEGL_NO_SURFACE"); throw new RuntimeException("OpenGL Error(EGL_NO_SURFACE) " + errStr + " :" ); } if (_debug) Log.e(TAG, "RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglCreateWindowSurface done."); if (! egl10.eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext)) { String errStr = AtlantisService.getErrorString(egl10.eglGetError()); if (_debug) Log.d(TAG, "egl10.eglMakeCurrent == false [" + errStr + "]"); exitEgl(); if (++counter >= AtlantisService.RETRY_COUNT) { Log.e(TAG,"egl10.eglMakeCurrentがfalse"); throw new RuntimeException("OpenGL Error(eglMakeCurrent) " + errStr + " :" ); } if (_debug) Log.d(TAG,"RETRY"); System.gc(); waitNano(); continue; } if (_debug) Log.d(TAG, "eglMakeCurrent done."); if (_debug) Log.d(TAG, "now create gl10 object"); gl10 = new MatrixTrackingGL((GL10) (eglContext.getGL())); glRenderer = GLRenderer.getInstance(getApplicationContext()); synchronized (glRenderer) { glRenderer.onSurfaceCreated(gl10, config, getApplicationContext()); } if (_debug) Log.d(TAG, "EGL initalize done."); mInitialized = true; if (drawCommand == null) { drawCommand = new Runnable() { public void run() { doDrawCommand(); } protected void doDrawCommand() { if (mInitialized && glRenderer != null && gl10 != null) { synchronized (glRenderer) { long nowTime = System.nanoTime(); if (nowTime - glRenderer.prevTick < BASE_TICK) { try { TimeUnit.NANOSECONDS.sleep(nowTime - glRenderer.prevTick); } catch (InterruptedException e) { } } glRenderer.onDrawFrame(gl10); glRenderer.prevTick = nowTime; } egl10.eglSwapBuffers(eglDisplay, eglSurface); if (!getExecutor().isShutdown() && isVisible() && egl10.eglGetError() != EGL11.EGL_CONTEXT_LOST) { doExecute(drawCommand); } } } }; doExecute(drawCommand); } break; } Log.d(TAG, "selected config spec for opengl is No." + counter); } }; doExecute(surfaceCreatedCommand); if (_debug) Log.d(TAG, "end onSurfaceCreated() [" + this + "]"); }
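A short note on the record above, with a hedged illustration: the added lines make the eglChooseConfig failure branch behave like the other EGL failure branches, i.e. retry with the next configuration spec via continue instead of falling through with no matching config. The sketch below shows only that retry-with-fallback loop shape; the EGL call is faked, and the constant and method names are invented for the example.

// Minimal sketch of the retry loop the patch above completes: when a config
// spec yields zero matches, retry with the next spec instead of proceeding
// with an unusable configuration. EGL itself is stubbed out.
public class ConfigRetrySketch {
    private static final int RETRY_COUNT = 3;

    // Stand-in for egl10.eglChooseConfig(...): pretend only spec index 1 matches.
    private static int chooseConfig(int specIndex) {
        return specIndex == 1 ? 1 : 0; // number of matching configs
    }

    public static void main(String[] args) {
        int counter = 0;
        int specCounter = 0;
        while (true) {
            int numConfig = chooseConfig(specCounter++);
            if (numConfig == 0) {
                if (++counter >= RETRY_COUNT) {
                    throw new RuntimeException("no usable EGL config after retries");
                }
                continue; // the step the fix adds: retry with the next spec
            }
            System.out.println("selected config spec No." + (specCounter - 1));
            break;
        }
    }
}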
diff --git a/src/main/java/hudson/plugins/clearcase/ClearToolExec.java b/src/main/java/hudson/plugins/clearcase/ClearToolExec.java index a4d7faf..c35b019 100755 --- a/src/main/java/hudson/plugins/clearcase/ClearToolExec.java +++ b/src/main/java/hudson/plugins/clearcase/ClearToolExec.java @@ -1,180 +1,180 @@ package hudson.plugins.clearcase; import hudson.AbortException; import hudson.FilePath; import hudson.util.ArgumentListBuilder; import hudson.util.VariableResolver; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.Reader; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; public abstract class ClearToolExec implements ClearTool { private transient Pattern viewListPattern; protected ClearToolLauncher launcher; protected VariableResolver variableResolver; public ClearToolExec(VariableResolver variableResolver, ClearToolLauncher launcher) { this.variableResolver = variableResolver; this.launcher = launcher; } public ClearToolLauncher getLauncher() { return launcher; } protected abstract FilePath getRootViewPath(ClearToolLauncher launcher); public Reader lshistory(String format, Date lastBuildDate, String viewName, String branch, String[] viewPaths) throws IOException, InterruptedException { SimpleDateFormat formatter = new SimpleDateFormat("d-MMM.HH:mm:ss"); ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("lshistory"); cmd.add("-r"); cmd.add("-since", formatter.format(lastBuildDate).toLowerCase()); cmd.add("-fmt", format); if ((branch != null) && (branch.length() > 0)) { cmd.add("-branch", "brtype:" + branch); } cmd.add("-nco"); FilePath viewPath = getRootViewPath(launcher).child(viewName); - for (String path : viewPaths) { - cmd.add(path); + for (String path : viewPaths) { + cmd.add(path.replace("\\n","")); } Reader returnReader = null; ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (launcher.run(cmd.toCommandArray(), null, baos, viewPath)) { returnReader = new InputStreamReader(new ByteArrayInputStream(baos .toByteArray())); } baos.close(); return returnReader; } public Reader lsactivity(String activity, String commandFormat, String viewname) throws IOException, InterruptedException { ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("lsactivity"); cmd.add("-fmt", commandFormat); cmd.add(activity); // changed the path from workspace to getRootViewPath to make Dynamic UCM work FilePath viewPath = getRootViewPath(launcher).child(viewname); ByteArrayOutputStream baos = new ByteArrayOutputStream(); launcher.run(cmd.toCommandArray(), null, baos, viewPath); InputStreamReader reader = new InputStreamReader( new ByteArrayInputStream(baos.toByteArray())); baos.close(); return reader; } public void mklabel(String viewName, String label) throws IOException, InterruptedException { throw new AbortException(); } public List<String> lsview(boolean onlyActiveDynamicViews) throws IOException, InterruptedException { viewListPattern = getListPattern(); ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("lsview"); ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (launcher.run(cmd.toCommandArray(), null, baos, null)) { return parseListOutput(new InputStreamReader( new ByteArrayInputStream(baos.toByteArray())), onlyActiveDynamicViews); } return new ArrayList<String>(); } public List<String> 
lsvob(boolean onlyMOunted) throws IOException, InterruptedException { viewListPattern = getListPattern(); ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("lsvob"); ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (launcher.run(cmd.toCommandArray(), null, baos, null)) { return parseListOutput(new InputStreamReader( new ByteArrayInputStream(baos.toByteArray())), onlyMOunted); } return new ArrayList<String>(); } public String catcs(String viewName) throws IOException, InterruptedException { ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("catcs"); cmd.add("-tag", viewName); ByteArrayOutputStream baos = new ByteArrayOutputStream(); String retString = ""; if (launcher.run(cmd.toCommandArray(), null, baos, null)) { BufferedReader reader = new BufferedReader(new InputStreamReader( new ByteArrayInputStream(baos.toByteArray()))); String line = reader.readLine(); StringBuilder builder = new StringBuilder(); while (line != null) { if (builder.length() > 0) { builder.append("\n"); } builder.append(line); line = reader.readLine(); } reader.close(); retString = builder.toString(); } baos.close(); return retString; } private List<String> parseListOutput(Reader consoleReader, boolean onlyStarMarked) throws IOException { List<String> views = new ArrayList<String>(); BufferedReader reader = new BufferedReader(consoleReader); String line = reader.readLine(); while (line != null) { Matcher matcher = viewListPattern.matcher(line); if (matcher.find() && matcher.groupCount() == 3) { if ((!onlyStarMarked) || (onlyStarMarked && matcher.group(1).equals("*"))) { String vob = matcher.group(2); int pos = Math.max(vob.lastIndexOf('\\'), vob .lastIndexOf('/')); if (pos != -1) { vob = vob.substring(pos + 1); } views.add(vob); } } line = reader.readLine(); } reader.close(); return views; } private Pattern getListPattern() { if (viewListPattern == null) { viewListPattern = Pattern.compile("(.)\\s*(\\S*)\\s*(\\S*)"); } return viewListPattern; } }
true
true
public Reader lshistory(String format, Date lastBuildDate, String viewName, String branch, String[] viewPaths) throws IOException, InterruptedException { SimpleDateFormat formatter = new SimpleDateFormat("d-MMM.HH:mm:ss"); ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("lshistory"); cmd.add("-r"); cmd.add("-since", formatter.format(lastBuildDate).toLowerCase()); cmd.add("-fmt", format); if ((branch != null) && (branch.length() > 0)) { cmd.add("-branch", "brtype:" + branch); } cmd.add("-nco"); FilePath viewPath = getRootViewPath(launcher).child(viewName); for (String path : viewPaths) { cmd.add(path); } Reader returnReader = null; ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (launcher.run(cmd.toCommandArray(), null, baos, viewPath)) { returnReader = new InputStreamReader(new ByteArrayInputStream(baos .toByteArray())); } baos.close(); return returnReader; }
public Reader lshistory(String format, Date lastBuildDate, String viewName, String branch, String[] viewPaths) throws IOException, InterruptedException { SimpleDateFormat formatter = new SimpleDateFormat("d-MMM.HH:mm:ss"); ArgumentListBuilder cmd = new ArgumentListBuilder(); cmd.add("lshistory"); cmd.add("-r"); cmd.add("-since", formatter.format(lastBuildDate).toLowerCase()); cmd.add("-fmt", format); if ((branch != null) && (branch.length() > 0)) { cmd.add("-branch", "brtype:" + branch); } cmd.add("-nco"); FilePath viewPath = getRootViewPath(launcher).child(viewName); for (String path : viewPaths) { cmd.add(path.replace("\\n","")); } Reader returnReader = null; ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (launcher.run(cmd.toCommandArray(), null, baos, viewPath)) { returnReader = new InputStreamReader(new ByteArrayInputStream(baos .toByteArray())); } baos.close(); return returnReader; }
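The whole change in this record is the sanitization of each configured view path before it is appended to the cleartool lshistory command: literal "\n" escape sequences are stripped. A minimal, self-contained sketch of that replacement (the class name and sample value are hypothetical, not part of the plugin):

public class ViewPathSanitizerDemo {
    // Mirrors the fix: remove literal backslash-n sequences left over from
    // multi-line configuration fields before building the command line.
    static String sanitize(String path) {
        return path.replace("\\n", "");
    }

    public static void main(String[] args) {
        String configured = "vobs/project\\n";     // hypothetical raw config value
        System.out.println(sanitize(configured));  // prints: vobs/project
    }
}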
diff --git a/StatusProvider/src/org/nchelp/meteor/provider/status/StatusServlet.java b/StatusProvider/src/org/nchelp/meteor/provider/status/StatusServlet.java index a0dd341f..3aaa9d41 100755 --- a/StatusProvider/src/org/nchelp/meteor/provider/status/StatusServlet.java +++ b/StatusProvider/src/org/nchelp/meteor/provider/status/StatusServlet.java @@ -1,322 +1,321 @@ /** * * Copyright 2002 - 2007 NCHELP * * Author: Tim Bornholtz - The Bornholtz Group * * * This code is part of the Meteor system as defined and specified * by the National Council of Higher Education Loan Programs, Inc. * (NCHELP) and the Meteor Sponsors, and developed by Priority * Technologies, Inc. (PTI). * * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * ********************************************************************************/ package org.nchelp.meteor.provider.status; import java.io.IOException; import java.io.InputStream; import java.io.PrintWriter; import java.io.StringReader; import java.net.URL; import java.util.Iterator; import java.util.List; import java.util.Vector; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.xml.transform.Source; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.stream.StreamResult; import javax.xml.transform.stream.StreamSource; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.xpath.XPathAPI; import org.nchelp.meteor.provider.DataProvider; import org.nchelp.meteor.registry.Directory; import org.nchelp.meteor.registry.DirectoryFactory; import org.nchelp.meteor.util.DateDelta; import org.nchelp.meteor.util.Resource; import org.nchelp.meteor.util.ResourceFactory; import org.nchelp.meteor.util.XMLParser; import org.nchelp.meteor.util.exception.DirectoryException; import org.nchelp.meteor.util.exception.ParameterException; import org.nchelp.meteor.util.exception.ParsingException; import org.w3c.dom.Document; import org.w3c.dom.Node; /** * This servlet queries the registry for a list of all current data providers * subscribed to the Meteor network. The results are translated into an xml * document and applied to an xsl template in order to display the results to * the user. * * @since Meteor1.0 */ public class StatusServlet extends HttpServlet { /** * */ private static final long serialVersionUID = -971376452505018384L; private final Log log = LogFactory.getLog(this.getClass()); /** * This is the main workhorse of this object. This is the method responsible * for handling the HTTP request and responding. 
* * @param req * HTTP request object * @param res * HTTP response object */ public void doGet (HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException { try { String xslFile = ""; String[] providers = req.getParameterValues("dataprovider"); String xmlReturn; Resource resource = ResourceFactory.createResource("statusprovider.properties"); if(providers == null || providers.length == 0){ xmlReturn = this.getDataProviderList(req); xslFile = resource.getProperty("statusprovider.dataproviderlist"); } else { try{ xmlReturn = this.queryDataProviders(providers); } catch(ParameterException e){ - this.criticalError(res, "Error retrieving data responses", e); - return; + xmlReturn = "<DataProviders><Errors>Error retrieving data responses" + e.getLocalizedMessage() + "</Errors></DataProviders>"; } xslFile = resource.getProperty("statusprovider.dataproviderresults"); } if (xslFile == null) { criticalError(res, "Error reading XSL stylesheet", null); return; } // // Convert the xml and send response to user // res.setContentType("text/html"); PrintWriter out = res.getWriter(); try { // Get the XML input document and the stylesheet, both in the servlet // engine document directory. Source xmlSource = new StreamSource(new StringReader(xmlReturn)); InputStream is = getServletContext().getResourceAsStream(xslFile); StreamSource xslSource = new StreamSource(is); URL xslURL = getServletContext().getResource(xslFile); xslSource.setSystemId(xslURL.toExternalForm()); // Generate the transformer. TransformerFactory tFactory = TransformerFactory.newInstance(); Transformer transformer = tFactory.newTransformer(xslSource); // Put the Servlet name in the xslt Parameters too transformer.setParameter("SERVLET", req.getContextPath() + req.getServletPath()); // Set Context Path transformer.setParameter("CONTEXTPATH", req.getContextPath()); // Perform the transformation, sending the output to the response. transformer.transform(xmlSource, new StreamResult(out)); } // If an Exception occurs, return the error to the client. 
catch (Exception e) { // Send it to the console too log.error("Error Transforming XSL: " + xslFile, e); out.write(e.getMessage()); e.printStackTrace(out); } out.close(); } catch (Exception e) { criticalError(res, "Unspecified error occurred: " + e.getMessage(), e); } } /** * Handles any errors occurring during processing of the request * * @param msg * Error message */ private void criticalError (HttpServletResponse res, String msg, Exception e) throws java.io.IOException { log.error("Critical Error occured in StatusService: " + msg, e); // Send the response res.setContentType("text/html"); PrintWriter out = res.getWriter(); out.println("<html>"); out.println("<head><title>Error Page</title></head>"); out.println("<body>"); out.println("<h1>The follow error has occurred while processing " + "the Meteor request: " + msg + "</h1>"); out.println("</body></html>"); if (e != null) e.printStackTrace(out); out.close(); } /** * @see HttpServlet#doPost(HttpServletRequest, HttpServletResponse) */ protected void doPost (HttpServletRequest arg0, HttpServletResponse arg1) throws ServletException, IOException { this.doGet(arg0, arg1); } private String getDataProviderList(HttpServletRequest req) throws DirectoryException{ List dplist = null; String xmlReturn = "<DataProviders>"; Directory dir = null; dir = DirectoryFactory.getInstance().getDirectory(); dplist = dir.getDataProviders(); if (dplist == null) dplist = new Vector(); Iterator i = dplist.iterator(); while (i.hasNext()) { DataProvider dp = (DataProvider)i.next(); String id = dp.getId(); if (dp.getName() == null) { dp.setName(dir.getInstitutionName(id)); } xmlReturn += "<DataProvider><Identifier>" + id + "</Identifier><Name>" + dp.getName() + "</Name>" + "</DataProvider>"; } xmlReturn += "</DataProviders>"; log.debug("Data Providers: " + xmlReturn); return xmlReturn; } private String queryDataProviders(String[] providers) throws ParameterException, DirectoryException { StatusService service = new StatusService(); Directory dir = DirectoryFactory.getInstance().getDirectory(); List results; results = service.callProviders(providers); StringBuffer retStr = new StringBuffer("<DataProviders>"); Iterator iter = results.iterator(); while(iter.hasNext()){ Provider p = (Provider)iter.next(); DataProvider dp = p.getProviderStatus().getDataProvider(); if (dp.getName() == null) { dp.setName(dir.getInstitutionName(dp.getId())); } retStr.append("<DataProvider>"); retStr.append("<Identifier>"); retStr.append(dp.getId()); retStr.append("</Identifier>"); retStr.append("<Name>"); retStr.append(dp.getName()); retStr.append("</Name>"); retStr.append("<StartTime>"); retStr.append(dp.getStartTime()); retStr.append("</StartTime>"); retStr.append("<EndTime>"); retStr.append(dp.getEndTime()); retStr.append("</EndTime>"); retStr.append("<ElapsedTime>"); retStr.append(DateDelta.getSecondDelta(dp.getStartTime(), dp.getEndTime())); retStr.append("</ElapsedTime>"); retStr.append("<Status>"); retStr.append(dp.getStatus()); retStr.append("</Status>"); String response = dp.getResponse(); retStr.append("<Messages>"); retStr.append(this.getMessages(response)); retStr.append("</Messages>"); retStr.append("<Result><![CDATA["); retStr.append(response); retStr.append("]]></Result>"); retStr.append("</DataProvider>"); } retStr.append("</DataProviders>"); if(log.isDebugEnabled()){ log.debug("Final XML: " + retStr.toString()); } return retStr.toString(); } private String getMessages(String response){ try { Document doc = XMLParser.parseXML(response); Node n = 
XPathAPI.selectSingleNode(doc, "//MeteorDataProviderMsg"); if(n == null){ return ""; } return XMLParser.xmlToString(XMLParser.createDocument(n)); } catch (ParsingException e) { return "Unable to parse response"; } catch (TransformerException e) { return "Unable to retrieve messages from response"; } } }
true
true
public void doGet (HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException { try { String xslFile = ""; String[] providers = req.getParameterValues("dataprovider"); String xmlReturn; Resource resource = ResourceFactory.createResource("statusprovider.properties"); if(providers == null || providers.length == 0){ xmlReturn = this.getDataProviderList(req); xslFile = resource.getProperty("statusprovider.dataproviderlist"); } else { try{ xmlReturn = this.queryDataProviders(providers); } catch(ParameterException e){ this.criticalError(res, "Error retrieving data responses", e); return; } xslFile = resource.getProperty("statusprovider.dataproviderresults"); } if (xslFile == null) { criticalError(res, "Error reading XSL stylesheet", null); return; } // // Convert the xml and send response to user // res.setContentType("text/html"); PrintWriter out = res.getWriter(); try { // Get the XML input document and the stylesheet, both in the servlet // engine document directory. Source xmlSource = new StreamSource(new StringReader(xmlReturn)); InputStream is = getServletContext().getResourceAsStream(xslFile); StreamSource xslSource = new StreamSource(is); URL xslURL = getServletContext().getResource(xslFile); xslSource.setSystemId(xslURL.toExternalForm()); // Generate the transformer. TransformerFactory tFactory = TransformerFactory.newInstance(); Transformer transformer = tFactory.newTransformer(xslSource); // Put the Servlet name in the xslt Parameters too transformer.setParameter("SERVLET", req.getContextPath() + req.getServletPath()); // Set Context Path transformer.setParameter("CONTEXTPATH", req.getContextPath()); // Perform the transformation, sending the output to the response. transformer.transform(xmlSource, new StreamResult(out)); } // If an Exception occurs, return the error to the client. catch (Exception e) { // Send it to the console too log.error("Error Transforming XSL: " + xslFile, e); out.write(e.getMessage()); e.printStackTrace(out); } out.close(); } catch (Exception e) { criticalError(res, "Unspecified error occurred: " + e.getMessage(), e); } }
public void doGet (HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException { try { String xslFile = ""; String[] providers = req.getParameterValues("dataprovider"); String xmlReturn; Resource resource = ResourceFactory.createResource("statusprovider.properties"); if(providers == null || providers.length == 0){ xmlReturn = this.getDataProviderList(req); xslFile = resource.getProperty("statusprovider.dataproviderlist"); } else { try{ xmlReturn = this.queryDataProviders(providers); } catch(ParameterException e){ xmlReturn = "<DataProviders><Errors>Error retrieving data responses" + e.getLocalizedMessage() + "</Errors></DataProviders>"; } xslFile = resource.getProperty("statusprovider.dataproviderresults"); } if (xslFile == null) { criticalError(res, "Error reading XSL stylesheet", null); return; } // // Convert the xml and send response to user // res.setContentType("text/html"); PrintWriter out = res.getWriter(); try { // Get the XML input document and the stylesheet, both in the servlet // engine document directory. Source xmlSource = new StreamSource(new StringReader(xmlReturn)); InputStream is = getServletContext().getResourceAsStream(xslFile); StreamSource xslSource = new StreamSource(is); URL xslURL = getServletContext().getResource(xslFile); xslSource.setSystemId(xslURL.toExternalForm()); // Generate the transformer. TransformerFactory tFactory = TransformerFactory.newInstance(); Transformer transformer = tFactory.newTransformer(xslSource); // Put the Servlet name in the xslt Parameters too transformer.setParameter("SERVLET", req.getContextPath() + req.getServletPath()); // Set Context Path transformer.setParameter("CONTEXTPATH", req.getContextPath()); // Perform the transformation, sending the output to the response. transformer.transform(xmlSource, new StreamResult(out)); } // If an Exception occurs, return the error to the client. catch (Exception e) { // Send it to the console too log.error("Error Transforming XSL: " + xslFile, e); out.write(e.getMessage()); e.printStackTrace(out); } out.close(); } catch (Exception e) { criticalError(res, "Unspecified error occurred: " + e.getMessage(), e); } }
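The fixed doGet() no longer aborts through criticalError() when queryDataProviders() throws a ParameterException; it builds a fallback <DataProviders> document carrying an <Errors> element, so the normal results stylesheet still runs and can display the failure. A small sketch of that fallback document (the exception type and message are hypothetical):

public class FallbackXmlDemo {
    // Roughly the shape of the document the patched servlet hands to the XSL
    // transform when the provider query fails.
    static String errorDocument(Exception e) {
        return "<DataProviders><Errors>Error retrieving data responses "
                + e.getLocalizedMessage() + "</Errors></DataProviders>";
    }

    public static void main(String[] args) {
        Exception e = new IllegalArgumentException("no providers selected");
        System.out.println(errorDocument(e));
    }
}

Note that the patch concatenates the exception message into the XML without escaping, so a message containing markup characters would make the document ill-formed; the sketch reproduces that behaviour rather than correcting it.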
diff --git a/src/main/java/de/lessvoid/nifty/elements/Element.java b/src/main/java/de/lessvoid/nifty/elements/Element.java index 7b045e15..617f8bf3 100644 --- a/src/main/java/de/lessvoid/nifty/elements/Element.java +++ b/src/main/java/de/lessvoid/nifty/elements/Element.java @@ -1,1867 +1,1870 @@ package de.lessvoid.nifty.elements; import java.util.ArrayList; import java.util.List; import java.util.logging.Logger; import de.lessvoid.nifty.EndNotify; import de.lessvoid.nifty.Nifty; import de.lessvoid.nifty.NiftyMethodInvoker; import de.lessvoid.nifty.controls.Controller; import de.lessvoid.nifty.controls.FocusHandler; import de.lessvoid.nifty.controls.NiftyControl; import de.lessvoid.nifty.controls.NiftyInputControl; import de.lessvoid.nifty.effects.Effect; import de.lessvoid.nifty.effects.EffectEventId; import de.lessvoid.nifty.effects.EffectManager; import de.lessvoid.nifty.effects.Falloff; import de.lessvoid.nifty.elements.render.ElementRenderer; import de.lessvoid.nifty.elements.render.TextRenderer; import de.lessvoid.nifty.input.keyboard.KeyboardInputEvent; import de.lessvoid.nifty.input.mouse.MouseInputEvent; import de.lessvoid.nifty.layout.LayoutPart; import de.lessvoid.nifty.layout.align.HorizontalAlign; import de.lessvoid.nifty.layout.align.VerticalAlign; import de.lessvoid.nifty.layout.manager.LayoutManager; import de.lessvoid.nifty.loaderv2.types.ElementType; import de.lessvoid.nifty.render.NiftyRenderEngine; import de.lessvoid.nifty.screen.KeyInputHandler; import de.lessvoid.nifty.screen.MouseOverHandler; import de.lessvoid.nifty.screen.Screen; import de.lessvoid.nifty.screen.ScreenController; import de.lessvoid.nifty.tools.SizeValue; import de.lessvoid.nifty.tools.TimeProvider; /** * The Element. * @author void */ public class Element { /** * Time before we start an automated click when mouse button is holded. */ private static final long REPEATED_CLICK_START_TIME = 100; /** * The time between automatic clicks. */ private static final long REPEATED_CLICK_TIME = 100; /** * the logger. */ private static Logger log = Logger.getLogger(Element.class.getName()); /** * element type. */ private ElementType elementType; /** * our identification. */ private String id; /** * the parent element. */ private Element parent; /** * the child elements. */ private ArrayList < Element > elements = new ArrayList < Element >(); /** * The LayoutManager we should use for all child elements. */ private LayoutManager layoutManager; /** * The LayoutPart for layout this element. */ private LayoutPart layoutPart; /** * The ElementRenderer we should use to render this element. */ private ElementRenderer[] elementRenderer; /** * the effect manager for this element. */ private EffectManager effectManager; /** * Element interaction. */ private ElementInteraction interaction; /** * Nifty instance this element is attached to. */ private Nifty nifty; /** * The focus handler this element is attached to. */ private FocusHandler focusHandler; /** * Listeners listening for changes to this element */ private List<ElementChangeListener> listeners; /** * enable element. */ private boolean enabled; /** * visible element. */ private boolean visible; /** * this is set to true, when there's no interaction with the element * possible. this happens when the onEndScreen effect starts. */ private boolean done; /** * this is set to true when there's no interaction with this element possibe. 
* (as long as onStartScreen and onEndScreen events are active even when this * element is not using the onStartScreen effect at all but a parent element did) */ private boolean interactionBlocked; /** * mouse down flag. */ private boolean mouseDown; /** * visible to mouse events. */ private boolean visibleToMouseEvents; /** * Last position of mouse X. */ private int lastMouseX; /** * Last position of mouse Y. */ private int lastMouseY; /** * mouse first click down time. */ private long mouseDownTime; /** * Time the last repeat has been activated. */ private long lastRepeatStartTime; /** * clip children. */ private boolean clipChildren; /** * attached control when this element is an control. */ private NiftyInputControl attachedInputControl = null; /** * remember if we've calculated this constraint ourself. */ private boolean isCalcWidthConstraint; /** * remember if we've calculated this constraint ourself. */ private boolean isCalcHeightConstraint; /** * focusable. */ private boolean focusable = false; /** * screen we're connected to. */ private Screen screen; /** * TimeProvider. */ private TimeProvider time; private boolean parentClipArea = false; private int parentClipX; private int parentClipY; private int parentClipWidth; private int parentClipHeight; /** * construct new instance of Element. * @param newNifty Nifty * @param newElementType elementType * @param newId the id * @param newParent new parent * @param newFocusHandler the new focus handler * @param newVisibleToMouseEvents visible to mouse * @param newTimeProvider TimeProvider * @param newElementRenderer the element renderer */ public Element( final Nifty newNifty, final ElementType newElementType, final String newId, final Element newParent, final FocusHandler newFocusHandler, final boolean newVisibleToMouseEvents, final TimeProvider newTimeProvider, final ElementRenderer ... newElementRenderer) { initialize( newNifty, newElementType, newId, newParent, newElementRenderer, new LayoutPart(), newFocusHandler, newVisibleToMouseEvents, newTimeProvider); } /** * construct new instance of Element using the given layoutPart instance. * @param newNifty Nifty * @param newElementType element type * @param newId the id * @param newParent new parent * @param newLayoutPart the layout part * @param newFocusHandler the new focus handler * @param newVisibleToMouseEvents visible to mouse * @param newTimeProvider TimeProvider * @param newElementRenderer the element renderer */ public Element( final Nifty newNifty, final ElementType newElementType, final String newId, final Element newParent, final LayoutPart newLayoutPart, final FocusHandler newFocusHandler, final boolean newVisibleToMouseEvents, final TimeProvider newTimeProvider, final ElementRenderer ... newElementRenderer) { initialize( newNifty, newElementType, newId, newParent, newElementRenderer, newLayoutPart, newFocusHandler, newVisibleToMouseEvents, newTimeProvider); } /** * initialize this instance helper. 
* @param newNifty Nifty * @param newElementType element * @param newId the id * @param newParent parent * @param newElementRenderer the element renderer to use * @param newLayoutPart the layoutPart to use * @param newFocusHandler the focus handler that this element is attached to * @param newVisibleToMouseEvents visible to mouse * @param timeProvider TimeProvider to use */ private void initialize( final Nifty newNifty, final ElementType newElementType, final String newId, final Element newParent, final ElementRenderer[] newElementRenderer, final LayoutPart newLayoutPart, final FocusHandler newFocusHandler, final boolean newVisibleToMouseEvents, final TimeProvider timeProvider) { this.nifty = newNifty; this.elementType = newElementType; this.id = newId; this.parent = newParent; this.elementRenderer = newElementRenderer; this.effectManager = new EffectManager(); this.effectManager.setAlternateKey(nifty.getAlternateKey()); this.layoutPart = newLayoutPart; this.enabled = true; this.visible = true; this.done = false; this.interactionBlocked = false; this.focusHandler = newFocusHandler; this.visibleToMouseEvents = newVisibleToMouseEvents; this.time = timeProvider; this.setMouseDown(false, 0); this.interaction = new ElementInteraction(nifty); } /** * get the id of this element. * @return the id */ public String getId() { return id; } /** * get parent. * @return parent */ public Element getParent() { return parent; } public void setParent(final Element element) { parent = element; } /** * get element state as string. * @param offset offset string * @return the element state as string. */ public String getElementStateString(final String offset) { String pos = "" + " style [" + getElementType().getAttributes().get("style") + "]\n" + offset + " state [" + getState() + "]\n" + offset + " position [x=" + getX() + ", y=" + getY() + ", w=" + getWidth() + ", h=" + getHeight() + "]\n" + offset + " constraint [" + outputSizeValue(layoutPart.getBoxConstraints().getX()) + ", " + outputSizeValue(layoutPart.getBoxConstraints().getY()) + ", " + outputSizeValue(layoutPart.getBoxConstraints().getWidth()) + ", " + outputSizeValue(layoutPart.getBoxConstraints().getHeight()) + "]\n" + offset + " padding [" + outputSizeValue(layoutPart.getBoxConstraints().getPaddingLeft()) + ", " + outputSizeValue(layoutPart.getBoxConstraints().getPaddingRight()) + ", " + outputSizeValue(layoutPart.getBoxConstraints().getPaddingTop()) + ", " + outputSizeValue(layoutPart.getBoxConstraints().getPaddingBottom()) + "]\n" + offset + " focusable [" + focusable + "]\n" + offset + " mouseable [" + visibleToMouseEvents + "]"; return pos; } private String getState() { if (isEffectActive(EffectEventId.onStartScreen)) { return "starting"; } if (isEffectActive(EffectEventId.onEndScreen)) { return "ending"; } if (!visible) { return "hidden"; } if (interactionBlocked) { return "interactionBlocked"; } return "normal"; } /** * Output SizeValue. * @param value SizeValue * @return value string */ private String outputSizeValue(final SizeValue value) { if (value == null) { return "null"; } else { return value.toString(); } } /** * get x. * @return x position of this element. */ public int getX() { return layoutPart.getBox().getX(); } /** * get y. * @return the y position of this element. */ public int getY() { return layoutPart.getBox().getY(); } /** * get height. * @return the height of this element. */ public int getHeight() { return layoutPart.getBox().getHeight(); } /** * get width. * @return the width of this element. 
*/ public int getWidth() { return layoutPart.getBox().getWidth(); } /** * Sets the height of this element * @param height the new height in pixels */ public void setHeight(int height) { layoutPart.getBox().setHeight(height); } /** * Sets the width of this element * @param width the new width in pixels */ public void setWidth(int width) { layoutPart.getBox().setWidth(width); } /** * get all child elements of this element. * @return the list of child elements */ public List < Element > getElements() { return elements; } /** * add a child element. * @param widget the child to add */ public void add(final Element widget) { elements.add(widget); } /** * render this element. * @param r the RenderDevice to use */ public void render(final NiftyRenderEngine r) { if (visible) { if (effectManager.isEmpty()) { r.saveState(null); renderElement(r); renderChildren(r); r.restoreState(); } else { r.saveState(null); effectManager.begin(r, this); effectManager.renderPre(r, this); renderElement(r); effectManager.renderPost(r, this); effectManager.end(r); renderChildren(r); r.restoreState(); r.saveState(null); effectManager.renderOverlay(r, this); r.restoreState(); } } } private void renderElement(final NiftyRenderEngine r) { if (elementRenderer != null) { for (ElementRenderer renderer : elementRenderer) { renderer.render(this, r); } } } private void renderChildren(final NiftyRenderEngine r) { if (clipChildren) { r.enableClip(getX(), getY(), getX() + getWidth(), getY() + getHeight()); renderInternalChildElements(r); r.disableClip(); } else { renderInternalChildElements(r); } } private void renderInternalChildElements(final NiftyRenderEngine r) { for (Element p : elements) { p.render(r); } } /** * Set a new LayoutManager. * @param newLayout the new LayoutManager to use. */ public void setLayoutManager(final LayoutManager newLayout) { this.layoutManager = newLayout; } private void preProcessConstraintWidth() { for (Element e : elements) { e.preProcessConstraintWidth(); } preProcessConstraintWidthThisLevel(); } private void preProcessConstraintWidthThisLevel() { // try the original width value first SizeValue myWidth = getConstraintWidth(); // is it empty and we have an layoutManager there's still hope for a width constraint if (layoutManager != null && (myWidth == null || isCalcWidthConstraint)) { // collect all child layoutPart that have a fixed pixel size in a list List < LayoutPart > layoutPartChild = new ArrayList < LayoutPart >(); for (Element e : elements) { SizeValue childWidth = e.getConstraintWidth(); if (childWidth != null && childWidth.isPixel()) { layoutPartChild.add(e.layoutPart); } } // if all (!) child elements have a pixel fixed width we can calculate a new width constraint for this element! 
if (elements.size() == layoutPartChild.size()) { SizeValue newWidth = layoutManager.calculateConstraintWidth(this.layoutPart, layoutPartChild); if (newWidth != null) { setConstraintWidth(newWidth); isCalcWidthConstraint = true; } } } } private void preProcessConstraintHeight() { for (Element e : elements) { e.preProcessConstraintHeight(); } preProcessConstraintHeightThisLevel(); } private void preProcessConstraintHeightThisLevel() { // try the original height value first SizeValue myHeight = getConstraintHeight(); // is it empty and we have an layoutManager there's still hope for a height constraint if (layoutManager != null && (myHeight == null || isCalcHeightConstraint)) { // collect all child layoutPart that have a fixed px size in a list List < LayoutPart > layoutPartChild = new ArrayList < LayoutPart >(); for (Element e : elements) { SizeValue childHeight = e.getConstraintHeight(); if (childHeight != null && childHeight.isPixel()) { layoutPartChild.add(e.layoutPart); } } // if all (!) child elements have a px fixed height we can calculate a new height constraint for this element! if (elements.size() == layoutPartChild.size()) { SizeValue newHeight = layoutManager.calculateConstraintHeight(this.layoutPart, layoutPartChild); if (newHeight != null) { setConstraintHeight(newHeight); isCalcHeightConstraint = true; } } } } private void processLayoutInternal() { for (Element w : elements) { TextRenderer textRenderer = w.getRenderer(TextRenderer.class); if (textRenderer != null) { textRenderer.setWidthConstraint(w, w.getConstraintWidth(), getWidth(), nifty.getRenderEngine()); } } } private void processLayout() { processLayoutInternal(); if (layoutManager != null) { // we need a list of LayoutPart and not of Element, so we'll build one on the fly here List < LayoutPart > layoutPartChild = new ArrayList < LayoutPart >(); for (Element w : elements) { layoutPartChild.add(w.layoutPart); } // use out layoutManager to layout our children layoutManager.layoutElements(layoutPart, layoutPartChild); // repeat this step for all child elements for (Element w : elements) { w.processLayout(); } } if (clipChildren) { for (Element w : elements) { w.setParentClipArea(getX(), getY(), getWidth(), getHeight()); } } } public void layoutElements() { prepareLayout(); processLayout(); prepareLayout(); processLayout(); } private void prepareLayout() { preProcessConstraintWidth(); preProcessConstraintHeight(); } private void setParentClipArea(final int x, final int y, final int width, final int height) { parentClipArea = true; parentClipX = x; parentClipY = y; parentClipWidth = width; parentClipHeight = height; for (Element w : elements) { w.setParentClipArea(parentClipX, parentClipY, parentClipWidth, parentClipHeight); } notifyListeners(); } /** * reset all effects. 
*/ public void resetEffects() { // mouseDown = false; effectManager.reset(); for (Element w : elements) { w.resetEffects(); } } public void resetAllEffects() { // mouseDown = false; effectManager.resetAll(); for (Element w : elements) { w.resetAllEffects(); } } public void resetSingleEffect(final EffectEventId effectEventId) { // mouseDown = false; effectManager.resetSingleEffect(effectEventId); for (Element w : elements) { w.resetSingleEffect(effectEventId); } } public void resetSingleEffect(final EffectEventId effectEventId, final String customKey) { // mouseDown = false; effectManager.resetSingleEffect(effectEventId, customKey); for (Element w : elements) { w.resetSingleEffect(effectEventId, customKey); } } public void resetMouseDown() { // mouseDown = false; for (Element w : elements) { w.resetMouseDown(); } } /** * set new x position constraint. * @param newX new x constraint. */ public void setConstraintX(final SizeValue newX) { layoutPart.getBoxConstraints().setX(newX); notifyListeners(); } /** * set new y position constraint. * @param newY new y constaint. */ public void setConstraintY(final SizeValue newY) { layoutPart.getBoxConstraints().setY(newY); notifyListeners(); } /** * set new width constraint. * @param newWidth new width constraint. */ public void setConstraintWidth(final SizeValue newWidth) { layoutPart.getBoxConstraints().setWidth(newWidth); notifyListeners(); } /** * set new height constraint. * @param newHeight new height constraint. */ public void setConstraintHeight(final SizeValue newHeight) { layoutPart.getBoxConstraints().setHeight(newHeight); notifyListeners(); } public SizeValue getConstraintX() { return layoutPart.getBoxConstraints().getX(); } public SizeValue getConstraintY() { return layoutPart.getBoxConstraints().getY(); } /** * get current width constraint. * @return current width constraint */ public SizeValue getConstraintWidth() { return layoutPart.getBoxConstraints().getWidth(); } /** * get current height constraint. * @return current height constraint. */ public SizeValue getConstraintHeight() { return layoutPart.getBoxConstraints().getHeight(); } /** * set new horizontal align. * @param newHorizontalAlign new horizontal align. */ public void setConstraintHorizontalAlign(final HorizontalAlign newHorizontalAlign) { layoutPart.getBoxConstraints().setHorizontalAlign(newHorizontalAlign); } /** * set new vertical align. * @param newVerticalAlign new vertical align. */ public void setConstraintVerticalAlign(final VerticalAlign newVerticalAlign) { layoutPart.getBoxConstraints().setVerticalAlign(newVerticalAlign); } /** * get current horizontal align. * @return current horizontal align. */ public HorizontalAlign getConstraintHorizontalAlign() { return layoutPart.getBoxConstraints().getHorizontalAlign(); } /** * get current vertical align. * @return current vertical align. */ public VerticalAlign getConstraintVerticalAlign() { return layoutPart.getBoxConstraints().getVerticalAlign(); } /** * register an effect for this element. 
* @param theId the effect id * @param e the effect */ public void registerEffect( final EffectEventId theId, final Effect e) { log.fine("[" + this.getId() + "] register: " + theId.toString() + "(" + e.getStateString() + ")"); effectManager.registerEffect(theId, e); } public void startEffect(final EffectEventId effectEventId) { startEffect(effectEventId, null); } public void startEffect(final EffectEventId effectEventId, final EndNotify effectEndNotiy) { if (effectEventId == EffectEventId.onStartScreen) { if (!visible) { return; } done = false; interactionBlocked = true; } if (effectEventId == EffectEventId.onEndScreen) { if (!visible) { + // it doesn't make sense to start the onEndScreen effect when the element is hidden + // just call the effectEndNotify directly and quit + effectEndNotiy.perform(); return; } done = true; interactionBlocked = true; } // whenever the effect ends we forward to this event // that checks first, if all child elements are finished // and when yes forwards to the actual effectEndNotify event. // // this way we ensure that all child finished the effects // before forwarding this to the real event handler. // // little bit tricky though :/ LocalEndNotify forwardToSelf = new LocalEndNotify(effectEventId, effectEndNotiy); // start the effect for ourself effectManager.startEffect(effectEventId, this, time, forwardToSelf); // notify all child elements of the start effect for (Element w : getElements()) { w.startEffectInternal(effectEventId, forwardToSelf); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(true); } } // just in case there was no effect activated, we'll check here, if we're already done forwardToSelf.perform(); } public void startEffect(final EffectEventId effectEventId, final EndNotify effectEndNotiy, final String customKey) { if (effectEventId == EffectEventId.onStartScreen) { if (!visible) { return; } done = false; interactionBlocked = true; } if (effectEventId == EffectEventId.onEndScreen) { if (!visible) { return; } done = true; interactionBlocked = true; } // whenever the effect ends we forward to this event // that checks first, if all child elements are finished // and when yes forwards to the actual effectEndNotify event. // // this way we ensure that all child finished the effects // before forwarding this to the real event handler. // // little bit tricky though :/ LocalEndNotify forwardToSelf = new LocalEndNotify(effectEventId, effectEndNotiy); // start the effect for ourself effectManager.startEffect(effectEventId, this, time, forwardToSelf, customKey); // notify all child elements of the start effect for (Element w : getElements()) { w.startEffectInternal(effectEventId, forwardToSelf, customKey); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(true); } } // just in case there was no effect activated, we'll check here, if we're already done forwardToSelf.perform(); } public void startEffectInternal(final EffectEventId effectEventId, final EndNotify effectEndNotiy) { if (effectEventId == EffectEventId.onStartScreen) { if (!visible) { return; } done = false; interactionBlocked = true; } if (effectEventId == EffectEventId.onEndScreen) { if (!visible) { return; } done = true; interactionBlocked = true; } // whenever the effect ends we forward to this event // that checks first, if all child elements are finished // and when yes forwards to the actual effectEndNotify event. 
// // this way we ensure that all child finished the effects // before forwarding this to the real event handler. // // little bit tricky though :/ LocalEndNotify forwardToSelf = new LocalEndNotify(effectEventId, effectEndNotiy); // start the effect for ourself effectManager.startEffect(effectEventId, this, time, forwardToSelf); // notify all child elements of the start effect for (Element w : getElements()) { w.startEffectInternal(effectEventId, forwardToSelf); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(true); } } } public void startEffectInternal(final EffectEventId effectEventId, final EndNotify effectEndNotiy, final String customKey) { if (effectEventId == EffectEventId.onStartScreen) { if (!visible) { return; } done = false; interactionBlocked = true; } if (effectEventId == EffectEventId.onEndScreen) { if (!visible) { return; } done = true; interactionBlocked = true; } // whenever the effect ends we forward to this event // that checks first, if all child elements are finished // and when yes forwards to the actual effectEndNotify event. // // this way we ensure that all child finished the effects // before forwarding this to the real event handler. // // little bit tricky though :/ LocalEndNotify forwardToSelf = new LocalEndNotify(effectEventId, effectEndNotiy); // start the effect for ourself effectManager.startEffect(effectEventId, this, time, forwardToSelf, customKey); // notify all child elements of the start effect for (Element w : getElements()) { w.startEffectInternal(effectEventId, forwardToSelf, customKey); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(true); } } } /** * stop the given effect. * @param effectEventId effect event id to stop */ public void stopEffect(final EffectEventId effectEventId) { if (EffectEventId.onStartScreen == effectEventId || EffectEventId.onEndScreen == effectEventId) { interactionBlocked = false; if (!visible) { return; } } effectManager.stopEffect(effectEventId); // notify all child elements of the start effect for (Element w : getElements()) { w.stopEffect(effectEventId); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(false); } } } /** * check if a certain effect is still active. travels down to child elements. * @param effectEventId the effect type id to check * @return true, if the effect has ended and false otherwise */ public boolean isEffectActive(final EffectEventId effectEventId) { for (Element w : getElements()) { if (w.isEffectActive(effectEventId)) { return true; } } return effectManager.isActive(effectEventId); } /** * enable this element. */ public void enable() { enabled = true; } /** * disable this element. */ public void disable() { enabled = false; } /** * is this element enabled? * @return true, if enabled and false otherwise. */ public boolean isEnabled() { return enabled; } /** * show this element. 
*/ public void show() { // don't show if show is still in progress if (isEffectActive(EffectEventId.onShow)) { return; } // stop any onHide effects when a new onShow effect is about to be started if (isEffectActive(EffectEventId.onHide)) { resetSingleEffect(EffectEventId.onHide); } // show internalShow(); startEffect(EffectEventId.onShow); } private void internalShow() { visible = true; for (Element element : elements) { element.internalShow(); } } public void setVisible(final boolean visibleParam) { if (visibleParam) { show(); } else { hide(); } } /** * hide this element. */ public void hide() { // don't hide if not visible if (!isVisible()) { return; } // don't hide if hide is still in progress if (isEffectActive(EffectEventId.onHide)) { return; } // stop any onShow effects when a new onHide effect is about to be started if (isEffectActive(EffectEventId.onShow)) { resetSingleEffect(EffectEventId.onShow); } // start effect and shizzle startEffect(EffectEventId.onHide, new EndNotify() { public void perform() { focusHandler.lostKeyboardFocus(Element.this); focusHandler.lostMouseFocus(Element.this); resetEffects(); internalHide(); } }); } public void showWithoutEffects() { internalShow(); } public void hideWithoutEffect() { // don't hide if not visible if (!isVisible()) { return; } resetEffects(); internalHide(); } private void internalHide() { visible = false; focusHandler.lostKeyboardFocus(Element.this); focusHandler.lostMouseFocus(Element.this); for (Element element : elements) { element.internalHide(); } } /** * check if this element is visible. * @return true, if this element is visible and false otherwise. */ public boolean isVisible() { return visible; } /** * set a new Falloff. * @param newFalloff new Falloff */ public void setHotSpotFalloff(final Falloff newFalloff) { effectManager.setFalloff(newFalloff); } public Falloff getFalloff() { return effectManager.getFalloff(); } /** * Checks if this element can handle mouse events. * @return true can handle mouse events, false can't handle them */ boolean canHandleMouseEvents() { if (isEffectActive(EffectEventId.onStartScreen)) { return false; } if (isEffectActive(EffectEventId.onEndScreen)) { return false; } if (!visible) { return false; } if (done) { return false; } if (!visibleToMouseEvents) { return false; } if (!focusHandler.canProcessMouseEvents(this)) { return false; } if (interactionBlocked) { return false; } return true; } /** * This should check of the mouse event is inside the current element and if it is * forward the event to it's child. The purpose of this is to build a list of all * elements from front to back that are available for a certain mouse position. * @param mouseEvent MouseInputEvent * @param eventTime time this event occured in ms * @param mouseOverHandler MouseOverHandler to fill */ public void buildMouseOverElements( final MouseInputEvent mouseEvent, final long eventTime, final MouseOverHandler mouseOverHandler) { if (canHandleMouseEvents()) { if (isInside(mouseEvent)) { mouseOverHandler.addMouseOverElement(this); } else { mouseOverHandler.addMouseElement(this); } } for (Element w : getElements()) { w.buildMouseOverElements(mouseEvent, eventTime, mouseOverHandler); } } /** * MouseEvent. 
* @param mouseEvent mouse event * @param eventTime event time */ public boolean mouseEvent(final MouseInputEvent mouseEvent, final long eventTime) { effectManager.handleHover(this, mouseEvent.getMouseX(), mouseEvent.getMouseY()); boolean mouseInside = isInside(mouseEvent); if (interaction.isOnClickRepeat()) { if (mouseInside && isMouseDown() && mouseEvent.isLeftButton()) { long deltaTime = eventTime - mouseDownTime; if (deltaTime > REPEATED_CLICK_START_TIME) { long pastTime = deltaTime - REPEATED_CLICK_START_TIME; long repeatTime = pastTime - lastRepeatStartTime; if (repeatTime > REPEATED_CLICK_TIME) { lastRepeatStartTime = pastTime; if (onClick(mouseEvent)) { return true; } } } } } if (mouseInside && !isMouseDown()) { if (mouseEvent.isInitialLeftButtonDown()) { setMouseDown(true, eventTime); if (focusable) { focusHandler.requestExclusiveMouseFocus(this); focusHandler.setKeyFocus(this); } return onClick(mouseEvent); } } else if (!mouseEvent.isLeftButton() && isMouseDown()) { setMouseDown(false, eventTime); effectManager.stopEffect(EffectEventId.onClick); focusHandler.lostMouseFocus(this); if (mouseInside) { onRelease(); } } if (isMouseDown()) { onClickMouseMove(mouseEvent); } return false; } /** * Handle the MouseOverEvent. Must not call child elements. This is handled by caller. * @param mouseEvent mouse event * @param eventTime event time * @return true the mouse event has been eated and false when the mouse event can be processed further down */ public boolean mouseOverEvent(final MouseInputEvent mouseEvent, final long eventTime) { boolean eatMouseEvent = false; if (interaction.onMouseOver(this, mouseEvent)) { eatMouseEvent = true; } return eatMouseEvent; } /** * checks to see if the given mouse position is inside of this element. * @param inputEvent MouseInputEvent * @return true when inside, false otherwise */ private boolean isInside(final MouseInputEvent inputEvent) { return isMouseInsideElement(inputEvent.getMouseX(), inputEvent.getMouseY()); } public boolean isMouseInsideElement(final int mouseX, final int mouseY) { if (parentClipArea) { // must be inside the parent to continue if (mouseX >= parentClipX && mouseX <= (parentClipX + parentClipWidth) && mouseY > (parentClipY) && mouseY < (parentClipY + parentClipHeight)) { return mouseX >= getX() && mouseX <= (getX() + getWidth()) && mouseY > (getY()) && mouseY < (getY() + getHeight()); } else { return false; } } else { return mouseX >= getX() && mouseX <= (getX() + getWidth()) && mouseY > (getY()) && mouseY < (getY() + getHeight()); } } /** * on click method. * @param inputEvent event */ public boolean onClick(final MouseInputEvent inputEvent) { if (canHandleInteraction()) { effectManager.startEffect(EffectEventId.onClick, this, time, null); lastMouseX = inputEvent.getMouseX(); lastMouseY = inputEvent.getMouseY(); return interaction.onClick(inputEvent); } else { return false; } } public void onClick() { if (canHandleInteraction()) { effectManager.startEffect(EffectEventId.onClick, this, time, null); interaction.onClick(); } } private boolean canHandleInteraction() { return !screen.isEffectActive(EffectEventId.onStartScreen) && !screen.isEffectActive(EffectEventId.onEndScreen); } public void onRelease() { interaction.onRelease(); } /** * on click mouse move method. 
* @param inputEvent MouseInputEvent */ private void onClickMouseMove(final MouseInputEvent inputEvent) { if (lastMouseX == inputEvent.getMouseX() && lastMouseY == inputEvent.getMouseY()) { return; } lastMouseX = inputEvent.getMouseX(); lastMouseY = inputEvent.getMouseY(); interaction.onClickMouseMoved(inputEvent); } /** * set on click method for the given screen. * @param methodInvoker the method to invoke * @param useRepeat repeat on click (true) or single event (false) */ public void setOnClickMethod(final NiftyMethodInvoker methodInvoker, final boolean useRepeat) { interaction.setOnClickMethod(methodInvoker, useRepeat); } public void setOnReleaseMethod(final NiftyMethodInvoker onReleaseMethod) { interaction.setOnReleaseMethod(onReleaseMethod); } /** * Set on click mouse move method. * @param methodInvoker the method to invoke */ public void setOnClickMouseMoveMethod(final NiftyMethodInvoker methodInvoker) { interaction.setOnClickMouseMoved(methodInvoker); } /** * set mouse down. * @param newMouseDown new state of mouse button. * @param eventTime the time in ms the event occured */ private void setMouseDown(final boolean newMouseDown, final long eventTime) { this.mouseDownTime = eventTime; this.lastRepeatStartTime = 0; this.mouseDown = newMouseDown; } /** * is mouse down. * @return mouse down state. */ private boolean isMouseDown() { return mouseDown; } /** * find an element by name. * * @param name the name of the element (id) * @return the element or null */ public Element findElementByName(final String name) { if (id != null && id.equals(name)) { return this; } for (Element e : elements) { Element found = e.findElementByName(name); if (found != null) { return found; } } return null; } /** * set a new alternate key. * @param newAlternateKey new alternate key to use */ public void setOnClickAlternateKey(final String newAlternateKey) { interaction.setAlternateKey(newAlternateKey); } /** * set alternate key. * @param alternateKey new alternate key */ public void setAlternateKey(final String alternateKey) { effectManager.setAlternateKey(alternateKey); for (Element e : elements) { e.setAlternateKey(alternateKey); } } /** * get the effect manager. * @return the EffectManager */ public EffectManager getEffectManager() { return effectManager; } /** * Set a New EffectManager. * @param effectManagerParam new Effectmanager */ public void setEffectManager(final EffectManager effectManagerParam) { effectManager = effectManagerParam; } public void bindToScreen(final Screen newScreen) { screen = newScreen; for (Element e : elements) { e.bindToScreen(newScreen); } } /** * On start screen event. * @param newScreen screen */ public void onStartScreen(final Screen newScreen) { screen = newScreen; for (Element e : elements) { e.onStartScreen(newScreen); } if (focusable) { focusHandler.addElement(this); } if (attachedInputControl != null) { attachedInputControl.onStartScreen(screen); } } /** * * @param <T> the ElementRenderer type * @param requestedRendererClass the special ElementRenderer type to check for * @return the ElementRenderer that matches the class */ public < T extends ElementRenderer > T getRenderer(final Class < T > requestedRendererClass) { for (ElementRenderer renderer : elementRenderer) { if (requestedRendererClass.isInstance(renderer)) { return requestedRendererClass.cast(renderer); } } return null; } /** * Set visible to mouse flag. 
* @param newVisibleToMouseEvents true or false */ public void setVisibleToMouseEvents(final boolean newVisibleToMouseEvents) { this.visibleToMouseEvents = newVisibleToMouseEvents; } /** * keyboard event. * @param inputEvent keyboard event */ public boolean keyEvent(final KeyboardInputEvent inputEvent) { if (attachedInputControl != null) { return attachedInputControl.keyEvent(inputEvent); } return false; } /** * Set clip children. * @param clipChildrenParam clip children flag */ public void setClipChildren(final boolean clipChildrenParam) { this.clipChildren = clipChildrenParam; } /** * Is clip children enabled? * @return clip children */ public boolean isClipChildren() { return this.clipChildren; } /** * Set the focus to this element. */ public void setFocus() { if (nifty != null && nifty.getCurrentScreen() != null) { if (focusable) { focusHandler.setKeyFocus(this); } } } /** * attach an input control to this element. * @param newInputControl input control */ public void attachInputControl(final NiftyInputControl newInputControl) { attachedInputControl = newInputControl; } /** * attach popup. * @param screenController screencontroller */ public void attachPopup(final ScreenController screenController) { log.fine("attachPopup(" + screenController + ") to element [" + id + "]"); attach(interaction.getOnClickMethod(), screenController); attach(interaction.getOnClickMouseMoveMethod(), screenController); attach(interaction.getOnReleaseMethod(), screenController); } /** * attach method. * @param method method * @param screenController method controller */ private void attach(final NiftyMethodInvoker method, final ScreenController screenController) { method.setFirst(screenController); for (Element e : elements) { e.attachPopup(screenController); } } private boolean hasParentActiveOnStartOrOnEndScreenEffect() { if (parent != null) { return parent.effectManager.isActive(EffectEventId.onStartScreen) || parent.effectManager.isActive(EffectEventId.onEndScreen) || parent.hasParentActiveOnStartOrOnEndScreenEffect(); } return false; } private void resetInteractionBlocked() { interactionBlocked = false; for (Element e : elements) { e.resetInteractionBlocked(); } } /** * LocalEndNotify helper class. * @author void */ public class LocalEndNotify implements EndNotify { /** * event id. */ private EffectEventId effectEventId; /** * end notify. */ private EndNotify effectEndNotiy; /** * create it. * @param effectEventIdParam event id * @param effectEndNotiyParam end notify */ public LocalEndNotify(final EffectEventId effectEventIdParam, final EndNotify effectEndNotiyParam) { effectEventId = effectEventIdParam; effectEndNotiy = effectEndNotiyParam; } /** * perform. */ public void perform() { if (effectEventId.equals(EffectEventId.onStartScreen) || effectEventId.equals(EffectEventId.onEndScreen)) { if (interactionBlocked && !hasParentActiveOnStartOrOnEndScreenEffect() && !isEffectActive(effectEventId)) { resetInteractionBlocked(); } } // notify parent if: // a) the effect is done for ourself // b) the effect is done for all of our children if (!isEffectActive(effectEventId)) { // all fine. we can notify the actual event handler if (effectEndNotiy != null) { effectEndNotiy.perform(); } } } } /** * set id. * @param newId new id */ public void setId(final String newId) { this.id = newId; } /** * get element type. * @return element type */ public ElementType getElementType() { return elementType; } /** * get element renderer. 
* @return element renderer array */ public ElementRenderer[] getElementRenderer() { return elementRenderer; } /** * set focusable flag. * @param newFocusable focusable flag */ public void setFocusable(final boolean newFocusable) { this.focusable = newFocusable; for (Element e : elements) { e.setFocusable(newFocusable); } } /** * @return the attachedInputControl */ public NiftyInputControl getAttachedInputControl() { return attachedInputControl; } /** * remove this and all children from the focushandler. */ public void removeFromFocusHandler() { if (screen != null) { if (screen.getFocusHandler() != null) { screen.getFocusHandler().remove(this); for (Element element : elements) { element.removeFromFocusHandler(); } } } } /** * set a new style. * @param newStyle new style to set */ public void setStyle(final String newStyle) { removeStyle(elementType.getAttributes().get("style")); elementType.getAttributes().set("style", newStyle); elementType.applyStyles(nifty.getDefaultStyleResolver()); elementType.applyAttributes(this, elementType.getAttributes(), nifty.getRenderEngine()); elementType.applyEffects(nifty, screen, this); elementType.applyInteract(nifty, screen, this); log.info("after setStyle [" + newStyle + "]\n" + elementType.output(0)); notifyListeners(); } void removeStyle(final String style) { log.info("before removeStyle [" + style + "]\n" + elementType.output(0)); elementType.removeWithTag(style); effectManager.removeAllEffects(); log.info("after removeStyle [" + style + "]\n" + elementType.output(0)); notifyListeners(); } /** * add additional input handler to this element or childs of the elements. * @param handler additiona handler */ public void addInputHandler(final KeyInputHandler handler) { if (attachedInputControl != null) { attachedInputControl.addInputHandler(handler); } for (Element element : elements) { element.addInputHandler(handler); } } public < T extends Controller > T findControl(final String elementName, final Class < T > requestedControlClass) { Element element = findElementByName(elementName); if (element == null) { return null; } return element.getControl(requestedControlClass); } /** * Get Control from element. * @param <T> Type * @param requestedControlClass requested class * @return controller or null */ public < T extends Controller > T getControl(final Class < T > requestedControlClass) { if (attachedInputControl != null) { T t = attachedInputControl.getControl(requestedControlClass); if (t != null) { return t; } } for (Element element : elements) { T t = element.getControl(requestedControlClass); if (t != null) { return t; } } return null; } public < T extends NiftyControl > T getNiftyControl(final Class < T > requestedControlClass) { if (attachedInputControl != null) { T t = attachedInputControl.getNiftyControl(requestedControlClass); if (t != null) { return t; } } for (Element element : elements) { T t = element.getNiftyControl(requestedControlClass); if (t != null) { return t; } } return null; } /** * is focusable? * @return focusable */ public boolean isFocusable() { return focusable; } /** * Set onMouseOverMethod. * @param onMouseOverMethod new on mouse over method */ public void setOnMouseOverMethod(final NiftyMethodInvoker onMouseOverMethod) { this.interaction.setOnMouseOver(onMouseOverMethod); } /** * Get LayoutPart. * @return LayoutPart */ public LayoutPart getLayoutPart() { return layoutPart; } /** * Get Element Interaction. 
* @return current ElementInteraction */ public ElementInteraction getInteraction() { return interaction; } /** * Set Element Interaction. * @param elementInteractionParam ElementInteraction */ public void setInteraction(final ElementInteraction elementInteractionParam) { interaction = elementInteractionParam; } /** * Is this element visible to mouse events. * @return true visible and false not visible */ public boolean isVisibleToMouseEvents() { return visibleToMouseEvents; } public void setPaddingLeft(final SizeValue paddingValue) { layoutPart.getBoxConstraints().setPaddingLeft(paddingValue); notifyListeners(); } public void setPaddingRight(final SizeValue paddingValue) { layoutPart.getBoxConstraints().setPaddingRight(paddingValue); notifyListeners(); } public void setPaddingTop(final SizeValue paddingValue) { layoutPart.getBoxConstraints().setPaddingTop(paddingValue); notifyListeners(); } public void setPaddingBottom(final SizeValue paddingValue) { layoutPart.getBoxConstraints().setPaddingBottom(paddingValue); notifyListeners(); } public String toString() { return id + " (" + super.toString() + ")"; } public boolean isStarted() { return isEffectActive(EffectEventId.onStartScreen); } public void markForRemoval() { markForRemoval(null); } public void markForRemoval(final EndNotify endNotify) { nifty.removeElement(screen, this, endNotify); } public void markForMove(final Element destination) { markForMove(destination, null); } public void markForMove(final Element destination, final EndNotify endNotify) { nifty.moveElement(screen, this, destination, endNotify); } public void reactivate() { done = false; for (Element element : elements) { element.reactivate(); } } public void addElementChangeListener(ElementChangeListener listener) { if(listeners == null) listeners = new ArrayList<ElementChangeListener>(); listeners.add(listener); } public void removeElementChangeListener(ElementChangeListener listener) { if(listeners == null) return; listeners.remove(listener); } private void notifyListeners() { if(listeners == null) return; for(ElementChangeListener listener : listeners) { listener.elementChanged(this); } } }
true
true
public void startEffect(final EffectEventId effectEventId, final EndNotify effectEndNotiy) { if (effectEventId == EffectEventId.onStartScreen) { if (!visible) { return; } done = false; interactionBlocked = true; } if (effectEventId == EffectEventId.onEndScreen) { if (!visible) { return; } done = true; interactionBlocked = true; } // whenever the effect ends we forward to this event // that checks first, if all child elements are finished // and when yes forwards to the actual effectEndNotify event. // // this way we ensure that all child finished the effects // before forwarding this to the real event handler. // // little bit tricky though :/ LocalEndNotify forwardToSelf = new LocalEndNotify(effectEventId, effectEndNotiy); // start the effect for ourself effectManager.startEffect(effectEventId, this, time, forwardToSelf); // notify all child elements of the start effect for (Element w : getElements()) { w.startEffectInternal(effectEventId, forwardToSelf); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(true); } } // just in case there was no effect activated, we'll check here, if we're already done forwardToSelf.perform(); }
public void startEffect(final EffectEventId effectEventId, final EndNotify effectEndNotiy) { if (effectEventId == EffectEventId.onStartScreen) { if (!visible) { return; } done = false; interactionBlocked = true; } if (effectEventId == EffectEventId.onEndScreen) { if (!visible) { // it doesn't make sense to start the onEndScreen effect when the element is hidden // just call the effectEndNotify directly and quit effectEndNotiy.perform(); return; } done = true; interactionBlocked = true; } // whenever the effect ends we forward to this event // that checks first, if all child elements are finished // and when yes forwards to the actual effectEndNotify event. // // this way we ensure that all child finished the effects // before forwarding this to the real event handler. // // little bit tricky though :/ LocalEndNotify forwardToSelf = new LocalEndNotify(effectEventId, effectEndNotiy); // start the effect for ourself effectManager.startEffect(effectEventId, this, time, forwardToSelf); // notify all child elements of the start effect for (Element w : getElements()) { w.startEffectInternal(effectEventId, forwardToSelf); } if (effectEventId == EffectEventId.onFocus) { if (attachedInputControl != null) { attachedInputControl.onFocus(true); } } // just in case there was no effect activated, we'll check here, if we're already done forwardToSelf.perform(); }
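In the record above, the only behavioural change is in the onEndScreen branch of startEffect: the original code returned early for a hidden element without starting the effect, so the supplied end notification was never fired; the fixed version performs the notification before returning, as its added comment explains. Below is a small stand-alone sketch of that pattern; DoneCallback and EffectRunner are made-up names for illustration, not the Nifty API.

// Minimal sketch of the "notify before the early return" pattern shown above;
// DoneCallback and EffectRunner are illustrative names, not the Nifty API.
interface DoneCallback {
    void perform();
}

class EffectRunner {
    private final boolean visible;

    EffectRunner(boolean visible) {
        this.visible = visible;
    }

    void startEndEffect(DoneCallback onDone) {
        if (!visible) {
            // Nothing will ever run for a hidden element, so complete the
            // notification chain immediately instead of returning silently
            // and leaving the caller waiting for a callback that never comes.
            if (onDone != null) {
                onDone.perform();
            }
            return;
        }
        // ... start the real effect here and invoke onDone when it finishes ...
        if (onDone != null) {
            onDone.perform();
        }
    }
}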
diff --git a/src/se/chalmers/tda367/std/core/GameController.java b/src/se/chalmers/tda367/std/core/GameController.java index adb4d52..56bc0ec 100644 --- a/src/se/chalmers/tda367/std/core/GameController.java +++ b/src/se/chalmers/tda367/std/core/GameController.java @@ -1,182 +1,184 @@ package se.chalmers.tda367.std.core; import de.lessvoid.nifty.input.NiftyInputEvent; import se.chalmers.tda367.std.core.events.WaveStartedEvent; import se.chalmers.tda367.std.core.factories.GameBoardFactory; import se.chalmers.tda367.std.core.factories.WaveFactory; import se.chalmers.tda367.std.core.tiles.IBuildableTile; import se.chalmers.tda367.std.core.tiles.TerrainTile; import se.chalmers.tda367.std.core.tiles.towers.ITower; import se.chalmers.tda367.std.utilities.BoardPosition; import se.chalmers.tda367.std.utilities.EventBus; import se.chalmers.tda367.std.utilities.Position; /** * The class that contains the game logic and controls the game. * @author Johan Andersson * @modified Johan Gustafsson * @date Mar 22, 2012 */ public class GameController { private Player player; private GameBoard board; private BuildController buildControl; private WaveController waveControl; private Properties prop = Properties.INSTANCE; private int tileScale; private final GameBoardFactory boardFactory; private int releasedWaves = 0; private int level; /** Constructor for the GameController, requires a player and a board to work. * * @param player - player playing the game. * @param board - board to play the game on. */ public GameController(Player player){ this.player = player; this.level = 1; tileScale = prop.getTileScale(); boardFactory = new GameBoardFactory(); init(); } private void init(){ this.board = boardFactory.create(level); buildControl = new BuildController(board, player); waveControl = new WaveController(board, player); } /** * Update the game to the next state. * @param delta - the amount of time in milliseconds from the previous update. */ public void updateGameState(final int delta) { waveControl.updateWaveRelated(delta); } /** * Starts the next wave of enemies. */ public void nextWave(){ Wave wave = new WaveFactory().create(++releasedWaves); waveControl.startWave(wave); EventBus.INSTANCE.post(new WaveStartedEvent(releasedWaves)); } /** Builds a tower on the board. * * @param tower - Tower to be built. * @param pos - Position to build upon. * @return - True if tower was build otherwise false */ public boolean buildTower(ITower tower, BoardPosition pos){ return buildControl.buildTower(tower, pos); } /** Sells a tower if possible. * * @param tower - Tower to be sold. * @param pos - Position on which the tower is built. * @return - True if tower is sold. */ public boolean sellTower(ITower tower, BoardPosition pos){ return buildControl.sellTower(tower, pos); } /** Upgrades a tower if possible. * * @param tower - Tower to be upgraded. * @return - True if tower was upgraded. */ public boolean upgradeTower(ITower tower){ return buildControl.upgradeTower(tower); } /** Tells if a player can afford to upgrade a tower. * * @param tower - Tower considered to upgrade. * @return - True if player can afford upgrade. */ public boolean playerCanAffordUpgrade(ITower tower) { return buildControl.playerCanAffordUpgrade(tower); } /** Tells if a spot is buildable or not. * * @param pos - Position to test buildability on. * @return - True if position is buildable on board. */ public boolean isBuildableSpot(BoardPosition pos) { return buildControl.isBuildableSpot(pos); } /** Tells if a player can afford a tower. 
* * @param tower - Tower to test affordability on. * @return - True if player can afford upgrade. */ public boolean playerCanAffordTower(ITower tower) { return buildControl.playerCanAffordTower(tower); } /** * Returns how many waves that have been released * * @return number of waves released */ public int getWavesReleased() { return releasedWaves; } /** * Returns the current level. * @return the current level. */ public int getLevel() { return this.level; } /** * @return the game board */ public GameBoard getGameBoard() { return board; } /** * Causes the player to move depending on the {@code MovementEnum} provided. * @param direction Enum to use for calculation. * @param delta time in milliseconds since last update. */ public void moveChar(MovementEnum direction, int delta) { Position playerPos = player.getCharacter().getPos(); if(isAbleToWalkTo(direction.newPosition(playerPos, delta))) { playerPos.copyFromPosition(direction.newPosition(playerPos, delta)); } } /** * Method to check if a given position is a buildable or terrain tile. * This determines if player character can move to this position or not. * @param p position to check. * @return true if it's a {@code IBuildableTile} or {@code TerrainTile}. */ public boolean isAbleToWalkTo(Position p) { //Calculate on which tile the position given is on. int x = (int)(p.getX()/tileScale); int y = (int)(p.getY()/tileScale); - if(board.getTileAt(x, y) instanceof IBuildableTile - || board.getTileAt(x, y) instanceof TerrainTile) { - return true; + if(board.posOnBoard(x, y)) { + if(board.getTileAt(x, y) instanceof IBuildableTile + || board.getTileAt(x, y) instanceof TerrainTile) { + return true; + } } return false; } }
true
true
public boolean isAbleToWalkTo(Position p) { //Calculate on which tile the position given is on. int x = (int)(p.getX()/tileScale); int y = (int)(p.getY()/tileScale); if(board.getTileAt(x, y) instanceof IBuildableTile || board.getTileAt(x, y) instanceof TerrainTile) { return true; } return false; }
public boolean isAbleToWalkTo(Position p) { //Calculate on which tile the position given is on. int x = (int)(p.getX()/tileScale); int y = (int)(p.getY()/tileScale); if(board.posOnBoard(x, y)) { if(board.getTileAt(x, y) instanceof IBuildableTile || board.getTileAt(x, y) instanceof TerrainTile) { return true; } } return false; }
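The GameController fix above wraps the tile lookup in board.posOnBoard(x, y), so positions that fall outside the board are treated as not walkable instead of reaching getTileAt with out-of-range coordinates. A generic sketch of the same guard follows, using a hypothetical Grid class with a boolean matrix rather than the project's GameBoard and tile types.

// Hypothetical Grid class illustrating the bounds guard; the real code uses
// GameBoard.posOnBoard(x, y) and tile-type checks instead of a boolean matrix.
final class Grid {
    private final boolean[][] walkable; // walkable[y][x]
    private final int tileScale;

    Grid(boolean[][] walkable, int tileScale) {
        this.walkable = walkable;
        this.tileScale = tileScale;
    }

    boolean isOnGrid(int x, int y) {
        return y >= 0 && y < walkable.length && x >= 0 && x < walkable[y].length;
    }

    boolean isAbleToWalkTo(double px, double py) {
        int x = (int) (px / tileScale);
        int y = (int) (py / tileScale);
        // Check the position is on the board before inspecting the tile;
        // indexing first would throw ArrayIndexOutOfBoundsException at the edges.
        return isOnGrid(x, y) && walkable[y][x];
    }
}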
diff --git a/com.piece_framework.makegood.launch/src/com/piece_framework/makegood/launch/PHPexeItemFactory.java b/com.piece_framework.makegood.launch/src/com/piece_framework/makegood/launch/PHPexeItemFactory.java index bbd7f08b..449483de 100644 --- a/com.piece_framework.makegood.launch/src/com/piece_framework/makegood/launch/PHPexeItemFactory.java +++ b/com.piece_framework.makegood.launch/src/com/piece_framework/makegood/launch/PHPexeItemFactory.java @@ -1,24 +1,24 @@ /** * Copyright (c) 2010 KUBO Atsuhiro <[email protected]>, * All rights reserved. * * This file is part of MakeGood. * * This program and the accompanying materials are made available under * the terms of the Eclipse Public License v1.0 which accompanies this * distribution, and is available at http://www.eclipse.org/legal/epl-v10.html */ package com.piece_framework.makegood.launch; import org.eclipse.core.resources.IProject; import org.eclipse.php.internal.debug.core.preferences.PHPexeItem; /** * @since 1.2.0 */ public class PHPexeItemFactory { - public static PHPexeItem create(IProject project) { - return null; - } + public static PHPexeItem create(IProject project) { + return null; + } }
true
true
public static PHPexeItem create(IProject project) { return null; }
public static PHPexeItem create(IProject project) { return null; }
diff --git a/servlet/src/org/tangram/components/servlet/ServletViewUtilities.java b/servlet/src/org/tangram/components/servlet/ServletViewUtilities.java index eb9f4a34..7dcf7c91 100644 --- a/servlet/src/org/tangram/components/servlet/ServletViewUtilities.java +++ b/servlet/src/org/tangram/components/servlet/ServletViewUtilities.java @@ -1,176 +1,177 @@ /** * * Copyright 2013 Martin Goellnitz * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package org.tangram.components.servlet; import java.io.IOException; import java.io.InputStreamReader; import java.io.StringWriter; import java.io.Writer; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.regex.Pattern; import javax.inject.Named; import javax.servlet.RequestDispatcher; import javax.servlet.ServletException; import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.velocity.VelocityContext; import org.apache.velocity.app.VelocityEngine; import org.tangram.Constants; import org.tangram.components.CodeResourceCache; import org.tangram.components.TangramServices; import org.tangram.view.RequestParameterAccess; import org.tangram.view.TemplateResolver; import org.tangram.view.ViewContext; import org.tangram.view.ViewContextFactory; import org.tangram.view.ViewUtilities; @Named public class ServletViewUtilities implements ViewUtilities { private static final Log log = LogFactory.getLog(ServletViewUtilities.class); private static final Pattern ID_PATTRN = Pattern.compile(Constants.ID_PATTERN); private static VelocityEngine velocityEngine; public ServletViewUtilities() { if (log.isDebugEnabled()) { log.debug("()"); } // if Properties velocityProperties = new Properties(); try { velocityProperties.load(this.getClass().getClassLoader().getResourceAsStream("tangram/velocity/velocity.properties")); } catch (IOException ex) { log.error("()", ex); } // try/catch if (log.isDebugEnabled()) { log.debug("() velocityProperties="+velocityProperties); } // if velocityEngine = new VelocityEngine(velocityProperties); } // ServletViewUtilities() /** * Creates a plain servlet api based request blob wrapper. * * @param request * @return request blob wrapper suitable for the given request */ @Override public RequestParameterAccess createParameterAccess(HttpServletRequest request) { return new ServletRequestParameterAccess(request); } // createParameterAccess() @Override @SuppressWarnings({"unchecked", "rawtypes"}) public void render(Writer out, Map<String, Object> model, String view) throws IOException { view = (view==null) ? 
Constants.DEFAULT_VIEW : view; HttpServletRequest request = (HttpServletRequest) model.get("request"); HttpServletResponse response = (HttpServletResponse) model.get("response"); String template = null; final List<TemplateResolver> resolvers = TangramServices.getResolvers(); if (log.isDebugEnabled()) { log.debug("render() resolvers="+resolvers); } // if for (TemplateResolver<String> resolver : resolvers) { if (log.isDebugEnabled()) { log.debug("render() resolver="+resolver); } // if if (template==null) { template = resolver.resolveTemplate(view, model, Locale.getDefault()); } // if } // for if (log.isDebugEnabled()) { log.debug("render() template="+template); } // if ViewContextFactory vcf = TangramServices.getViewContextFactory(); ViewContext vc = vcf.createViewContext(model, view); if (ID_PATTRN.matcher(template).matches()) { // Velocity: if (log.isDebugEnabled()) { log.debug("render() Velocity template="+template); } // if + if (out==null) { + response.getWriter().flush(); + } else { + out.flush(); + } // if VelocityContext context = new VelocityContext(model); Writer writer = new StringWriter(); if (log.isDebugEnabled()) { log.debug("render() resource.loader "+velocityEngine.getProperty("resource.loader")); } // if try { CodeResourceCache c = TangramServices.getCodeResourceCache(); velocityEngine.evaluate(context, writer, "tangram", new InputStreamReader(c.get(template).getStream())); } catch (Exception ex) { throw new IOException(ex.getCause()); } // try/catch writer.flush(); final String templateResult = writer.toString(); if (log.isDebugEnabled()) { log.debug("render() result size "+templateResult.length()); } // if - if (out!=null) { - response.getWriter().flush(); - out.flush(); - } // if (out==null ? response.getWriter() : out).write(templateResult); } else { // JSP: RequestDispatcher requestDispatcher = request.getRequestDispatcher(template); if (requestDispatcher!=null) { try { for (String key : model.keySet()) { request.setAttribute(key, model.get(key)); } // for if (log.isDebugEnabled()) { // log.debug("render() writer "+out+" "+response.getWriter()); log.debug("render() writer "+out); } // if // BufferResponse br = new BufferResponse(response); + response.getWriter().flush(); if (out!=null) { - response.getWriter().flush(); out.flush(); } // if requestDispatcher.include(request, response); // out.write(br.getContents()); // response.getOutputStream().write(br.getBytes()); } catch (ServletException ex) { log.error("render()", ex); throw new IOException("Problem while including JSP", ex.getCause()); } // try/catch } // if } // if } // render() @Override public void render(Writer out, Object bean, String view, ServletRequest request, ServletResponse response) throws IOException { render(out, TangramServices.getViewContextFactory().createModel(bean, request, response), view); } // render() } // ServletViewUtilities
false
true
public void render(Writer out, Map<String, Object> model, String view) throws IOException { view = (view==null) ? Constants.DEFAULT_VIEW : view; HttpServletRequest request = (HttpServletRequest) model.get("request"); HttpServletResponse response = (HttpServletResponse) model.get("response"); String template = null; final List<TemplateResolver> resolvers = TangramServices.getResolvers(); if (log.isDebugEnabled()) { log.debug("render() resolvers="+resolvers); } // if for (TemplateResolver<String> resolver : resolvers) { if (log.isDebugEnabled()) { log.debug("render() resolver="+resolver); } // if if (template==null) { template = resolver.resolveTemplate(view, model, Locale.getDefault()); } // if } // for if (log.isDebugEnabled()) { log.debug("render() template="+template); } // if ViewContextFactory vcf = TangramServices.getViewContextFactory(); ViewContext vc = vcf.createViewContext(model, view); if (ID_PATTRN.matcher(template).matches()) { // Velocity: if (log.isDebugEnabled()) { log.debug("render() Velocity template="+template); } // if VelocityContext context = new VelocityContext(model); Writer writer = new StringWriter(); if (log.isDebugEnabled()) { log.debug("render() resource.loader "+velocityEngine.getProperty("resource.loader")); } // if try { CodeResourceCache c = TangramServices.getCodeResourceCache(); velocityEngine.evaluate(context, writer, "tangram", new InputStreamReader(c.get(template).getStream())); } catch (Exception ex) { throw new IOException(ex.getCause()); } // try/catch writer.flush(); final String templateResult = writer.toString(); if (log.isDebugEnabled()) { log.debug("render() result size "+templateResult.length()); } // if if (out!=null) { response.getWriter().flush(); out.flush(); } // if (out==null ? response.getWriter() : out).write(templateResult); } else { // JSP: RequestDispatcher requestDispatcher = request.getRequestDispatcher(template); if (requestDispatcher!=null) { try { for (String key : model.keySet()) { request.setAttribute(key, model.get(key)); } // for if (log.isDebugEnabled()) { // log.debug("render() writer "+out+" "+response.getWriter()); log.debug("render() writer "+out); } // if // BufferResponse br = new BufferResponse(response); if (out!=null) { response.getWriter().flush(); out.flush(); } // if requestDispatcher.include(request, response); // out.write(br.getContents()); // response.getOutputStream().write(br.getBytes()); } catch (ServletException ex) { log.error("render()", ex); throw new IOException("Problem while including JSP", ex.getCause()); } // try/catch } // if } // if } // render()
public void render(Writer out, Map<String, Object> model, String view) throws IOException { view = (view==null) ? Constants.DEFAULT_VIEW : view; HttpServletRequest request = (HttpServletRequest) model.get("request"); HttpServletResponse response = (HttpServletResponse) model.get("response"); String template = null; final List<TemplateResolver> resolvers = TangramServices.getResolvers(); if (log.isDebugEnabled()) { log.debug("render() resolvers="+resolvers); } // if for (TemplateResolver<String> resolver : resolvers) { if (log.isDebugEnabled()) { log.debug("render() resolver="+resolver); } // if if (template==null) { template = resolver.resolveTemplate(view, model, Locale.getDefault()); } // if } // for if (log.isDebugEnabled()) { log.debug("render() template="+template); } // if ViewContextFactory vcf = TangramServices.getViewContextFactory(); ViewContext vc = vcf.createViewContext(model, view); if (ID_PATTRN.matcher(template).matches()) { // Velocity: if (log.isDebugEnabled()) { log.debug("render() Velocity template="+template); } // if if (out==null) { response.getWriter().flush(); } else { out.flush(); } // if VelocityContext context = new VelocityContext(model); Writer writer = new StringWriter(); if (log.isDebugEnabled()) { log.debug("render() resource.loader "+velocityEngine.getProperty("resource.loader")); } // if try { CodeResourceCache c = TangramServices.getCodeResourceCache(); velocityEngine.evaluate(context, writer, "tangram", new InputStreamReader(c.get(template).getStream())); } catch (Exception ex) { throw new IOException(ex.getCause()); } // try/catch writer.flush(); final String templateResult = writer.toString(); if (log.isDebugEnabled()) { log.debug("render() result size "+templateResult.length()); } // if (out==null ? response.getWriter() : out).write(templateResult); } else { // JSP: RequestDispatcher requestDispatcher = request.getRequestDispatcher(template); if (requestDispatcher!=null) { try { for (String key : model.keySet()) { request.setAttribute(key, model.get(key)); } // for if (log.isDebugEnabled()) { // log.debug("render() writer "+out+" "+response.getWriter()); log.debug("render() writer "+out); } // if // BufferResponse br = new BufferResponse(response); response.getWriter().flush(); if (out!=null) { out.flush(); } // if requestDispatcher.include(request, response); // out.write(br.getContents()); // response.getOutputStream().write(br.getBytes()); } catch (ServletException ex) { log.error("render()", ex); throw new IOException("Problem while including JSP", ex.getCause()); } // try/catch } // if } // if } // render()
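In the ServletViewUtilities change, the flush calls are moved ahead of the rendering hand-off: the Velocity branch now flushes whichever writer is actually in use (response.getWriter() when no explicit Writer was supplied, otherwise the supplied one) before the template is evaluated, and the JSP branch flushes the response writer before the include. A tiny sketch of the writer-selection part is below; the helper class and method names are assumptions, not the Tangram API.

import java.io.IOException;
import java.io.Writer;

// Illustrative helper for the writer-selection part of the fix: flush the
// writer that has actually been written to before another component takes
// over the output, so earlier buffered content is emitted in order.
final class OutputHandoff {
    static void flushBeforeDelegation(Writer explicitOut, Writer responseWriter) throws IOException {
        if (explicitOut == null) {
            responseWriter.flush();
        } else {
            explicitOut.flush();
        }
    }
}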
diff --git a/asterix-runtime/src/main/java/edu/uci/ics/asterix/runtime/evaluators/common/SimilarityJaccardCheckEvaluator.java b/asterix-runtime/src/main/java/edu/uci/ics/asterix/runtime/evaluators/common/SimilarityJaccardCheckEvaluator.java index 3f160d60..22cae0fb 100644 --- a/asterix-runtime/src/main/java/edu/uci/ics/asterix/runtime/evaluators/common/SimilarityJaccardCheckEvaluator.java +++ b/asterix-runtime/src/main/java/edu/uci/ics/asterix/runtime/evaluators/common/SimilarityJaccardCheckEvaluator.java @@ -1,114 +1,114 @@ package edu.uci.ics.asterix.runtime.evaluators.common; import java.io.IOException; import edu.uci.ics.asterix.builders.IAOrderedListBuilder; import edu.uci.ics.asterix.builders.OrderedListBuilder; import edu.uci.ics.asterix.dataflow.data.nontagged.serde.AFloatSerializerDeserializer; import edu.uci.ics.asterix.formats.nontagged.AqlSerializerDeserializerProvider; import edu.uci.ics.asterix.om.base.ABoolean; import edu.uci.ics.asterix.om.types.AOrderedListType; import edu.uci.ics.asterix.om.types.BuiltinType; import edu.uci.ics.asterix.runtime.evaluators.functions.BinaryHashMap.BinaryEntry; import edu.uci.ics.hyracks.algebricks.common.exceptions.AlgebricksException; import edu.uci.ics.hyracks.algebricks.runtime.base.ICopyEvaluator; import edu.uci.ics.hyracks.algebricks.runtime.base.ICopyEvaluatorFactory; import edu.uci.ics.hyracks.api.dataflow.value.ISerializerDeserializer; import edu.uci.ics.hyracks.data.std.api.IDataOutputProvider; import edu.uci.ics.hyracks.data.std.primitive.IntegerPointable; import edu.uci.ics.hyracks.data.std.util.ArrayBackedValueStorage; import edu.uci.ics.hyracks.dataflow.common.data.accessors.IFrameTupleReference; public class SimilarityJaccardCheckEvaluator extends SimilarityJaccardEvaluator { protected final ICopyEvaluator jaccThreshEval; protected float jaccThresh = -1f; protected IAOrderedListBuilder listBuilder; protected ArrayBackedValueStorage inputVal; @SuppressWarnings("unchecked") protected final ISerializerDeserializer<ABoolean> booleanSerde = AqlSerializerDeserializerProvider.INSTANCE .getSerializerDeserializer(BuiltinType.ABOOLEAN); protected final AOrderedListType listType = new AOrderedListType(BuiltinType.ANY, "list"); public SimilarityJaccardCheckEvaluator(ICopyEvaluatorFactory[] args, IDataOutputProvider output) throws AlgebricksException { super(args, output); jaccThreshEval = args[2].createEvaluator(argOut); listBuilder = new OrderedListBuilder(); inputVal = new ArrayBackedValueStorage(); } @Override protected void runArgEvals(IFrameTupleReference tuple) throws AlgebricksException { super.runArgEvals(tuple); int jaccThreshStart = argOut.getLength(); jaccThreshEval.evaluate(tuple); jaccThresh = (float) AFloatSerializerDeserializer.getFloat(argOut.getByteArray(), jaccThreshStart + TYPE_INDICATOR_SIZE); } @Override protected int probeHashMap(AbstractAsterixListIterator probeIter, int buildListSize, int probeListSize) { // Apply length filter. int lengthLowerBound = (int) Math.ceil(jaccThresh * probeListSize); if ((lengthLowerBound > buildListSize) || (buildListSize > (int) Math.floor(1.0f / jaccThresh * probeListSize))) { return -1; } // Probe phase: Probe items from second list, and compute intersection size. 
int intersectionSize = 0; int probeListCount = 0; int minUnionSize = probeListSize; while (probeIter.hasNext()) { probeListCount++; byte[] buf = probeIter.getData(); int off = probeIter.getPos(); int len = getItemLen(buf, off); keyEntry.set(buf, off, len); BinaryEntry entry = hashMap.get(keyEntry); if (entry != null) { // Increment second value. - int firstValInt = IntegerPointable.getInteger(buf, 0); + int firstValInt = IntegerPointable.getInteger(entry.buf, entry.off); // Irrelevant for the intersection size. if (firstValInt == 0) { continue; } - int secondValInt = IntegerPointable.getInteger(buf, 4); + int secondValInt = IntegerPointable.getInteger(entry.buf, entry.off + 4); // Subtract old min value. intersectionSize -= (firstValInt < secondValInt) ? firstValInt : secondValInt; secondValInt++; // Add new min value. intersectionSize += (firstValInt < secondValInt) ? firstValInt : secondValInt; - IntegerPointable.setInteger(entry.buf, 0, secondValInt); + IntegerPointable.setInteger(entry.buf, entry.off + 4, secondValInt); } else { // Could not find element in other set. Increase min union size by 1. minUnionSize++; // Check whether jaccThresh can still be satisfied if there was a mismatch. int maxIntersectionSize = intersectionSize + (probeListSize - probeListCount); int lowerBound = (int) Math.floor(jaccThresh * minUnionSize); if (maxIntersectionSize < lowerBound) { // Cannot satisfy jaccThresh. return -1; } } probeIter.next(); } return intersectionSize; } @Override protected void writeResult(float jacc) throws IOException { listBuilder.reset(listType); boolean matches = (jacc < jaccThresh) ? false : true; inputVal.reset(); booleanSerde.serialize(matches ? ABoolean.TRUE : ABoolean.FALSE, inputVal.getDataOutput()); listBuilder.addItem(inputVal); inputVal.reset(); aFloat.setValue((matches) ? jacc : 0.0f); floatSerde.serialize(aFloat, inputVal.getDataOutput()); listBuilder.addItem(inputVal); listBuilder.write(out, true); } }
false
true
protected int probeHashMap(AbstractAsterixListIterator probeIter, int buildListSize, int probeListSize) { // Apply length filter. int lengthLowerBound = (int) Math.ceil(jaccThresh * probeListSize); if ((lengthLowerBound > buildListSize) || (buildListSize > (int) Math.floor(1.0f / jaccThresh * probeListSize))) { return -1; } // Probe phase: Probe items from second list, and compute intersection size. int intersectionSize = 0; int probeListCount = 0; int minUnionSize = probeListSize; while (probeIter.hasNext()) { probeListCount++; byte[] buf = probeIter.getData(); int off = probeIter.getPos(); int len = getItemLen(buf, off); keyEntry.set(buf, off, len); BinaryEntry entry = hashMap.get(keyEntry); if (entry != null) { // Increment second value. int firstValInt = IntegerPointable.getInteger(buf, 0); // Irrelevant for the intersection size. if (firstValInt == 0) { continue; } int secondValInt = IntegerPointable.getInteger(buf, 4); // Subtract old min value. intersectionSize -= (firstValInt < secondValInt) ? firstValInt : secondValInt; secondValInt++; // Add new min value. intersectionSize += (firstValInt < secondValInt) ? firstValInt : secondValInt; IntegerPointable.setInteger(entry.buf, 0, secondValInt); } else { // Could not find element in other set. Increase min union size by 1. minUnionSize++; // Check whether jaccThresh can still be satisfied if there was a mismatch. int maxIntersectionSize = intersectionSize + (probeListSize - probeListCount); int lowerBound = (int) Math.floor(jaccThresh * minUnionSize); if (maxIntersectionSize < lowerBound) { // Cannot satisfy jaccThresh. return -1; } } probeIter.next(); } return intersectionSize; }
protected int probeHashMap(AbstractAsterixListIterator probeIter, int buildListSize, int probeListSize) { // Apply length filter. int lengthLowerBound = (int) Math.ceil(jaccThresh * probeListSize); if ((lengthLowerBound > buildListSize) || (buildListSize > (int) Math.floor(1.0f / jaccThresh * probeListSize))) { return -1; } // Probe phase: Probe items from second list, and compute intersection size. int intersectionSize = 0; int probeListCount = 0; int minUnionSize = probeListSize; while (probeIter.hasNext()) { probeListCount++; byte[] buf = probeIter.getData(); int off = probeIter.getPos(); int len = getItemLen(buf, off); keyEntry.set(buf, off, len); BinaryEntry entry = hashMap.get(keyEntry); if (entry != null) { // Increment second value. int firstValInt = IntegerPointable.getInteger(entry.buf, entry.off); // Irrelevant for the intersection size. if (firstValInt == 0) { continue; } int secondValInt = IntegerPointable.getInteger(entry.buf, entry.off + 4); // Subtract old min value. intersectionSize -= (firstValInt < secondValInt) ? firstValInt : secondValInt; secondValInt++; // Add new min value. intersectionSize += (firstValInt < secondValInt) ? firstValInt : secondValInt; IntegerPointable.setInteger(entry.buf, entry.off + 4, secondValInt); } else { // Could not find element in other set. Increase min union size by 1. minUnionSize++; // Check whether jaccThresh can still be satisfied if there was a mismatch. int maxIntersectionSize = intersectionSize + (probeListSize - probeListCount); int lowerBound = (int) Math.floor(jaccThresh * minUnionSize); if (maxIntersectionSize < lowerBound) { // Cannot satisfy jaccThresh. return -1; } } probeIter.next(); } return intersectionSize; }
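The SimilarityJaccardCheckEvaluator fix reads both packed counters from the matched hash-map entry's own buffer at the entry's offset (entry.buf with entry.off and entry.off + 4) instead of from the probe item's buffer at fixed offsets 0 and 4, and writes the incremented counter back to that same slot. Below is a stand-alone sketch of offset-relative counter updates in a shared byte array, using plain java.nio rather than the Hyracks pointable classes; all names here are illustrative.

import java.nio.ByteBuffer;

// Sketch of offset-relative counter updates: each entry owns 8 bytes in a
// shared array, holding two 32-bit counts. All reads and writes go through
// the entry's own offset; absolute offsets 0 and 4 would always touch the
// first entry, which is the kind of slip the fix above corrects.
final class PackedCounters {
    private final byte[] buf;

    PackedCounters(int entries) {
        this.buf = new byte[entries * 8];
    }

    private int offsetOf(int entry) {
        return entry * 8;
    }

    int first(int entry) {
        return ByteBuffer.wrap(buf).getInt(offsetOf(entry));
    }

    int second(int entry) {
        return ByteBuffer.wrap(buf).getInt(offsetOf(entry) + 4);
    }

    void incrementSecond(int entry) {
        ByteBuffer bb = ByteBuffer.wrap(buf);
        int off = offsetOf(entry);
        bb.putInt(off + 4, bb.getInt(off + 4) + 1);
    }
}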
diff --git a/jing/chem/ThermoGAGroupLibrary.java b/jing/chem/ThermoGAGroupLibrary.java index 091e56d0..a5160cb0 100644 --- a/jing/chem/ThermoGAGroupLibrary.java +++ b/jing/chem/ThermoGAGroupLibrary.java @@ -1,863 +1,863 @@ //!******************************************************************************** //! //! RMG: Reaction Mechanism Generator //! //! Copyright: Jing Song, MIT, 2002, all rights reserved //! //! Author's Contact: [email protected] //! //! Restrictions: //! (1) RMG is only for non-commercial distribution; commercial usage //! must require other written permission. //! (2) Redistributions of RMG must retain the above copyright //! notice, this list of conditions and the following disclaimer. //! (3) The end-user documentation included with the redistribution, //! if any, must include the following acknowledgment: //! "This product includes software RMG developed by Jing Song, MIT." //! Alternately, this acknowledgment may appear in the software itself, //! if and wherever such third-party acknowledgments normally appear. //! //! RMG IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED //! WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES //! OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE //! DISCLAIMED. IN NO EVENT SHALL JING SONG BE LIABLE FOR //! ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR //! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT //! OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; //! OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF //! LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT //! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF //! THE USE OF RMG, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //! //!****************************************************************************** package jing.chem; import java.io.*; import java.util.*; import jing.chemUtil.*; import jing.chemParser.*; import jing.chemUtil.HierarchyTree; //## package jing::chem //---------------------------------------------------------------------------- // jing\chem\ThermoGAGroupLibrary.java //---------------------------------------------------------------------------- /** There are four libraries: (1) group (2) radical (3) ring correction (4) other correction In each library, the key should be functional group (name + adjList), and the value should be ThermoGAValue. for (2), (3), (4), we scan the library to find match between chemgraph and functional group each time. search time O(n), where n is the library size. for (1), we first match chemgraph with a tree structure to find out the proper functional group, and then access the library by the key functional group, so the search tiem is O(1) + O(logN), where N is the tree size. */ //## class ThermoGAGroupLibrary public class ThermoGAGroupLibrary { protected static ThermoGAGroupLibrary INSTANCE = new ThermoGAGroupLibrary(); //## attribute INSTANCE protected HashMap groupDictionary; //## attribute groupDictionary protected HashMap groupLibrary; //## attribute groupLibrary /** Note: this kind of tree is different with the kinetics tree. In kinetics tree, tree nodes are FunctionalGroup or FunctionalGroupCollection. 
In thermo tree, tree nodes are Nodes with connectivity, */ protected HierarchyTree groupTree; //## attribute groupTree protected HashMap otherDictionary; //## attribute otherDictionary protected HashMap otherLibrary; //## attribute otherLibrary protected HierarchyTree otherTree; //## attribute otherTree protected HashMap radicalDictionary; //## attribute radicalDictionary protected HashMap radicalLibrary; //## attribute radicalLibrary protected HierarchyTree radicalTree; //## attribute radicalTree // begin pey protected HierarchyTree ringTree; protected HashMap ringDictionary; // end pey protected HashMap ringLibrary; //## attribute ringLibrary // Constructors //## operation ThermoGAGroupLibrary() private ThermoGAGroupLibrary() { //#[ operation ThermoGAGroupLibrary() groupTree = new HierarchyTree(); groupDictionary = new HashMap(); groupLibrary = new HashMap(); radicalTree = new HierarchyTree(); radicalDictionary = new HashMap(); radicalLibrary = new HashMap(); ringLibrary = new HashMap(); // begin pey ringDictionary = new HashMap(); ringTree = new HierarchyTree(); // end pey otherLibrary = new HashMap(); otherDictionary = new HashMap(); otherTree = new HierarchyTree(); String directory = System.getProperty("jing.chem.ThermoGAGroupLibrary.pathName"); if (directory == null) { System.out.println("undefined system property: jing.chem.ThermoGAGroupLibrary.pathName, exit!"); System.exit(0); } String separator = System.getProperty("file.separator"); if (!directory.endsWith(separator)) directory = directory + separator; System.out.println("\nReading thermo database from "+directory); String gDictionary = directory + "Group_Dictionary.txt"; String gTree = directory + "Group_Tree.txt"; String gLibrary = directory + "Group_Library.txt"; String rDictionary = directory + "Radical_Dictionary.txt"; String rTree = directory + "Radical_Tree.txt"; String rLibrary = directory + "Radical_Library.txt"; // begin pey //String ring = directory + "Ring_Corrections.txt"; String ringDictionary = directory + "Ring_Dictionary.txt"; String ringTree = directory + "Ring_Tree.txt"; String ringLibrary = directory + "Ring_Library.txt"; // end pey String otherLibrary = directory + "Other_Library_Dictionary.txt"; String otherTree = directory + "Other_Tree.txt"; read(gDictionary,gTree,gLibrary,rDictionary,rTree,rLibrary,ringDictionary,ringTree,ringLibrary,otherLibrary,otherTree); //#] } //## operation findCorrectionInLibrary(ChemGraph,HashMap) private ThermoData findCorrectionInLibrary(ChemGraph p_chemGraph, HashMap p_library) { //#[ operation findCorrectionInLibrary(ChemGraph,HashMap) p_chemGraph.clearCentralNode(); ThermoData result=new ThermoData(); int redundance; Iterator iter = p_library.keySet().iterator(); while (iter.hasNext()) { redundance = 0; FunctionalGroup f = (FunctionalGroup)iter.next(); HashSet gv = p_chemGraph.identifyThermoMatchedSite(f); if (gv != null) { redundance = gv.size(); if (redundance > 0) { ThermoGAValue ga = (ThermoGAValue)p_library.get(f); if (ga != null) { ThermoData temp = new ThermoData(ga); temp.multiply(redundance); result.plus(temp); temp = null; } } } } return result; //#] } /** Requires: the central node of p_chemGraph has been set to the thermo center atom. Effects: find a matched thermo functional group in the group tree for the pass-in p_chemGraph, return this functional group's thermo value. 
If no leaf is found, throw GroupNotFoundException Modifies: */ //## operation findGAGroup(ChemGraph) public ThermoGAValue findGAGroup(ChemGraph p_chemGraph) throws GroupNotFoundException, MultipleGroupFoundException, InvalidCenterTypeException { //#[ operation findGAGroup(ChemGraph) if (p_chemGraph == null) return null; Stack stack = groupTree.findMatchedPath(p_chemGraph); if (stack == null) return null; while (!stack.empty()) { HierarchyTreeNode node = (HierarchyTreeNode)stack.pop(); Matchable fg = (Matchable)node.getElement(); ThermoGAValue ga = (ThermoGAValue)groupLibrary.get(fg); if (ga != null) return ga; } return null; //#] } //## operation findOtherCorrection(ChemGraph) public ThermoGAValue findOtherCorrection(ChemGraph p_chemGraph) { //#[ operation findOtherCorrection(ChemGraph) if (p_chemGraph == null) return null; Stack stack = otherTree.findMatchedPath(p_chemGraph); if (stack == null) return null; while (!stack.empty()) { HierarchyTreeNode node = (HierarchyTreeNode)stack.pop(); FunctionalGroup fg = (FunctionalGroup)node.getElement(); ThermoGAValue ga = (ThermoGAValue)otherLibrary.get(fg); if (ga != null) return ga; } return null; //#] } //## operation findRadicalGroup(ChemGraph) public ThermoGAValue findRadicalGroup(ChemGraph p_chemGraph) throws InvalidThermoCenterException { //#[ operation findRadicalGroup(ChemGraph) if (p_chemGraph == null) return null; Stack stack = radicalTree.findMatchedPath(p_chemGraph); if (stack == null) return null; while (!stack.empty()) { HierarchyTreeNode node = (HierarchyTreeNode)stack.pop(); Matchable fg = (Matchable)node.getElement(); ThermoGAValue ga = (ThermoGAValue)radicalLibrary.get(fg); if (ga != null) return ga; } return null; //#] } //## operation findRingCorrection(ChemGraph) public ThermoGAValue findRingCorrection(ChemGraph p_chemGraph) { // end pey //#[ operation findRingCorrection(ChemGraph) // begin pey -- read tree instead of library // return findCorrectionInLibrary(p_chemGraph,ringLibrary); if (p_chemGraph == null) return null; // initialize int deepest = -1; Stack deepestStack = new Stack(); deepestStack = null; // iterate through nodes in chemgraph that are in a cycle Iterator iter = p_chemGraph.getNodeList(); while (iter.hasNext()) { Node node = (Node) iter.next(); Atom atom = (Atom)node.getElement(); if (!(atom.getType().equals("H"))) { // waiting on Jing to fix the inCycle issue for radicals that get saturated // if (node.getInCycle()) { // make the current node the central atom p_chemGraph.resetThermoSite(node); // find the match in the thermo tree Stack stack = ringTree.findMatchedPath(p_chemGraph); // check if it's the deepest match if (!stack.empty()) { HierarchyTreeNode htn = (HierarchyTreeNode) stack.peek(); if (htn.getDepth() > deepest) { deepestStack = stack; deepest = htn.getDepth(); } } } } if (deepestStack == null) return null; while (!deepestStack.empty()) { HierarchyTreeNode node = (HierarchyTreeNode)deepestStack.pop(); FunctionalGroup fg = (FunctionalGroup)node.getElement(); ThermoGAValue ga = (ThermoGAValue)ringLibrary.get(fg); if (ga != null) return ga; } return null; // end pey //#] } //## operation read(String,String,String,String,String,String,String,String,String) public void read(String p_groupDictionary, String p_groupTree, String p_groupLibrary, String p_radicalDictionary, String p_radicalTree, String p_radicalLibrary, String p_ringDictionary, String p_ringTree, String p_ringLibrary, String p_otherLibrary, String p_otherTree) { // end pey //#[ operation 
read(String,String,String,String,String,String,String,String,String) // try { // step 1: read in GA Groups // read thermo functional Group dictionary readGroupDictionary(p_groupDictionary); // read thermo functional Group tree structure readGroupTree(p_groupTree); // read group values readGroupLibrary(p_groupLibrary); // step 2: read in Radical Corrections // read radical dictionary readRadicalDictionary(p_radicalDictionary); // read radical tree readRadicalTree(p_radicalTree); // read radical value readRadicalLibrary(p_radicalLibrary); // step 3: read in Ring Correction // begin pey readRingDictionary(p_ringDictionary); readRingTree(p_ringTree); readRingLibrary(p_ringLibrary); // System.out.println("tree height = " + ringTree.height()); // end pey // step 4: read in Other Correction readOtherLibrary(p_otherLibrary); readOtherTree(p_otherTree); /*} catch (Exception e) { throw new ThermoIOException(e.getMessage()); } */ //#] } //## operation readGroupDictionary(String) public void readGroupDictionary(String p_fileName) { //#[ operation readGroupDictionary(String) try { groupDictionary = readStandardDictionary(p_fileName); return; } catch (Exception e) { System.err.println("Error in read group dictionary!"); System.exit(0); } //#] } //## operation readGroupLibrary(String) public void readGroupLibrary(String p_fileName) { //#[ operation readGroupLibrary(String) try { groupLibrary = readStandardLibrary(p_fileName, groupDictionary); return; } catch (Exception e) { System.err.println("Can't read Group Library!"); System.exit(0); } //#] } //## operation readGroupTree(String) public void readGroupTree(String p_fileName) { //#[ operation readGroupTree(String) try { groupTree = readStandardTree(p_fileName,groupDictionary,0); } catch (Exception e) { System.err.println("Can't read thermo group tree file!"); System.err.println("Error: " + e.getMessage()); System.exit(0); } //#] } //## operation readOtherLibrary(String) public void readOtherLibrary(String p_fileName) { //#[ operation readOtherLibrary(String) try { readStandardCorrectionLibrary(p_fileName, otherLibrary); Iterator iter = otherLibrary.keySet().iterator(); while (iter.hasNext()) { FunctionalGroup fg = (FunctionalGroup)iter.next(); otherDictionary.put(fg.name, fg); } return; } catch (Exception e) { System.err.println("Can't read other correction Library!"); System.err.println("Error: " + e.getClass().getName() + " : " + e.getMessage()); System.exit(0); } //#] } //## operation readOtherTree(String) public void readOtherTree(String p_fileName) { //#[ operation readOtherTree(String) try { otherTree = readStandardTree(p_fileName,otherDictionary,0); } catch (Exception e) { System.err.println("Can't read thermo Other tree file!"); System.err.println("Error: " + e.getMessage()); System.exit(0); } //#] } //## operation readRadicalDictionary(String) public void readRadicalDictionary(String p_fileName) { //#[ operation readRadicalDictionary(String) try { radicalDictionary = readStandardDictionary(p_fileName); return; } catch (Exception e) { System.err.println("Error in read radical dictionary!\n" + e.getMessage()); System.exit(0); } //#] } //## operation readRadicalLibrary(String) public void readRadicalLibrary(String p_fileName) { //#[ operation readRadicalLibrary(String) try { radicalLibrary = readStandardLibrary(p_fileName, radicalDictionary); return; } catch (Exception e) { System.err.println("Can't read radical Library!"); System.exit(0); } //#] } //## operation readRadicalTree(String) public void readRadicalTree(String p_fileName) { //#[ 
operation readRadicalTree(String) try { radicalTree = readStandardTree(p_fileName,radicalDictionary,0); } catch (Exception e) { System.err.println("Can't read thermo group tree file!"); System.err.println("Error: " + e.getMessage()); System.exit(0); } //#] } //## operation readRingLibrary(String) public void readRingDictionary(String p_fileName) { //#[ operation readRingDictionary(String) try { ringDictionary = readStandardDictionary(p_fileName); return; } catch (Exception e) { System.err.println("Error in read ring dictionary!\n" + e.getMessage()); System.exit(0); } //#] } //## operation readRingTree(String) public void readRingTree(String p_fileName) { //#[ operation readRingTree(String) try { ringTree = readStandardTree(p_fileName,ringDictionary,0); } catch (Exception e) { System.err.println("Can't read ring tree file!"); System.err.println("Error: " + e.getMessage()); System.exit(0); } //#] } // end pey //## operation readRingLibrary(String) public void readRingLibrary(String p_fileName) { //#[ operation readRingLibrary(String) try { // begin pey // readStandardCorrectionLibrary(p_fileName, ringLibrary); ringLibrary = readStandardLibrary(p_fileName, ringDictionary); // end pey return; } catch (Exception e) { System.err.println("Can't read Ring Correction Library!"); // begin pey System.err.println("Error: " + e); System.exit(0); // end pey } //#] } //## operation readStandardCorrectionLibrary(String,HashMap) protected void readStandardCorrectionLibrary(String p_fileName, HashMap p_library) throws IOException { //#[ operation readStandardCorrectionLibrary(String,HashMap) try { FileReader in = new FileReader(p_fileName); BufferedReader data = new BufferedReader(in); String line = ChemParser.readMeaningfulLine(data); while (line != null) { // step 1: read in index and name StringTokenizer token = new StringTokenizer(line); int index = Integer.parseInt(token.nextToken()); String name = token.nextToken(); if (p_library == ringLibrary) { String fomula = token.nextToken(); String sigma = token.nextToken(); } // setp 2: read in thermoGAValue String thermo=""; for (int i=0;i<12;i++) { thermo = thermo.concat(token.nextToken()); thermo = thermo.concat(" "); } ThermoGAValue gaValue = ChemParser.parseThermoGAValue(thermo); String comments = ""; while (token.hasMoreTokens()) { comments = comments + " " + token.nextToken(); } ThermoGAValue newGaValue = new ThermoGAValue(name,gaValue,comments); // step3: read in graph of the functional group Graph g = ChemParser.readFGGraph(data); if (g == null) throw new NullGraphException(); FunctionalGroup fg = FunctionalGroup.make(name, g); // step4: put in library Object previous = p_library.put(fg, newGaValue); if (previous != null) { throw new ReplaceThermoGAValueException(); } line = ChemParser.readMeaningfulLine(data); } in.close(); return; } catch (IOException e) { throw new IOException(); } //#] } //## operation readStandardDictionary(String) public HashMap readStandardDictionary(String p_fileName) throws FileNotFoundException, IOException { //#[ operation readStandardDictionary(String) try { FileReader in = new FileReader(p_fileName); BufferedReader data = new BufferedReader(in); HashMap dictionary = new HashMap(); HashMap unRead = new HashMap(); String line = ChemParser.readMeaningfulLine(data); read: while (line != null) { StringTokenizer st = new StringTokenizer(line); String fgname = st.nextToken(); data.mark(10000); line = ChemParser.readMeaningfulLine(data); if (line == null) break read; line = line.trim(); String prefix = line.substring(0,5); if 
(prefix.compareToIgnoreCase("union") == 0) { HashSet union = ChemParser.readUnion(line); unRead.put(fgname,union); } else { data.reset(); Graph fgGraph = null; try { fgGraph = ChemParser.readFGGraph(data); } catch (Exception e) { throw new InvalidFunctionalGroupException(fgname + ": " + e.getMessage()); } if (fgGraph == null) throw new InvalidFunctionalGroupException(fgname); FunctionalGroup fg = FunctionalGroup.make(fgname, fgGraph); Object old = dictionary.get(fgname); if (old == null) { dictionary.put(fgname,fg); } else { FunctionalGroup oldFG = (FunctionalGroup)old; if (!oldFG.equals(fg)) throw new ReplaceFunctionalGroupException(fgname); } } //System.out.println(line); line = ChemParser.readMeaningfulLine(data); } while (!unRead.isEmpty()) { String fgname = (String)(unRead.keySet().iterator().next()); ChemParser.findUnion(fgname,unRead,dictionary); } in.close(); return dictionary; } catch (FileNotFoundException e) { throw new FileNotFoundException(p_fileName); } catch (IOException e) { throw new IOException(p_fileName + ": " + e.getMessage()); } //#] } //## operation readStandardLibrary(String,HashMap) protected HashMap readStandardLibrary(String p_fileName, HashMap p_dictionary) throws IOException { //#[ operation readStandardLibrary(String,HashMap) try { FileReader in = new FileReader(p_fileName); BufferedReader data = new BufferedReader(in); HashMap library = new HashMap(); String line = ChemParser.readMeaningfulLine(data); while (line != null) { //System.out.println(line);// // step 1: read in index and name StringTokenizer token = new StringTokenizer(line); - int index = Integer.parseInt(token.nextToken()); + String index = token.nextToken(); //1/6/09 gmagoon changed index from integer to string, so that if/when ChemGreen/RMGVE adds a decimal after the entry number (after editing thermo library), RMG will still be able to read it String name = token.nextToken(); // step 2: find this functional group in dictionary by name Matchable fg = (Matchable)p_dictionary.get(name); if (fg == null) { throw new FunctionalGroupNotFoundException(); //System.out.println(name); } // step 3: read in thermoGAValue String thermo = token.nextToken(); // if there is a set of real thermo numbers, read them in and put the thermo data into library try { double H = Double.parseDouble(thermo); thermo = thermo.concat(" "); for (int i=0;i<11;i++) { thermo = thermo.concat(token.nextToken()); thermo = thermo.concat(" "); } ThermoGAValue gaValue = ChemParser.parseThermoGAValue(thermo); String comments = ""; while (token.hasMoreTokens()) { comments = comments + " " + token.nextToken(); } ThermoGAValue newGaValue=new ThermoGAValue(name,gaValue,comments); // step4: put in library Object previous = library.put(fg, newGaValue); if (previous != null) { throw new ReplaceThermoGAValueException(); } } // if there is a referenced name, put the name into library catch (NumberFormatException e) { Object o = p_dictionary.get(thermo); if (o == null) { //throw new FunctionalGroupNotFoundException(thermo); System.out.print(index); System.out.println(": " + thermo); } Object previous = library.put(fg, thermo); if (previous != null) { throw new ReplaceThermoGAValueException(); } } line = ChemParser.readMeaningfulLine(data); } // scan the library to give the ones having referenced name the real thermo data Iterator iter = library.keySet().iterator(); while (iter.hasNext()) { Matchable fg = (Matchable)iter.next(); Object gaValue = library.get(fg); String path = ""; if (gaValue instanceof String) { do { String name = 
(String)gaValue; path = path + "->" + name; gaValue = library.get((Matchable)p_dictionary.get(name)); } while (gaValue instanceof String); if (gaValue == null || !(gaValue instanceof ThermoGAValue)) { throw new InvalidReferenceThermoGAValueException(); } ThermoGAValue newGaValue = new ThermoGAValue(fg.getName(),(ThermoGAValue)gaValue, "Use the value of " + path); library.put(fg,newGaValue); } } in.close(); return library; } catch (IOException e) { throw new IOException(); } //#] } //## operation readStandardTree(String,HashMap,int) public HierarchyTree readStandardTree(String p_fileName, HashMap p_dictionary, int p_level) throws IOException { //#[ operation readStandardTree(String,HashMap,int) try { FileReader in = new FileReader(p_fileName); BufferedReader data = new BufferedReader(in); HierarchyTree tree = ChemParser.readHierarchyTree(data,p_dictionary,p_level); in.close(); return tree; } catch (IOException e) { throw new IOException(p_fileName); } //#] } protected static ThermoGAGroupLibrary getINSTANCE() { return INSTANCE; } public HashMap getGroupDictionary() { return groupDictionary; } public HashMap getGroupLibrary() { return groupLibrary; } public void setGroupLibrary(HashMap p_groupLibrary) { groupLibrary = p_groupLibrary; } protected HierarchyTree getGroupTree() { return groupTree; } public HashMap getOtherDictionary() { return otherDictionary; } public void setOtherDictionary(HashMap p_otherDictionary) { otherDictionary = p_otherDictionary; } public HashMap getOtherLibrary() { return otherLibrary; } public HierarchyTree getOtherTree() { return otherTree; } public void setOtherTree(HierarchyTree p_otherTree) { otherTree = p_otherTree; } public HashMap getRadicalDictionary() { return radicalDictionary; } public void setRadicalDictionary(HashMap p_radicalDictionary) { radicalDictionary = p_radicalDictionary; } protected HashMap getRadicalLibrary() { return radicalLibrary; } public HierarchyTree getRadicalTree() { return radicalTree; } public void setRadicalTree(HierarchyTree p_radicalTree) { radicalTree = p_radicalTree; } protected HashMap getRingLibrary() { return ringLibrary; } } /********************************************************************* File Path : RMG\RMG\jing\chem\ThermoGAGroupLibrary.java *********************************************************************/
true
true
protected HashMap readStandardLibrary(String p_fileName, HashMap p_dictionary) throws IOException { //#[ operation readStandardLibrary(String,HashMap) try { FileReader in = new FileReader(p_fileName); BufferedReader data = new BufferedReader(in); HashMap library = new HashMap(); String line = ChemParser.readMeaningfulLine(data); while (line != null) { //System.out.println(line);// // step 1: read in index and name StringTokenizer token = new StringTokenizer(line); int index = Integer.parseInt(token.nextToken()); String name = token.nextToken(); // step 2: find this functional group in dictionary by name Matchable fg = (Matchable)p_dictionary.get(name); if (fg == null) { throw new FunctionalGroupNotFoundException(); //System.out.println(name); } // step 3: read in thermoGAValue String thermo = token.nextToken(); // if there is a set of real thermo numbers, read them in and put the thermo data into library try { double H = Double.parseDouble(thermo); thermo = thermo.concat(" "); for (int i=0;i<11;i++) { thermo = thermo.concat(token.nextToken()); thermo = thermo.concat(" "); } ThermoGAValue gaValue = ChemParser.parseThermoGAValue(thermo); String comments = ""; while (token.hasMoreTokens()) { comments = comments + " " + token.nextToken(); } ThermoGAValue newGaValue=new ThermoGAValue(name,gaValue,comments); // step4: put in library Object previous = library.put(fg, newGaValue); if (previous != null) { throw new ReplaceThermoGAValueException(); } } // if there is a referenced name, put the name into library catch (NumberFormatException e) { Object o = p_dictionary.get(thermo); if (o == null) { //throw new FunctionalGroupNotFoundException(thermo); System.out.print(index); System.out.println(": " + thermo); } Object previous = library.put(fg, thermo); if (previous != null) { throw new ReplaceThermoGAValueException(); } } line = ChemParser.readMeaningfulLine(data); } // scan the library to give the ones having referenced name the real thermo data Iterator iter = library.keySet().iterator(); while (iter.hasNext()) { Matchable fg = (Matchable)iter.next(); Object gaValue = library.get(fg); String path = ""; if (gaValue instanceof String) { do { String name = (String)gaValue; path = path + "->" + name; gaValue = library.get((Matchable)p_dictionary.get(name)); } while (gaValue instanceof String); if (gaValue == null || !(gaValue instanceof ThermoGAValue)) { throw new InvalidReferenceThermoGAValueException(); } ThermoGAValue newGaValue = new ThermoGAValue(fg.getName(),(ThermoGAValue)gaValue, "Use the value of " + path); library.put(fg,newGaValue); } } in.close(); return library; } catch (IOException e) { throw new IOException(); } //#] }
protected HashMap readStandardLibrary(String p_fileName, HashMap p_dictionary) throws IOException { //#[ operation readStandardLibrary(String,HashMap) try { FileReader in = new FileReader(p_fileName); BufferedReader data = new BufferedReader(in); HashMap library = new HashMap(); String line = ChemParser.readMeaningfulLine(data); while (line != null) { //System.out.println(line);// // step 1: read in index and name StringTokenizer token = new StringTokenizer(line); String index = token.nextToken(); //1/6/09 gmagoon changed index from integer to string, so that if/when ChemGreen/RMGVE adds a decimal after the entry number (after editing thermo library), RMG will still be able to read it String name = token.nextToken(); // step 2: find this functional group in dictionary by name Matchable fg = (Matchable)p_dictionary.get(name); if (fg == null) { throw new FunctionalGroupNotFoundException(); //System.out.println(name); } // step 3: read in thermoGAValue String thermo = token.nextToken(); // if there is a set of real thermo numbers, read them in and put the thermo data into library try { double H = Double.parseDouble(thermo); thermo = thermo.concat(" "); for (int i=0;i<11;i++) { thermo = thermo.concat(token.nextToken()); thermo = thermo.concat(" "); } ThermoGAValue gaValue = ChemParser.parseThermoGAValue(thermo); String comments = ""; while (token.hasMoreTokens()) { comments = comments + " " + token.nextToken(); } ThermoGAValue newGaValue=new ThermoGAValue(name,gaValue,comments); // step4: put in library Object previous = library.put(fg, newGaValue); if (previous != null) { throw new ReplaceThermoGAValueException(); } } // if there is a referenced name, put the name into library catch (NumberFormatException e) { Object o = p_dictionary.get(thermo); if (o == null) { //throw new FunctionalGroupNotFoundException(thermo); System.out.print(index); System.out.println(": " + thermo); } Object previous = library.put(fg, thermo); if (previous != null) { throw new ReplaceThermoGAValueException(); } } line = ChemParser.readMeaningfulLine(data); } // scan the library to give the ones having referenced name the real thermo data Iterator iter = library.keySet().iterator(); while (iter.hasNext()) { Matchable fg = (Matchable)iter.next(); Object gaValue = library.get(fg); String path = ""; if (gaValue instanceof String) { do { String name = (String)gaValue; path = path + "->" + name; gaValue = library.get((Matchable)p_dictionary.get(name)); } while (gaValue instanceof String); if (gaValue == null || !(gaValue instanceof ThermoGAValue)) { throw new InvalidReferenceThermoGAValueException(); } ThermoGAValue newGaValue = new ThermoGAValue(fg.getName(),(ThermoGAValue)gaValue, "Use the value of " + path); library.put(fg,newGaValue); } } in.close(); return library; } catch (IOException e) { throw new IOException(); } //#] }
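Annotation (not part of the stored row): the fix above keeps the entry index as a String instead of calling Integer.parseInt, so thermo library entries whose numbers gain a trailing decimal (e.g. "12.") after editing no longer abort the read. Below is a minimal, self-contained sketch of that tolerant-index idea; the `parseEntry` helper and the sample lines are hypothetical and not taken from the RMG code.

```java
import java.util.StringTokenizer;

public class TolerantIndexDemo {
    // Hypothetical helper: reads "index name ..." lines without forcing the
    // index to be a clean integer, mirroring the readStandardLibrary change.
    static String parseEntry(String line) {
        StringTokenizer token = new StringTokenizer(line);
        String index = token.nextToken(); // kept as String: "12" and "12." both work
        String name = token.nextToken();
        return index + " -> " + name;
    }

    public static void main(String[] args) {
        // "12." would throw NumberFormatException under Integer.parseInt
        System.out.println(parseEntry("12. Cs-HHHH 0.0 ..."));
        System.out.println(parseEntry("13 Cds-OdHH 1.5 ..."));
    }
}
```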
diff --git a/src/com/modcrafting/ultrabans/commands/Kick.java b/src/com/modcrafting/ultrabans/commands/Kick.java index 1566916..3e7e63c 100644 --- a/src/com/modcrafting/ultrabans/commands/Kick.java +++ b/src/com/modcrafting/ultrabans/commands/Kick.java @@ -1,159 +1,159 @@ package com.modcrafting.ultrabans.commands; import java.util.logging.Level; import java.util.logging.Logger; import org.bukkit.ChatColor; import org.bukkit.command.Command; import org.bukkit.command.CommandExecutor; import org.bukkit.command.CommandSender; import org.bukkit.configuration.file.YamlConfiguration; import org.bukkit.entity.Player; import com.modcrafting.ultrabans.UltraBan; public class Kick implements CommandExecutor{ public static final Logger log = Logger.getLogger("Minecraft"); UltraBan plugin; public Kick(UltraBan ultraBan) { this.plugin = ultraBan; } public boolean autoComplete; public String expandName(String p) { int m = 0; String Result = ""; for (int n = 0; n < plugin.getServer().getOnlinePlayers().length; n++) { String str = plugin.getServer().getOnlinePlayers()[n].getName(); if (str.matches("(?i).*" + p + ".*")) { m++; Result = str; if(m==2) { return null; } } if (str.equalsIgnoreCase(p)) return str; } if (m == 1) return Result; if (m > 1) { return null; } if (m < 1) { return p; } return p; } public boolean onCommand(CommandSender sender, Command command, String commandLabel, String[] args) { YamlConfiguration config = (YamlConfiguration) plugin.getConfig(); boolean auth = false; boolean anon = false; Player player = null; String admin = config.getString("defAdminName", "server"); if (sender instanceof Player){ player = (Player)sender; if (plugin.setupPermissions()){ if (plugin.permission.has(player, "ultraban.kick")) auth = true; }else{ if (player.isOp()) auth = true; //defaulting to Op if no vault doesn't take or node } admin = player.getName(); }else{ auth = true; //if sender is not a player - Console } if (!auth){ sender.sendMessage(ChatColor.RED + "You do not have the required permissions."); return true; } // Has enough arguments? if (args.length < 1) return false; String p = args[0].toLowerCase(); if(autoComplete) p = expandName(p); // Reason stuff String reason = config.getString("defReason", "not sure"); boolean broadcast = true; if(args.length > 1){ if(args[1].equalsIgnoreCase("-s")){ broadcast = false; reason = combineSplit(2, args, " "); }else{ if(args[1].equalsIgnoreCase("-a")){ anon = true; reason = combineSplit(2, args, " "); }else{ reason = combineSplit(1, args, " "); } } } if (anon){ admin = config.getString("defAdminName", "server"); } if(p.equals("*")){ if (sender instanceof Player) if (plugin.setupPermissions()){ if (!plugin.permission.has(player, "ultraban.kick.all")) return true; }else{ if (!player.isOp()) return true; } log.log(Level.INFO, "[UltraBan] " + admin + " kicked Everyone Reason: " + reason); Player[] pl = plugin.getServer().getOnlinePlayers(); for (int i=0; i<pl.length; i++){ - if (!plugin.permission.has(pl[i], "ultraban.kick.all")){ + if (pl[i] != player){ String adminMsg = config.getString("messages.kickAllMsg", "Everyone has been kicked by %admin%. 
Reason: %reason%"); adminMsg = adminMsg.replaceAll("%admin%", admin); adminMsg = adminMsg.replaceAll("%reason%", reason); pl[i].kickPlayer(formatMessage(adminMsg)); } } return true; } if(plugin.autoComplete) p = expandName(p); Player victim = plugin.getServer().getPlayer(p); if(victim == null){ sender.sendMessage(ChatColor.GRAY + "Player must be online!"); return true; } plugin.db.addPlayer(victim.getName(), reason, admin, 0, 3); log.log(Level.INFO, "[UltraBan] " + admin + " kicked player " + victim.getName() + ". Reason: " + reason); String adminMsg = config.getString("messages.kickMsgVictim", "You have been kicked by %admin%. Reason: %reason%"); adminMsg = adminMsg.replaceAll("%admin%", admin); adminMsg = adminMsg.replaceAll("%reason%", reason); victim.kickPlayer(formatMessage(adminMsg)); if(broadcast){ String kickMsgBroadcast = config.getString("messages.kickMsgBroadcast", "%victim% has been kicked by %admin%. Reason: %reason%"); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%admin%", admin); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%victim%", victim.getName()); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%reason%", reason); plugin.getServer().broadcastMessage(formatMessage(kickMsgBroadcast)); }else{ String kickMsgBroadcast = config.getString("messages.kickMsgBroadcast", "%victim% has been kicked by %admin%. Reason: %reason%"); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%admin%", admin); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%victim%", victim.getName()); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%reason%", reason); sender.sendMessage(formatMessage(":S:" + kickMsgBroadcast)); } return true; } public String combineSplit(int startIndex, String[] string, String seperator) { StringBuilder builder = new StringBuilder(); for (int i = startIndex; i < string.length; i++) { builder.append(string[i]); builder.append(seperator); } builder.deleteCharAt(builder.length() - seperator.length()); // remove return builder.toString(); } public String formatMessage(String str){ String funnyChar = new Character((char) 167).toString(); str = str.replaceAll("&", funnyChar); return str; } }
true
true
public boolean onCommand(CommandSender sender, Command command, String commandLabel, String[] args) { YamlConfiguration config = (YamlConfiguration) plugin.getConfig(); boolean auth = false; boolean anon = false; Player player = null; String admin = config.getString("defAdminName", "server"); if (sender instanceof Player){ player = (Player)sender; if (plugin.setupPermissions()){ if (plugin.permission.has(player, "ultraban.kick")) auth = true; }else{ if (player.isOp()) auth = true; //defaulting to Op if no vault doesn't take or node } admin = player.getName(); }else{ auth = true; //if sender is not a player - Console } if (!auth){ sender.sendMessage(ChatColor.RED + "You do not have the required permissions."); return true; } // Has enough arguments? if (args.length < 1) return false; String p = args[0].toLowerCase(); if(autoComplete) p = expandName(p); // Reason stuff String reason = config.getString("defReason", "not sure"); boolean broadcast = true; if(args.length > 1){ if(args[1].equalsIgnoreCase("-s")){ broadcast = false; reason = combineSplit(2, args, " "); }else{ if(args[1].equalsIgnoreCase("-a")){ anon = true; reason = combineSplit(2, args, " "); }else{ reason = combineSplit(1, args, " "); } } } if (anon){ admin = config.getString("defAdminName", "server"); } if(p.equals("*")){ if (sender instanceof Player) if (plugin.setupPermissions()){ if (!plugin.permission.has(player, "ultraban.kick.all")) return true; }else{ if (!player.isOp()) return true; } log.log(Level.INFO, "[UltraBan] " + admin + " kicked Everyone Reason: " + reason); Player[] pl = plugin.getServer().getOnlinePlayers(); for (int i=0; i<pl.length; i++){ if (!plugin.permission.has(pl[i], "ultraban.kick.all")){ String adminMsg = config.getString("messages.kickAllMsg", "Everyone has been kicked by %admin%. Reason: %reason%"); adminMsg = adminMsg.replaceAll("%admin%", admin); adminMsg = adminMsg.replaceAll("%reason%", reason); pl[i].kickPlayer(formatMessage(adminMsg)); } } return true; } if(plugin.autoComplete) p = expandName(p); Player victim = plugin.getServer().getPlayer(p); if(victim == null){ sender.sendMessage(ChatColor.GRAY + "Player must be online!"); return true; } plugin.db.addPlayer(victim.getName(), reason, admin, 0, 3); log.log(Level.INFO, "[UltraBan] " + admin + " kicked player " + victim.getName() + ". Reason: " + reason); String adminMsg = config.getString("messages.kickMsgVictim", "You have been kicked by %admin%. Reason: %reason%"); adminMsg = adminMsg.replaceAll("%admin%", admin); adminMsg = adminMsg.replaceAll("%reason%", reason); victim.kickPlayer(formatMessage(adminMsg)); if(broadcast){ String kickMsgBroadcast = config.getString("messages.kickMsgBroadcast", "%victim% has been kicked by %admin%. Reason: %reason%"); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%admin%", admin); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%victim%", victim.getName()); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%reason%", reason); plugin.getServer().broadcastMessage(formatMessage(kickMsgBroadcast)); }else{ String kickMsgBroadcast = config.getString("messages.kickMsgBroadcast", "%victim% has been kicked by %admin%. Reason: %reason%"); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%admin%", admin); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%victim%", victim.getName()); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%reason%", reason); sender.sendMessage(formatMessage(":S:" + kickMsgBroadcast)); } return true; }
public boolean onCommand(CommandSender sender, Command command, String commandLabel, String[] args) { YamlConfiguration config = (YamlConfiguration) plugin.getConfig(); boolean auth = false; boolean anon = false; Player player = null; String admin = config.getString("defAdminName", "server"); if (sender instanceof Player){ player = (Player)sender; if (plugin.setupPermissions()){ if (plugin.permission.has(player, "ultraban.kick")) auth = true; }else{ if (player.isOp()) auth = true; //defaulting to Op if no vault doesn't take or node } admin = player.getName(); }else{ auth = true; //if sender is not a player - Console } if (!auth){ sender.sendMessage(ChatColor.RED + "You do not have the required permissions."); return true; } // Has enough arguments? if (args.length < 1) return false; String p = args[0].toLowerCase(); if(autoComplete) p = expandName(p); // Reason stuff String reason = config.getString("defReason", "not sure"); boolean broadcast = true; if(args.length > 1){ if(args[1].equalsIgnoreCase("-s")){ broadcast = false; reason = combineSplit(2, args, " "); }else{ if(args[1].equalsIgnoreCase("-a")){ anon = true; reason = combineSplit(2, args, " "); }else{ reason = combineSplit(1, args, " "); } } } if (anon){ admin = config.getString("defAdminName", "server"); } if(p.equals("*")){ if (sender instanceof Player) if (plugin.setupPermissions()){ if (!plugin.permission.has(player, "ultraban.kick.all")) return true; }else{ if (!player.isOp()) return true; } log.log(Level.INFO, "[UltraBan] " + admin + " kicked Everyone Reason: " + reason); Player[] pl = plugin.getServer().getOnlinePlayers(); for (int i=0; i<pl.length; i++){ if (pl[i] != player){ String adminMsg = config.getString("messages.kickAllMsg", "Everyone has been kicked by %admin%. Reason: %reason%"); adminMsg = adminMsg.replaceAll("%admin%", admin); adminMsg = adminMsg.replaceAll("%reason%", reason); pl[i].kickPlayer(formatMessage(adminMsg)); } } return true; } if(plugin.autoComplete) p = expandName(p); Player victim = plugin.getServer().getPlayer(p); if(victim == null){ sender.sendMessage(ChatColor.GRAY + "Player must be online!"); return true; } plugin.db.addPlayer(victim.getName(), reason, admin, 0, 3); log.log(Level.INFO, "[UltraBan] " + admin + " kicked player " + victim.getName() + ". Reason: " + reason); String adminMsg = config.getString("messages.kickMsgVictim", "You have been kicked by %admin%. Reason: %reason%"); adminMsg = adminMsg.replaceAll("%admin%", admin); adminMsg = adminMsg.replaceAll("%reason%", reason); victim.kickPlayer(formatMessage(adminMsg)); if(broadcast){ String kickMsgBroadcast = config.getString("messages.kickMsgBroadcast", "%victim% has been kicked by %admin%. Reason: %reason%"); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%admin%", admin); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%victim%", victim.getName()); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%reason%", reason); plugin.getServer().broadcastMessage(formatMessage(kickMsgBroadcast)); }else{ String kickMsgBroadcast = config.getString("messages.kickMsgBroadcast", "%victim% has been kicked by %admin%. Reason: %reason%"); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%admin%", admin); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%victim%", victim.getName()); kickMsgBroadcast = kickMsgBroadcast.replaceAll("%reason%", reason); sender.sendMessage(formatMessage(":S:" + kickMsgBroadcast)); } return true; }
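Annotation (not part of the stored row): the kick-all branch now skips only the issuing player instead of filtering on a per-player permission check. A minimal sketch of the corrected loop is shown below, assuming hypothetical stand-ins for Bukkit's Player and the plugin wiring; it is an illustration of the pattern, not the UltraBan implementation.

```java
import java.util.Arrays;
import java.util.List;

public class KickAllDemo {
    // Hypothetical stand-in for Bukkit's Player, only for illustration.
    static class Player {
        final String name;
        Player(String name) { this.name = name; }
        void kickPlayer(String msg) { System.out.println(name + " kicked: " + msg); }
    }

    // Kick everyone except the issuing admin. The buggy loop filtered on a
    // per-player permission check instead, which can also NPE when the sender
    // is the console (player == null) or no Vault permission provider exists.
    static void kickAll(List<Player> online, Player issuer, String msg) {
        for (Player p : online) {
            if (p != issuer) {
                p.kickPlayer(msg);
            }
        }
    }

    public static void main(String[] args) {
        Player admin = new Player("admin");
        List<Player> online = Arrays.asList(admin, new Player("alice"), new Player("bob"));
        kickAll(online, admin, "Everyone has been kicked by admin. Reason: maintenance");
    }
}
```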
diff --git a/src/main/java/de/cismet/cids/utils/jasperreports/ReportSwingWorker.java b/src/main/java/de/cismet/cids/utils/jasperreports/ReportSwingWorker.java index 1ded959..8c873c0 100644 --- a/src/main/java/de/cismet/cids/utils/jasperreports/ReportSwingWorker.java +++ b/src/main/java/de/cismet/cids/utils/jasperreports/ReportSwingWorker.java @@ -1,282 +1,282 @@ /*************************************************** * * cismet GmbH, Saarbruecken, Germany * * ... and it just works. * ****************************************************/ package de.cismet.cids.utils.jasperreports; import net.sf.jasperreports.engine.JRDataSource; import net.sf.jasperreports.engine.JRException; import net.sf.jasperreports.engine.JasperExportManager; import net.sf.jasperreports.engine.JasperFillManager; import net.sf.jasperreports.engine.JasperPrint; import net.sf.jasperreports.engine.JasperReport; import net.sf.jasperreports.engine.util.JRLoader; import java.awt.Frame; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.concurrent.ExecutionException; import javax.swing.JOptionPane; import javax.swing.SwingUtilities; import javax.swing.SwingWorker; import de.cismet.cids.dynamics.CidsBean; import de.cismet.tools.BrowserLauncher; import de.cismet.tools.gui.StaticSwingTools; /** * DOCUMENT ME! * * @version $Revision$, $Date$ */ public class ReportSwingWorker extends SwingWorker<Boolean, Object> { //~ Static fields/initializers --------------------------------------------- private static final org.apache.log4j.Logger LOG = org.apache.log4j.Logger.getLogger(ReportSwingWorker.class); //~ Instance fields -------------------------------------------------------- // private Collection<CidsBean> cidsBeans; // private String compiledReport; private final List<Collection<CidsBean>> cidsBeansList; private final List<String> compiledReportList; private final ReportSwingWorkerDialog dialog; private final boolean withDialog; private String directory; //~ Constructors ----------------------------------------------------------- /** * Creates a new ReportSwingWorker object. * * @param cidsBeansList map DOCUMENT ME! * @param compiledReportList DOCUMENT ME! * @param directory DOCUMENT ME! */ public ReportSwingWorker(final List<Collection<CidsBean>> cidsBeansList, final List<String> compiledReportList, final String directory) { this(cidsBeansList, compiledReportList, false, null, directory); } /** * Creates a new ReportSwingWorker object. * * @param cidsBeans DOCUMENT ME! * @param compiledReport DOCUMENT ME! * @param directory DOCUMENT ME! */ public ReportSwingWorker(final Collection<CidsBean> cidsBeans, final String compiledReport, final String directory) { this(cidsBeans, compiledReport, false, null, directory); } /** * Creates a new ReportSwingWorker object. * * @param cidsBeansList DOCUMENT ME! * @param compiledReportList DOCUMENT ME! * @param parent DOCUMENT ME! * @param directory DOCUMENT ME! */ public ReportSwingWorker(final List<Collection<CidsBean>> cidsBeansList, final List<String> compiledReportList, final Frame parent, final String directory) { this(cidsBeansList, compiledReportList, true, parent, directory); } /** * Creates a new ReportSwingWorker object. * * @param cidsBeans DOCUMENT ME! * @param compiledReport DOCUMENT ME! * @param parent DOCUMENT ME! 
* @param directory DOCUMENT ME! */ public ReportSwingWorker(final Collection<CidsBean> cidsBeans, final String compiledReport, final Frame parent, final String directory) { this(cidsBeans, compiledReport, true, parent, directory); } /** * Creates a new ReportSwingWorker object. * * @param cidsBeansList DOCUMENT ME! * @param compiledReportList DOCUMENT ME! * @param withDialog DOCUMENT ME! * @param parent DOCUMENT ME! * @param directory DOCUMENT ME! */ public ReportSwingWorker(final List<Collection<CidsBean>> cidsBeansList, final List<String> compiledReportList, final boolean withDialog, final Frame parent, final String directory) { this.cidsBeansList = cidsBeansList; this.compiledReportList = compiledReportList; this.withDialog = withDialog; this.directory = directory; if (withDialog) { dialog = new ReportSwingWorkerDialog(parent, true); } else { dialog = null; } } /** * Creates a new ReportSwingWorker object. * * @param cidsBeans DOCUMENT ME! * @param compiledReport DOCUMENT ME! * @param withDialog DOCUMENT ME! * @param parent DOCUMENT ME! * @param directory DOCUMENT ME! */ public ReportSwingWorker(final Collection<CidsBean> cidsBeans, final String compiledReport, final boolean withDialog, final Frame parent, final String directory) { this.cidsBeansList = new ArrayList<Collection<CidsBean>>(); this.cidsBeansList.add(cidsBeans); this.compiledReportList = new ArrayList<String>(); this.compiledReportList.add(compiledReport); this.withDialog = withDialog; this.directory = directory; if (withDialog) { dialog = new ReportSwingWorkerDialog(parent, true); } else { dialog = null; } } //~ Methods ---------------------------------------------------------------- /** * DOCUMENT ME! * * @return DOCUMENT ME! * * @throws Exception DOCUMENT ME! */ @Override protected Boolean doInBackground() throws Exception { if (withDialog) { SwingUtilities.invokeLater(new Runnable() { @Override public void run() { StaticSwingTools.showDialog(dialog); } }); } final ByteArrayOutputStream out = new ByteArrayOutputStream(); FileOutputStream fos = null; try { final List<InputStream> ins = new ArrayList<InputStream>(); for (int index = 0; index < compiledReportList.size(); index++) { final String report = compiledReportList.get(index); final Collection<CidsBean> beans = cidsBeansList.get(index); // report holen final JasperReport jasperReport = (JasperReport)JRLoader.loadObject(ReportSwingWorker.class .getResourceAsStream(report)); // daten vorbereiten final JRDataSource dataSource = new CidsBeanDataSource(beans); // print aus report und daten erzeugen final JasperPrint jasperPrint = JasperFillManager.fillReport(jasperReport, new HashMap(), dataSource); // quer- bzw hochformat übernehmen jasperPrint.setOrientation(jasperReport.getOrientation()); // zum pdfStream exportieren und der streamliste hinzufügen final ByteArrayOutputStream outTmp = new ByteArrayOutputStream(); JasperExportManager.exportReportToPdfStream(jasperPrint, outTmp); ins.add(new ByteArrayInputStream(outTmp.toByteArray())); outTmp.close(); } // pdfStreams zu einem einzelnen pdfStream zusammenfügen ReportHelper.concatPDFs(ins, out, true); // zusammengefügten pdfStream in Datei schreiben File file = new File(directory, "report.pdf"); int index = 0; while (file.exists()) { file = new File(directory, "report" + (++index) + ".pdf"); } file.getParentFile().mkdirs(); fos = new FileOutputStream(file); fos.write(out.toByteArray()); // Datei über Browser öffnen - BrowserLauncher.openURL("file:///" + file); + BrowserLauncher.openURL(file.toURI().toURL().toString()); 
return true; } catch (IOException ex) { LOG.error("Export to PDF-Stream failed.", ex); } catch (JRException ex) { LOG.error("Export to PDF-Stream failed.", ex); } finally { try { if (out != null) { out.close(); } if (fos != null) { fos.close(); } } catch (IOException ex) { LOG.error("error while closing streams", ex); } } return false; } /** * DOCUMENT ME! */ @Override protected void done() { boolean error = false; try { error = !get(); } catch (InterruptedException ex) { // unterbrochen, nichts tun } catch (ExecutionException ex) { error = true; LOG.error("error while generating report", ex); } if (withDialog) { dialog.setVisible(false); } if (error) { JOptionPane.showMessageDialog( dialog.getParent(), "Beim Generieren des Reports ist ein Fehler aufgetreten.", "Fehler!", JOptionPane.ERROR_MESSAGE); } } }
true
true
protected Boolean doInBackground() throws Exception { if (withDialog) { SwingUtilities.invokeLater(new Runnable() { @Override public void run() { StaticSwingTools.showDialog(dialog); } }); } final ByteArrayOutputStream out = new ByteArrayOutputStream(); FileOutputStream fos = null; try { final List<InputStream> ins = new ArrayList<InputStream>(); for (int index = 0; index < compiledReportList.size(); index++) { final String report = compiledReportList.get(index); final Collection<CidsBean> beans = cidsBeansList.get(index); // report holen final JasperReport jasperReport = (JasperReport)JRLoader.loadObject(ReportSwingWorker.class .getResourceAsStream(report)); // daten vorbereiten final JRDataSource dataSource = new CidsBeanDataSource(beans); // print aus report und daten erzeugen final JasperPrint jasperPrint = JasperFillManager.fillReport(jasperReport, new HashMap(), dataSource); // quer- bzw hochformat übernehmen jasperPrint.setOrientation(jasperReport.getOrientation()); // zum pdfStream exportieren und der streamliste hinzufügen final ByteArrayOutputStream outTmp = new ByteArrayOutputStream(); JasperExportManager.exportReportToPdfStream(jasperPrint, outTmp); ins.add(new ByteArrayInputStream(outTmp.toByteArray())); outTmp.close(); } // pdfStreams zu einem einzelnen pdfStream zusammenfügen ReportHelper.concatPDFs(ins, out, true); // zusammengefügten pdfStream in Datei schreiben File file = new File(directory, "report.pdf"); int index = 0; while (file.exists()) { file = new File(directory, "report" + (++index) + ".pdf"); } file.getParentFile().mkdirs(); fos = new FileOutputStream(file); fos.write(out.toByteArray()); // Datei über Browser öffnen BrowserLauncher.openURL("file:///" + file); return true; } catch (IOException ex) { LOG.error("Export to PDF-Stream failed.", ex); } catch (JRException ex) { LOG.error("Export to PDF-Stream failed.", ex); } finally { try { if (out != null) { out.close(); } if (fos != null) { fos.close(); } } catch (IOException ex) { LOG.error("error while closing streams", ex); } } return false; }
protected Boolean doInBackground() throws Exception { if (withDialog) { SwingUtilities.invokeLater(new Runnable() { @Override public void run() { StaticSwingTools.showDialog(dialog); } }); } final ByteArrayOutputStream out = new ByteArrayOutputStream(); FileOutputStream fos = null; try { final List<InputStream> ins = new ArrayList<InputStream>(); for (int index = 0; index < compiledReportList.size(); index++) { final String report = compiledReportList.get(index); final Collection<CidsBean> beans = cidsBeansList.get(index); // report holen final JasperReport jasperReport = (JasperReport)JRLoader.loadObject(ReportSwingWorker.class .getResourceAsStream(report)); // daten vorbereiten final JRDataSource dataSource = new CidsBeanDataSource(beans); // print aus report und daten erzeugen final JasperPrint jasperPrint = JasperFillManager.fillReport(jasperReport, new HashMap(), dataSource); // quer- bzw hochformat übernehmen jasperPrint.setOrientation(jasperReport.getOrientation()); // zum pdfStream exportieren und der streamliste hinzufügen final ByteArrayOutputStream outTmp = new ByteArrayOutputStream(); JasperExportManager.exportReportToPdfStream(jasperPrint, outTmp); ins.add(new ByteArrayInputStream(outTmp.toByteArray())); outTmp.close(); } // pdfStreams zu einem einzelnen pdfStream zusammenfügen ReportHelper.concatPDFs(ins, out, true); // zusammengefügten pdfStream in Datei schreiben File file = new File(directory, "report.pdf"); int index = 0; while (file.exists()) { file = new File(directory, "report" + (++index) + ".pdf"); } file.getParentFile().mkdirs(); fos = new FileOutputStream(file); fos.write(out.toByteArray()); // Datei über Browser öffnen BrowserLauncher.openURL(file.toURI().toURL().toString()); return true; } catch (IOException ex) { LOG.error("Export to PDF-Stream failed.", ex); } catch (JRException ex) { LOG.error("Export to PDF-Stream failed.", ex); } finally { try { if (out != null) { out.close(); } if (fos != null) { fos.close(); } } catch (IOException ex) { LOG.error("error while closing streams", ex); } } return false; }
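Annotation (not part of the stored row): the fix replaces the string-concatenated `"file:///" + file` URL with `file.toURI().toURL()`, which yields an absolute file URL with percent-encoded spaces and platform separators handled. A minimal sketch contrasting the two forms, with a hypothetical path containing a space:

```java
import java.io.File;
import java.net.MalformedURLException;

public class FileUrlDemo {
    public static void main(String[] args) throws MalformedURLException {
        // A relative path with a space, similar to "./target/my reports/report.pdf"
        File file = new File("target/my reports", "report.pdf");

        // Naive concatenation: stays relative and leaves the space unescaped,
        // so browsers often cannot open it.
        String naive = "file:///" + file;

        // toURI().toURL(): absolute path, spaces percent-encoded,
        // platform-specific separators normalized.
        String proper = file.toURI().toURL().toString();

        System.out.println("naive : " + naive);
        System.out.println("proper: " + proper);
    }
}
```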
diff --git a/luni/src/main/java/org/apache/harmony/xnet/provider/jsse/OpenSSLSocketImpl.java b/luni/src/main/java/org/apache/harmony/xnet/provider/jsse/OpenSSLSocketImpl.java index f37282e7c..ad21f7b36 100644 --- a/luni/src/main/java/org/apache/harmony/xnet/provider/jsse/OpenSSLSocketImpl.java +++ b/luni/src/main/java/org/apache/harmony/xnet/provider/jsse/OpenSSLSocketImpl.java @@ -1,1277 +1,1277 @@ /* * Copyright (C) 2007 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.harmony.xnet.provider.jsse; import dalvik.system.BlockGuard; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.InetAddress; import java.net.Socket; import java.net.SocketException; import java.security.PrivateKey; import java.security.SecureRandom; import java.security.cert.CertificateEncodingException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; import javax.net.ssl.HandshakeCompletedEvent; import javax.net.ssl.HandshakeCompletedListener; import javax.net.ssl.SSLException; import javax.net.ssl.SSLPeerUnverifiedException; import javax.net.ssl.SSLSession; import javax.security.auth.x500.X500Principal; import org.apache.harmony.security.provider.cert.X509CertImpl; /** * Implementation of the class OpenSSLSocketImpl based on OpenSSL. * <p> * This class only supports SSLv3 and TLSv1. This should be documented elsewhere * later, for example in the package.html or a separate reference document. * <p> * Extensions to SSLSocket include: * <ul> * <li>handshake timeout * <li>compression methods * <li>session tickets * <li>Server Name Indication * </ul> */ public class OpenSSLSocketImpl extends javax.net.ssl.SSLSocket implements NativeCrypto.SSLHandshakeCallbacks { private int sslNativePointer; private InputStream is; private OutputStream os; private final Object handshakeLock = new Object(); private final Object readLock = new Object(); private final Object writeLock = new Object(); private SSLParametersImpl sslParameters; private String[] enabledProtocols; private String[] enabledCipherSuites; private String[] enabledCompressionMethods; private boolean useSessionTickets; private String hostname; private OpenSSLSessionImpl sslSession; private Socket socket; private boolean autoClose; private boolean handshakeStarted = false; /** * Not set to true until the update from native that tells us the * full handshake is complete, since SSL_do_handshake can return * before the handshake is completely done due to * handshake_cutthrough support. */ private boolean handshakeCompleted = false; private ArrayList<HandshakeCompletedListener> listeners; /** * Local cache of timeout to avoid getsockopt on every read and * write for non-wrapped sockets. Note that * OpenSSLSocketImplWrapper overrides setSoTimeout and * getSoTimeout to delegate to the wrapped socket. 
*/ private int timeoutMilliseconds = 0; // BEGIN android-added private int handshakeTimeoutMilliseconds = -1; // -1 = same as timeout; 0 = infinite // END android-added private String wrappedHost; private int wrappedPort; private static final AtomicInteger instanceCount = new AtomicInteger(0); public static int getInstanceCount() { return instanceCount.get(); } private static void updateInstanceCount(int amount) { instanceCount.addAndGet(amount); } /** * Class constructor with 1 parameter * * @param sslParameters Parameters for the SSL * context * @throws IOException if network fails */ protected OpenSSLSocketImpl(SSLParametersImpl sslParameters) throws IOException { super(); init(sslParameters); } /** * Create an OpenSSLSocketImpl from an OpenSSLServerSocketImpl * * @param sslParameters Parameters for the SSL * context * @throws IOException if network fails */ protected OpenSSLSocketImpl(SSLParametersImpl sslParameters, String[] enabledProtocols, String[] enabledCipherSuites, String[] enabledCompressionMethods) throws IOException { super(); init(sslParameters, enabledProtocols, enabledCipherSuites, enabledCompressionMethods); } /** * Class constructor with 3 parameters * * @throws IOException if network fails * @throws java.net.UnknownHostException host not defined */ protected OpenSSLSocketImpl(String host, int port, SSLParametersImpl sslParameters) throws IOException { super(host, port); init(sslParameters); } /** * Class constructor with 3 parameters: 1st is InetAddress * * @throws IOException if network fails * @throws java.net.UnknownHostException host not defined */ protected OpenSSLSocketImpl(InetAddress address, int port, SSLParametersImpl sslParameters) throws IOException { super(address, port); init(sslParameters); } /** * Class constructor with 5 parameters: 1st is host * * @throws IOException if network fails * @throws java.net.UnknownHostException host not defined */ protected OpenSSLSocketImpl(String host, int port, InetAddress clientAddress, int clientPort, SSLParametersImpl sslParameters) throws IOException { super(host, port, clientAddress, clientPort); init(sslParameters); } /** * Class constructor with 5 parameters: 1st is InetAddress * * @throws IOException if network fails * @throws java.net.UnknownHostException host not defined */ protected OpenSSLSocketImpl(InetAddress address, int port, InetAddress clientAddress, int clientPort, SSLParametersImpl sslParameters) throws IOException { super(address, port, clientAddress, clientPort); init(sslParameters); } /** * Constructor with 5 parameters: 1st is socket. Enhances an existing socket * with SSL functionality. Invoked via OpenSSLSocketImplWrapper constructor. * * @throws IOException if network fails */ protected OpenSSLSocketImpl(Socket socket, String host, int port, boolean autoClose, SSLParametersImpl sslParameters) throws IOException { super(); this.socket = socket; this.wrappedHost = host; this.wrappedPort = port; this.autoClose = autoClose; init(sslParameters); // this.timeout is not set intentionally. // OpenSSLSocketImplWrapper.getSoTimeout will delegate timeout // to wrapped socket } /** * Initialize the SSL socket and set the certificates for the * future handshaking. */ private void init(SSLParametersImpl sslParameters) throws IOException { init(sslParameters, NativeCrypto.getSupportedProtocols(), NativeCrypto.getDefaultCipherSuites(), NativeCrypto.getDefaultCompressionMethods()); } /** * Initialize the SSL socket and set the certificates for the * future handshaking. 
*/ private void init(SSLParametersImpl sslParameters, String[] enabledProtocols, String[] enabledCipherSuites, String[] enabledCompressionMethods) throws IOException { this.sslParameters = sslParameters; this.enabledProtocols = enabledProtocols; this.enabledCipherSuites = enabledCipherSuites; this.enabledCompressionMethods = enabledCompressionMethods; updateInstanceCount(1); } /** * Gets the suitable session reference from the session cache container. * * @return OpenSSLSessionImpl */ private OpenSSLSessionImpl getCachedClientSession(ClientSessionContext sessionContext) { if (super.getInetAddress() == null || super.getInetAddress().getHostAddress() == null || super.getInetAddress().getHostName() == null) { return null; } OpenSSLSessionImpl session = (OpenSSLSessionImpl) sessionContext.getSession( super.getInetAddress().getHostName(), super.getPort()); if (session == null) { return null; } String protocol = session.getProtocol(); boolean protocolFound = false; for (String enabledProtocol : enabledProtocols) { if (protocol.equals(enabledProtocol)) { protocolFound = true; break; } } if (!protocolFound) { return null; } String cipherSuite = session.getCipherSuite(); boolean cipherSuiteFound = false; for (String enabledCipherSuite : enabledCipherSuites) { if (cipherSuite.equals(enabledCipherSuite)) { cipherSuiteFound = true; break; } } if (!cipherSuiteFound) { return null; } String compressionMethod = session.getCompressionMethod(); boolean compressionMethodFound = false; for (String enabledCompressionMethod : enabledCompressionMethods) { if (compressionMethod.equals(enabledCompressionMethod)) { compressionMethodFound = true; break; } } if (!compressionMethodFound) { return null; } return session; } /** * Ensures that logger is lazily loaded. The outer class seems to load * before logging is ready. */ static class LoggerHolder { static final Logger logger = Logger.getLogger(OpenSSLSocketImpl.class.getName()); } /** * Starts a TLS/SSL handshake on this connection using some native methods * from the OpenSSL library. It can negotiate new encryption keys, change * cipher suites, or initiate a new session. The certificate chain is * verified if the correspondent property in java.Security is set. All * listeners are notified at the end of the TLS/SSL handshake. * * @throws <code>IOException</code> if network fails */ @Override public void startHandshake() throws IOException { startHandshake(true); } /** * Checks whether the socket is closed, and throws an exception. * * @throws SocketException * if the socket is closed. */ private void checkOpen() throws SocketException { if (isClosed()) { throw new SocketException("Socket is closed"); } } /** * Perform the handshake * @param full If true, disable handshake cutthrough for a fully synchronous handshake */ public synchronized void startHandshake(boolean full) throws IOException { checkOpen(); synchronized (handshakeLock) { if (!handshakeStarted) { handshakeStarted = true; } else { return; } } // note that this modifies the global seed, not something specific to the connection final int seedLengthInBytes = NativeCrypto.RAND_SEED_LENGTH_IN_BYTES; final SecureRandom secureRandom = sslParameters.getSecureRandomMember(); if (secureRandom == null) { NativeCrypto.RAND_load_file("/dev/urandom", seedLengthInBytes); } else { NativeCrypto.RAND_seed(secureRandom.generateSeed(seedLengthInBytes)); } final boolean client = sslParameters.getUseClientMode(); final int sslCtxNativePointer = (client) ? 
sslParameters.getClientSessionContext().sslCtxNativePointer : sslParameters.getServerSessionContext().sslCtxNativePointer; this.sslNativePointer = NativeCrypto.SSL_new(sslCtxNativePointer); // setup server certificates and private keys. // clients will receive a call back to request certificates. if (!client) { for (String keyType : NativeCrypto.KEY_TYPES) { try { setCertificate(sslParameters.getKeyManager().chooseServerAlias(keyType, null, this)); } catch (CertificateEncodingException e) { throw new IOException(e); } } } NativeCrypto.setEnabledProtocols(sslNativePointer, enabledProtocols); NativeCrypto.setEnabledCipherSuites(sslNativePointer, enabledCipherSuites); if (enabledCompressionMethods.length != 0) { NativeCrypto.setEnabledCompressionMethods(sslNativePointer, enabledCompressionMethods); } if (useSessionTickets) { NativeCrypto.SSL_clear_options(sslNativePointer, NativeCrypto.SSL_OP_NO_TICKET); } if (hostname != null) { NativeCrypto.SSL_set_tlsext_host_name(sslNativePointer, hostname); } boolean enableSessionCreation = sslParameters.getEnableSessionCreation(); if (!enableSessionCreation) { NativeCrypto.SSL_set_session_creation_enabled(sslNativePointer, enableSessionCreation); } AbstractSessionContext sessionContext; OpenSSLSessionImpl session; if (client) { // look for client session to reuse ClientSessionContext clientSessionContext = sslParameters.getClientSessionContext(); sessionContext = clientSessionContext; session = getCachedClientSession(clientSessionContext); if (session != null) { NativeCrypto.SSL_set_session(sslNativePointer, session.sslSessionNativePointer); } } else { sessionContext = sslParameters.getServerSessionContext(); session = null; } // setup peer certificate verification if (client) { // TODO support for anonymous cipher would require us to // conditionally use SSL_VERIFY_NONE } else { // needing client auth takes priority... boolean certRequested = false; if (sslParameters.getNeedClientAuth()) { NativeCrypto.SSL_set_verify(sslNativePointer, NativeCrypto.SSL_VERIFY_PEER | NativeCrypto.SSL_VERIFY_FAIL_IF_NO_PEER_CERT); certRequested = true; // ... over just wanting it... } else if (sslParameters.getWantClientAuth()) { NativeCrypto.SSL_set_verify(sslNativePointer, NativeCrypto.SSL_VERIFY_PEER); certRequested = true; // ... and it defaults properly so we don't need call SSL_set_verify in the common case. } else { certRequested = false; } if (certRequested) { X509Certificate[] issuers = sslParameters.getTrustManager().getAcceptedIssuers(); - if (issuers != null) { + if (issuers != null && issuers.length != 0) { byte[][] issuersBytes; try { issuersBytes = NativeCrypto.encodeIssuerX509Principals(issuers); } catch (CertificateEncodingException e) { throw new IOException("Problem encoding principals", e); } NativeCrypto.SSL_set_client_CA_list(sslNativePointer, issuersBytes); } } } if (client && full) { // we want to do a full synchronous handshake, so turn off cutthrough NativeCrypto.SSL_clear_mode(sslNativePointer, NativeCrypto.SSL_MODE_HANDSHAKE_CUTTHROUGH); } // BEGIN android-added // Temporarily use a different timeout for the handshake process int savedTimeoutMilliseconds = getSoTimeout(); if (handshakeTimeoutMilliseconds >= 0) { setSoTimeout(handshakeTimeoutMilliseconds); } // END android-added Socket socket = this.socket != null ? 
this.socket : this; int sslSessionNativePointer; try { sslSessionNativePointer = NativeCrypto.SSL_do_handshake(sslNativePointer, socket, this, getSoTimeout(), client); } catch (CertificateException e) { throw new SSLPeerUnverifiedException(e.getMessage()); } byte[] sessionId = NativeCrypto.SSL_SESSION_session_id(sslSessionNativePointer); sslSession = (OpenSSLSessionImpl) sessionContext.getSession(sessionId); if (sslSession != null) { sslSession.lastAccessedTime = System.currentTimeMillis(); LoggerHolder.logger.fine("Reused cached session for " + getInetAddress() + "."); NativeCrypto.SSL_SESSION_free(sslSessionNativePointer); } else { if (!enableSessionCreation) { // Should have been prevented by NativeCrypto.SSL_set_session_creation_enabled throw new IllegalStateException("SSL Session may not be created"); } X509Certificate[] localCertificates = createCertChain(NativeCrypto.SSL_get_certificate(sslNativePointer)); X509Certificate[] peerCertificates = createCertChain(NativeCrypto.SSL_get_peer_cert_chain(sslNativePointer)); if (wrappedHost == null) { sslSession = new OpenSSLSessionImpl(sslSessionNativePointer, localCertificates, peerCertificates, super.getInetAddress().getHostName(), super.getPort(), sessionContext); } else { sslSession = new OpenSSLSessionImpl(sslSessionNativePointer, localCertificates, peerCertificates, wrappedHost, wrappedPort, sessionContext); } // if not, putSession later in handshakeCompleted() callback if (handshakeCompleted) { sessionContext.putSession(sslSession); } LoggerHolder.logger.fine("Created new session for " + getInetAddress().getHostName() + "."); } // BEGIN android-added // Restore the original timeout now that the handshake is complete if (handshakeTimeoutMilliseconds >= 0) { setSoTimeout(savedTimeoutMilliseconds); } // END android-added // if not, notifyHandshakeCompletedListeners later in handshakeCompleted() callback if (handshakeCompleted) { notifyHandshakeCompletedListeners(); } } /** * Return a possibly null array of X509Certificates given the * possibly null array of DER encoded bytes. 
*/ private static final X509Certificate[] createCertChain(byte[][] certificatesBytes) { if (certificatesBytes == null) { return null; } X509Certificate[] certificates = new X509Certificate[certificatesBytes.length]; for (int i = 0; i < certificatesBytes.length; i++) { try { certificates[i] = new X509CertImpl(certificatesBytes[i]); } catch (IOException e) { return null; } } return certificates; } private void setCertificate(String alias) throws CertificateEncodingException, SSLException { if (alias == null) { return; } PrivateKey privateKey = sslParameters.getKeyManager().getPrivateKey(alias); byte[] privateKeyBytes = privateKey.getEncoded(); NativeCrypto.SSL_use_PrivateKey(sslNativePointer, privateKeyBytes); X509Certificate[] certificates = sslParameters.getKeyManager().getCertificateChain(alias); byte[][] certificateBytes = NativeCrypto.encodeCertificates(certificates); NativeCrypto.SSL_use_certificate(sslNativePointer, certificateBytes); // checks the last installed private key and certificate, // so need to do this once per loop iteration NativeCrypto.SSL_check_private_key(sslNativePointer); } /** * Implementation of NativeCrypto.SSLHandshakeCallbacks * invoked via JNI from client_cert_cb */ public void clientCertificateRequested(byte[] keyTypeBytes, byte[][] asn1DerEncodedPrincipals) throws CertificateEncodingException, SSLException { String[] keyTypes = new String[keyTypeBytes.length]; for (int i = 0; i < keyTypeBytes.length; i++) { keyTypes[i] = NativeCrypto.keyType(keyTypeBytes[i]); } X500Principal[] issuers; if (asn1DerEncodedPrincipals == null) { issuers = null; } else { issuers = new X500Principal[asn1DerEncodedPrincipals.length]; for (int i = 0; i < asn1DerEncodedPrincipals.length; i++) { issuers[i] = new X500Principal(asn1DerEncodedPrincipals[i]); } } setCertificate(sslParameters.getKeyManager().chooseClientAlias(keyTypes, issuers, this)); } /** * Implementation of NativeCrypto.SSLHandshakeCallbacks * invoked via JNI from info_callback */ public void handshakeCompleted() { handshakeCompleted = true; // If sslSession is null, the handshake was completed during // the call to NativeCrypto.SSL_do_handshake and not during a // later read operation. That means we do not need to fixup // the SSLSession and session cache or notify // HandshakeCompletedListeners, it will be done in // startHandshake. if (sslSession == null) { return; } // reset session id from the native pointer and update the // appropriate cache. sslSession.resetId(); AbstractSessionContext sessionContext = (sslParameters.getUseClientMode()) ? sslParameters.getClientSessionContext() : sslParameters.getServerSessionContext(); sessionContext.putSession(sslSession); // let listeners know we are finally done notifyHandshakeCompletedListeners(); } private void notifyHandshakeCompletedListeners() { if (listeners != null && !listeners.isEmpty()) { // notify the listeners HandshakeCompletedEvent event = new HandshakeCompletedEvent(this, sslSession); for (HandshakeCompletedListener listener : listeners) { try { listener.handshakeCompleted(event); } catch (RuntimeException e) { // The RI runs the handlers in a separate thread, // which we do not. But we try to preserve their // behavior of logging a problem and not killing // the handshaking thread just because a listener // has a problem. 
Thread thread = Thread.currentThread(); thread.getUncaughtExceptionHandler().uncaughtException(thread, e); } } } } /** * Implementation of NativeCrypto.SSLHandshakeCallbacks * * @param bytes An array of ASN.1 DER encoded certficates * @param authMethod auth algorithm name * * @throws CertificateException if the certificate is untrusted */ @SuppressWarnings("unused") public void verifyCertificateChain(byte[][] bytes, String authMethod) throws CertificateException { try { if (bytes == null || bytes.length == 0) { throw new SSLException("Peer sent no certificate"); } X509Certificate[] peerCertificateChain = new X509Certificate[bytes.length]; for (int i = 0; i < bytes.length; i++) { peerCertificateChain[i] = new X509CertImpl( javax.security.cert.X509Certificate.getInstance(bytes[i]).getEncoded()); } boolean client = sslParameters.getUseClientMode(); if (client) { sslParameters.getTrustManager().checkServerTrusted(peerCertificateChain, authMethod); } else { sslParameters.getTrustManager().checkClientTrusted(peerCertificateChain, authMethod); } } catch (CertificateException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } } /** * Returns an input stream for this SSL socket using native calls to the * OpenSSL library. * * @return: an input stream for reading bytes from this socket. * @throws: <code>IOException</code> if an I/O error occurs when creating * the input stream, the socket is closed, the socket is not * connected, or the socket input has been shutdown. */ @Override public InputStream getInputStream() throws IOException { checkOpen(); synchronized (this) { if (is == null) { is = new SSLInputStream(); } return is; } } /** * Returns an output stream for this SSL socket using native calls to the * OpenSSL library. * * @return an output stream for writing bytes to this socket. * @throws <code>IOException</code> if an I/O error occurs when creating * the output stream, or no connection to the socket exists. */ @Override public OutputStream getOutputStream() throws IOException { checkOpen(); synchronized (this) { if (os == null) { os = new SSLOutputStream(); } return os; } } /** * This method is not supported for this SSLSocket implementation * because reading from an SSLSocket may involve writing to the * network. */ @Override public void shutdownInput() throws IOException { throw new UnsupportedOperationException(); } /** * This method is not supported for this SSLSocket implementation * because writing to an SSLSocket may involve reading from the * network. */ @Override public void shutdownOutput() throws IOException { throw new UnsupportedOperationException(); } /** * This inner class provides input data stream functionality * for the OpenSSL native implementation. It is used to * read data received via SSL protocol. */ private class SSLInputStream extends InputStream { SSLInputStream() throws IOException { /** /* Note: When startHandshake() throws an exception, no * SSLInputStream object will be created. */ OpenSSLSocketImpl.this.startHandshake(false); } /** * Reads one byte. If there is no data in the underlying buffer, * this operation can block until the data will be * available. * @return read value. * @throws <code>IOException</code> */ @Override public int read() throws IOException { checkOpen(); BlockGuard.getThreadPolicy().onNetwork(); synchronized (readLock) { return NativeCrypto.SSL_read_byte(sslNativePointer, getSoTimeout()); } } /** * Method acts as described in spec for superclass. 
* @see java.io.InputStream#read(byte[],int,int) */ @Override public int read(byte[] b, int off, int len) throws IOException { checkOpen(); BlockGuard.getThreadPolicy().onNetwork(); if (b == null) { throw new NullPointerException("b == null"); } if ((len | off) < 0 || len > b.length - off) { throw new IndexOutOfBoundsException(); } if (0 == len) { return 0; } synchronized (readLock) { return NativeCrypto.SSL_read(sslNativePointer, b, off, len, getSoTimeout()); } } } /** * This inner class provides output data stream functionality * for the OpenSSL native implementation. It is used to * write data according to the encryption parameters given in SSL context. */ private class SSLOutputStream extends OutputStream { SSLOutputStream() throws IOException { /** /* Note: When startHandshake() throws an exception, no * SSLOutputStream object will be created. */ OpenSSLSocketImpl.this.startHandshake(false); } /** * Method acts as described in spec for superclass. * @see java.io.OutputStream#write(int) */ @Override public void write(int b) throws IOException { checkOpen(); BlockGuard.getThreadPolicy().onNetwork(); synchronized (writeLock) { NativeCrypto.SSL_write_byte(sslNativePointer, b); } } /** * Method acts as described in spec for superclass. * @see java.io.OutputStream#write(byte[],int,int) */ @Override public void write(byte[] b, int start, int len) throws IOException { checkOpen(); BlockGuard.getThreadPolicy().onNetwork(); if (b == null) { throw new NullPointerException("b == null"); } if ((len | start) < 0 || len > b.length - start) { throw new IndexOutOfBoundsException(); } if (len == 0) { return; } synchronized (writeLock) { NativeCrypto.SSL_write(sslNativePointer, b, start, len); } } } /** * The SSL session used by this connection is returned. The SSL session * determines which cipher suite should be used by all connections within * that session and which identities have the session's client and server. * This method starts the SSL handshake. * @return the SSLSession. * @throws <code>IOException</code> if the handshake fails */ @Override public SSLSession getSession() { if (sslSession == null) { try { startHandshake(true); } catch (IOException e) { // return an invalid session with // invalid cipher suite of "SSL_NULL_WITH_NULL_NULL" return SSLSessionImpl.NULL_SESSION; } } return sslSession; } /** * Registers a listener to be notified that a SSL handshake * was successfully completed on this connection. * @throws <code>IllegalArgumentException</code> if listener is null. */ @Override public void addHandshakeCompletedListener( HandshakeCompletedListener listener) { if (listener == null) { throw new IllegalArgumentException("Provided listener is null"); } if (listeners == null) { listeners = new ArrayList(); } listeners.add(listener); } /** * The method removes a registered listener. * @throws IllegalArgumentException if listener is null or not registered */ @Override public void removeHandshakeCompletedListener( HandshakeCompletedListener listener) { if (listener == null) { throw new IllegalArgumentException("Provided listener is null"); } if (listeners == null) { throw new IllegalArgumentException( "Provided listener is not registered"); } if (!listeners.remove(listener)) { throw new IllegalArgumentException( "Provided listener is not registered"); } } /** * Returns true if new SSL sessions may be established by this socket. * * @return true if the session may be created; false if a session already * exists and must be resumed. 
*/ @Override public boolean getEnableSessionCreation() { return sslParameters.getEnableSessionCreation(); } /** * Set a flag for the socket to inhibit or to allow the creation of a new * SSL sessions. If the flag is set to false, and there are no actual * sessions to resume, then there will be no successful handshaking. * * @param flag true if session may be created; false * if a session already exists and must be resumed. */ @Override public void setEnableSessionCreation(boolean flag) { sslParameters.setEnableSessionCreation(flag); } /** * The names of the cipher suites which could be used by the SSL connection * are returned. * @return an array of cipher suite names */ @Override public String[] getSupportedCipherSuites() { return NativeCrypto.getSupportedCipherSuites(); } /** * The names of the cipher suites that are in use in the actual the SSL * connection are returned. * * @return an array of cipher suite names */ @Override public String[] getEnabledCipherSuites() { return enabledCipherSuites.clone(); } /** * This method enables the cipher suites listed by * getSupportedCipherSuites(). * * @param suites names of all the cipher suites to * put on use * @throws IllegalArgumentException when one or more of the * ciphers in array suites are not supported, or when the array * is null. */ @Override public void setEnabledCipherSuites(String[] suites) { enabledCipherSuites = NativeCrypto.checkEnabledCipherSuites(suites); } /** * The names of the protocols' versions that may be used on this SSL * connection. * @return an array of protocols names */ @Override public String[] getSupportedProtocols() { return NativeCrypto.getSupportedProtocols(); } /** * The names of the protocols' versions that are in use on this SSL * connection. * * @return an array of protocols names */ @Override public String[] getEnabledProtocols() { return enabledProtocols.clone(); } /** * This method enables the protocols' versions listed by * getSupportedProtocols(). * * @param protocols The names of all the protocols to allow * * @throws IllegalArgumentException when one or more of the names in the * array are not supported, or when the array is null. */ @Override public void setEnabledProtocols(String[] protocols) { enabledProtocols = NativeCrypto.checkEnabledProtocols(protocols); } /** * The names of the compression methods that may be used on this SSL * connection. * @return an array of compression methods */ public String[] getSupportedCompressionMethods() { return NativeCrypto.getSupportedCompressionMethods(); } /** * The names of the compression methods versions that are in use * on this SSL connection. * * @return an array of compression methods */ public String[] getEnabledCompressionMethods() { return enabledCompressionMethods.clone(); } /** * This method enables the compression method listed by * getSupportedCompressionMethods(). * * @param methods The names of all the compression methods to allow * * @throws IllegalArgumentException when one or more of the names in the * array are not supported, or when the array is null. */ public void setEnabledCompressionMethods (String[] methods) { enabledCompressionMethods = NativeCrypto.checkEnabledCompressionMethods(methods); } /** * This method enables session ticket support. * * @param useSessionTickets True to enable session tickets */ public void setUseSessionTickets(boolean useSessionTickets) { this.useSessionTickets = useSessionTickets; } /** * This method gives true back if the SSL socket is set to client mode. 
* * @return true if the socket should do the handshaking as client. */ public boolean getUseSessionTickets() { return useSessionTickets; } /** * This method enables Server Name Indication * * @param hostname the desired SNI hostname, or null to disable */ public void setHostname(String hostname) { this.hostname = hostname; } /** * This method returns the current SNI hostname * * @return a host name if SNI is enabled, or null otherwise */ public String getHostname() { return hostname; } /** * This method gives true back if the SSL socket is set to client mode. * * @return true if the socket should do the handshaking as client. */ public boolean getUseClientMode() { return sslParameters.getUseClientMode(); } /** * This method set the actual SSL socket to client mode. * * @param mode true if the socket starts in client * mode * @throws IllegalArgumentException if mode changes during * handshake. */ @Override public void setUseClientMode(boolean mode) { if (handshakeStarted) { throw new IllegalArgumentException( "Could not change the mode after the initial handshake has begun."); } sslParameters.setUseClientMode(mode); } /** * Returns true if the SSL socket requests client's authentication. Relevant * only for server sockets! * * @return true if client authentication is desired, false if not. */ @Override public boolean getWantClientAuth() { return sslParameters.getWantClientAuth(); } /** * Returns true if the SSL socket needs client's authentication. Relevant * only for server sockets! * * @return true if client authentication is desired, false if not. */ @Override public boolean getNeedClientAuth() { return sslParameters.getNeedClientAuth(); } /** * Sets the SSL socket to use client's authentication. Relevant only for * server sockets! * * @param need true if client authentication is * desired, false if not. */ @Override public void setNeedClientAuth(boolean need) { sslParameters.setNeedClientAuth(need); } /** * Sets the SSL socket to use client's authentication. Relevant only for * server sockets! Notice that in contrast to setNeedClientAuth(..) this * method will continue the negotiation if the client decide not to send * authentication credentials. * * @param want true if client authentication is * desired, false if not. */ @Override public void setWantClientAuth(boolean want) { sslParameters.setWantClientAuth(want); } /** * This method is not supported for SSLSocket implementation. */ @Override public void sendUrgentData(int data) throws IOException { throw new SocketException( "Method sendUrgentData() is not supported."); } /** * This method is not supported for SSLSocket implementation. */ @Override public void setOOBInline(boolean on) throws SocketException { throw new SocketException( "Methods sendUrgentData, setOOBInline are not supported."); } /** * Set the read timeout on this socket. The SO_TIMEOUT option, is specified * in milliseconds. The read operation will block indefinitely for a zero * value. * * @param timeout the read timeout value * @throws SocketException if an error occurs setting the option */ @Override public void setSoTimeout(int timeoutMilliseconds) throws SocketException { super.setSoTimeout(timeoutMilliseconds); this.timeoutMilliseconds = timeoutMilliseconds; } @Override public int getSoTimeout() throws SocketException { return timeoutMilliseconds; } // BEGIN android-added /** * Set the handshake timeout on this socket. This timeout is specified in * milliseconds and will be used only during the handshake process. 
* * @param timeout the handshake timeout value */ public void setHandshakeTimeout(int timeoutMilliseconds) throws SocketException { this.handshakeTimeoutMilliseconds = timeoutMilliseconds; } // END android-added /** * Closes the SSL socket. Once closed, a socket is not available for further * use anymore under any circumstance. A new socket must be created. * * @throws <code>IOException</code> if an I/O error happens during the * socket's closure. */ @Override public void close() throws IOException { // TODO: Close SSL sockets using a background thread so they close // gracefully. synchronized (handshakeLock) { if (!handshakeStarted) { // prevent further attemps to start handshake handshakeStarted = true; synchronized (this) { free(); if (socket != null) { if (autoClose && !socket.isClosed()) socket.close(); } else { if (!super.isClosed()) super.close(); } } return; } } NativeCrypto.SSL_interrupt(sslNativePointer); synchronized (this) { synchronized (writeLock) { synchronized (readLock) { IOException pendingException = null; // Shut down the SSL connection, per se. try { if (handshakeStarted) { BlockGuard.getThreadPolicy().onNetwork(); NativeCrypto.SSL_shutdown(sslNativePointer); } } catch (IOException ex) { /* * Note the exception at this point, but try to continue * to clean the rest of this all up before rethrowing. */ pendingException = ex; } /* * Even if the above call failed, it is still safe to free * the native structs, and we need to do so lest we leak * memory. */ free(); if (socket != null) { if (autoClose && !socket.isClosed()) socket.close(); } else { if (!super.isClosed()) super.close(); } if (pendingException != null) { throw pendingException; } } } } } private void free() { if (sslNativePointer == 0) { return; } NativeCrypto.SSL_free(sslNativePointer); sslNativePointer = 0; } @Override protected void finalize() throws IOException { /* * Just worry about our own state. Notably we do not try and * close anything. The SocketImpl, either our own * PlainSocketImpl, or the Socket we are wrapping, will do * that. This might mean we do not properly SSL_shutdown, but * if you want to do that, properly close the socket yourself. * * The reason why we don't try to SSL_shutdown, is that there * can be a race between finalizers where the PlainSocketImpl * finalizer runs first and closes the socket. However, in the * meanwhile, the underlying file descriptor could be reused * for another purpose. If we call SSL_shutdown, the * underlying socket BIOs still have the old file descriptor * and will write the close notify to some unsuspecting * reader. */ updateInstanceCount(-1); free(); } }
true
true
public synchronized void startHandshake(boolean full) throws IOException { checkOpen(); synchronized (handshakeLock) { if (!handshakeStarted) { handshakeStarted = true; } else { return; } } // note that this modifies the global seed, not something specific to the connection final int seedLengthInBytes = NativeCrypto.RAND_SEED_LENGTH_IN_BYTES; final SecureRandom secureRandom = sslParameters.getSecureRandomMember(); if (secureRandom == null) { NativeCrypto.RAND_load_file("/dev/urandom", seedLengthInBytes); } else { NativeCrypto.RAND_seed(secureRandom.generateSeed(seedLengthInBytes)); } final boolean client = sslParameters.getUseClientMode(); final int sslCtxNativePointer = (client) ? sslParameters.getClientSessionContext().sslCtxNativePointer : sslParameters.getServerSessionContext().sslCtxNativePointer; this.sslNativePointer = NativeCrypto.SSL_new(sslCtxNativePointer); // setup server certificates and private keys. // clients will receive a call back to request certificates. if (!client) { for (String keyType : NativeCrypto.KEY_TYPES) { try { setCertificate(sslParameters.getKeyManager().chooseServerAlias(keyType, null, this)); } catch (CertificateEncodingException e) { throw new IOException(e); } } } NativeCrypto.setEnabledProtocols(sslNativePointer, enabledProtocols); NativeCrypto.setEnabledCipherSuites(sslNativePointer, enabledCipherSuites); if (enabledCompressionMethods.length != 0) { NativeCrypto.setEnabledCompressionMethods(sslNativePointer, enabledCompressionMethods); } if (useSessionTickets) { NativeCrypto.SSL_clear_options(sslNativePointer, NativeCrypto.SSL_OP_NO_TICKET); } if (hostname != null) { NativeCrypto.SSL_set_tlsext_host_name(sslNativePointer, hostname); } boolean enableSessionCreation = sslParameters.getEnableSessionCreation(); if (!enableSessionCreation) { NativeCrypto.SSL_set_session_creation_enabled(sslNativePointer, enableSessionCreation); } AbstractSessionContext sessionContext; OpenSSLSessionImpl session; if (client) { // look for client session to reuse ClientSessionContext clientSessionContext = sslParameters.getClientSessionContext(); sessionContext = clientSessionContext; session = getCachedClientSession(clientSessionContext); if (session != null) { NativeCrypto.SSL_set_session(sslNativePointer, session.sslSessionNativePointer); } } else { sessionContext = sslParameters.getServerSessionContext(); session = null; } // setup peer certificate verification if (client) { // TODO support for anonymous cipher would require us to // conditionally use SSL_VERIFY_NONE } else { // needing client auth takes priority... boolean certRequested = false; if (sslParameters.getNeedClientAuth()) { NativeCrypto.SSL_set_verify(sslNativePointer, NativeCrypto.SSL_VERIFY_PEER | NativeCrypto.SSL_VERIFY_FAIL_IF_NO_PEER_CERT); certRequested = true; // ... over just wanting it... } else if (sslParameters.getWantClientAuth()) { NativeCrypto.SSL_set_verify(sslNativePointer, NativeCrypto.SSL_VERIFY_PEER); certRequested = true; // ... and it defaults properly so we don't need call SSL_set_verify in the common case. 
} else { certRequested = false; } if (certRequested) { X509Certificate[] issuers = sslParameters.getTrustManager().getAcceptedIssuers(); if (issuers != null) { byte[][] issuersBytes; try { issuersBytes = NativeCrypto.encodeIssuerX509Principals(issuers); } catch (CertificateEncodingException e) { throw new IOException("Problem encoding principals", e); } NativeCrypto.SSL_set_client_CA_list(sslNativePointer, issuersBytes); } } } if (client && full) { // we want to do a full synchronous handshake, so turn off cutthrough NativeCrypto.SSL_clear_mode(sslNativePointer, NativeCrypto.SSL_MODE_HANDSHAKE_CUTTHROUGH); } // BEGIN android-added // Temporarily use a different timeout for the handshake process int savedTimeoutMilliseconds = getSoTimeout(); if (handshakeTimeoutMilliseconds >= 0) { setSoTimeout(handshakeTimeoutMilliseconds); } // END android-added Socket socket = this.socket != null ? this.socket : this; int sslSessionNativePointer; try { sslSessionNativePointer = NativeCrypto.SSL_do_handshake(sslNativePointer, socket, this, getSoTimeout(), client); } catch (CertificateException e) { throw new SSLPeerUnverifiedException(e.getMessage()); } byte[] sessionId = NativeCrypto.SSL_SESSION_session_id(sslSessionNativePointer); sslSession = (OpenSSLSessionImpl) sessionContext.getSession(sessionId); if (sslSession != null) { sslSession.lastAccessedTime = System.currentTimeMillis(); LoggerHolder.logger.fine("Reused cached session for " + getInetAddress() + "."); NativeCrypto.SSL_SESSION_free(sslSessionNativePointer); } else { if (!enableSessionCreation) { // Should have been prevented by NativeCrypto.SSL_set_session_creation_enabled throw new IllegalStateException("SSL Session may not be created"); } X509Certificate[] localCertificates = createCertChain(NativeCrypto.SSL_get_certificate(sslNativePointer)); X509Certificate[] peerCertificates = createCertChain(NativeCrypto.SSL_get_peer_cert_chain(sslNativePointer)); if (wrappedHost == null) { sslSession = new OpenSSLSessionImpl(sslSessionNativePointer, localCertificates, peerCertificates, super.getInetAddress().getHostName(), super.getPort(), sessionContext); } else { sslSession = new OpenSSLSessionImpl(sslSessionNativePointer, localCertificates, peerCertificates, wrappedHost, wrappedPort, sessionContext); } // if not, putSession later in handshakeCompleted() callback if (handshakeCompleted) { sessionContext.putSession(sslSession); } LoggerHolder.logger.fine("Created new session for " + getInetAddress().getHostName() + "."); } // BEGIN android-added // Restore the original timeout now that the handshake is complete if (handshakeTimeoutMilliseconds >= 0) { setSoTimeout(savedTimeoutMilliseconds); } // END android-added // if not, notifyHandshakeCompletedListeners later in handshakeCompleted() callback if (handshakeCompleted) { notifyHandshakeCompletedListeners(); } }
public synchronized void startHandshake(boolean full) throws IOException { checkOpen(); synchronized (handshakeLock) { if (!handshakeStarted) { handshakeStarted = true; } else { return; } } // note that this modifies the global seed, not something specific to the connection final int seedLengthInBytes = NativeCrypto.RAND_SEED_LENGTH_IN_BYTES; final SecureRandom secureRandom = sslParameters.getSecureRandomMember(); if (secureRandom == null) { NativeCrypto.RAND_load_file("/dev/urandom", seedLengthInBytes); } else { NativeCrypto.RAND_seed(secureRandom.generateSeed(seedLengthInBytes)); } final boolean client = sslParameters.getUseClientMode(); final int sslCtxNativePointer = (client) ? sslParameters.getClientSessionContext().sslCtxNativePointer : sslParameters.getServerSessionContext().sslCtxNativePointer; this.sslNativePointer = NativeCrypto.SSL_new(sslCtxNativePointer); // setup server certificates and private keys. // clients will receive a call back to request certificates. if (!client) { for (String keyType : NativeCrypto.KEY_TYPES) { try { setCertificate(sslParameters.getKeyManager().chooseServerAlias(keyType, null, this)); } catch (CertificateEncodingException e) { throw new IOException(e); } } } NativeCrypto.setEnabledProtocols(sslNativePointer, enabledProtocols); NativeCrypto.setEnabledCipherSuites(sslNativePointer, enabledCipherSuites); if (enabledCompressionMethods.length != 0) { NativeCrypto.setEnabledCompressionMethods(sslNativePointer, enabledCompressionMethods); } if (useSessionTickets) { NativeCrypto.SSL_clear_options(sslNativePointer, NativeCrypto.SSL_OP_NO_TICKET); } if (hostname != null) { NativeCrypto.SSL_set_tlsext_host_name(sslNativePointer, hostname); } boolean enableSessionCreation = sslParameters.getEnableSessionCreation(); if (!enableSessionCreation) { NativeCrypto.SSL_set_session_creation_enabled(sslNativePointer, enableSessionCreation); } AbstractSessionContext sessionContext; OpenSSLSessionImpl session; if (client) { // look for client session to reuse ClientSessionContext clientSessionContext = sslParameters.getClientSessionContext(); sessionContext = clientSessionContext; session = getCachedClientSession(clientSessionContext); if (session != null) { NativeCrypto.SSL_set_session(sslNativePointer, session.sslSessionNativePointer); } } else { sessionContext = sslParameters.getServerSessionContext(); session = null; } // setup peer certificate verification if (client) { // TODO support for anonymous cipher would require us to // conditionally use SSL_VERIFY_NONE } else { // needing client auth takes priority... boolean certRequested = false; if (sslParameters.getNeedClientAuth()) { NativeCrypto.SSL_set_verify(sslNativePointer, NativeCrypto.SSL_VERIFY_PEER | NativeCrypto.SSL_VERIFY_FAIL_IF_NO_PEER_CERT); certRequested = true; // ... over just wanting it... } else if (sslParameters.getWantClientAuth()) { NativeCrypto.SSL_set_verify(sslNativePointer, NativeCrypto.SSL_VERIFY_PEER); certRequested = true; // ... and it defaults properly so we don't need call SSL_set_verify in the common case. 
} else { certRequested = false; } if (certRequested) { X509Certificate[] issuers = sslParameters.getTrustManager().getAcceptedIssuers(); if (issuers != null && issuers.length != 0) { byte[][] issuersBytes; try { issuersBytes = NativeCrypto.encodeIssuerX509Principals(issuers); } catch (CertificateEncodingException e) { throw new IOException("Problem encoding principals", e); } NativeCrypto.SSL_set_client_CA_list(sslNativePointer, issuersBytes); } } } if (client && full) { // we want to do a full synchronous handshake, so turn off cutthrough NativeCrypto.SSL_clear_mode(sslNativePointer, NativeCrypto.SSL_MODE_HANDSHAKE_CUTTHROUGH); } // BEGIN android-added // Temporarily use a different timeout for the handshake process int savedTimeoutMilliseconds = getSoTimeout(); if (handshakeTimeoutMilliseconds >= 0) { setSoTimeout(handshakeTimeoutMilliseconds); } // END android-added Socket socket = this.socket != null ? this.socket : this; int sslSessionNativePointer; try { sslSessionNativePointer = NativeCrypto.SSL_do_handshake(sslNativePointer, socket, this, getSoTimeout(), client); } catch (CertificateException e) { throw new SSLPeerUnverifiedException(e.getMessage()); } byte[] sessionId = NativeCrypto.SSL_SESSION_session_id(sslSessionNativePointer); sslSession = (OpenSSLSessionImpl) sessionContext.getSession(sessionId); if (sslSession != null) { sslSession.lastAccessedTime = System.currentTimeMillis(); LoggerHolder.logger.fine("Reused cached session for " + getInetAddress() + "."); NativeCrypto.SSL_SESSION_free(sslSessionNativePointer); } else { if (!enableSessionCreation) { // Should have been prevented by NativeCrypto.SSL_set_session_creation_enabled throw new IllegalStateException("SSL Session may not be created"); } X509Certificate[] localCertificates = createCertChain(NativeCrypto.SSL_get_certificate(sslNativePointer)); X509Certificate[] peerCertificates = createCertChain(NativeCrypto.SSL_get_peer_cert_chain(sslNativePointer)); if (wrappedHost == null) { sslSession = new OpenSSLSessionImpl(sslSessionNativePointer, localCertificates, peerCertificates, super.getInetAddress().getHostName(), super.getPort(), sessionContext); } else { sslSession = new OpenSSLSessionImpl(sslSessionNativePointer, localCertificates, peerCertificates, wrappedHost, wrappedPort, sessionContext); } // if not, putSession later in handshakeCompleted() callback if (handshakeCompleted) { sessionContext.putSession(sslSession); } LoggerHolder.logger.fine("Created new session for " + getInetAddress().getHostName() + "."); } // BEGIN android-added // Restore the original timeout now that the handshake is complete if (handshakeTimeoutMilliseconds >= 0) { setSoTimeout(savedTimeoutMilliseconds); } // END android-added // if not, notifyHandshakeCompletedListeners later in handshakeCompleted() callback if (handshakeCompleted) { notifyHandshakeCompletedListeners(); } }
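The only change between the buggy and fixed startHandshake above is the extra issuers.length != 0 check, so the accepted-issuer list is forwarded to NativeCrypto.SSL_set_client_CA_list only when it is both non-null and non-empty. A minimal, framework-free sketch of that guard follows; IssuerListGuard, setClientCaList and encodeIssuers are placeholder names used purely for illustration, not the real NativeCrypto API.

import java.util.Arrays;

public class IssuerListGuard {

    // Stand-in for the native call; here it only reports what it would receive.
    static void setClientCaList(byte[][] encodedIssuers) {
        System.out.println("native CA list set with " + encodedIssuers.length + " entries");
    }

    // Stand-in for certificate encoding; real code would DER-encode X509 principals.
    static byte[][] encodeIssuers(String[] issuers) {
        return Arrays.stream(issuers).map(String::getBytes).toArray(byte[][]::new);
    }

    static void configure(String[] issuers) {
        // The guard from the fixed version: skip the native call for null or empty arrays.
        if (issuers != null && issuers.length != 0) {
            setClientCaList(encodeIssuers(issuers));
        }
    }

    public static void main(String[] args) {
        configure(null);                      // no call
        configure(new String[0]);             // no call (the previously unguarded case)
        configure(new String[] {"CN=Test"});  // native stand-in called with one entry
    }
}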
diff --git a/de.jutzig.jabylon.rest.ui/src/main/java/de/jutzig/jabylon/rest/ui/navbar/NavbarPanel.java b/de.jutzig.jabylon.rest.ui/src/main/java/de/jutzig/jabylon/rest/ui/navbar/NavbarPanel.java index 6810ece3..0f1466ff 100644 --- a/de.jutzig.jabylon.rest.ui/src/main/java/de/jutzig/jabylon/rest/ui/navbar/NavbarPanel.java +++ b/de.jutzig.jabylon.rest.ui/src/main/java/de/jutzig/jabylon/rest/ui/navbar/NavbarPanel.java @@ -1,104 +1,104 @@ package de.jutzig.jabylon.rest.ui.navbar; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.wicket.markup.html.basic.Label; import org.apache.wicket.markup.html.link.BookmarkablePageLink; import org.apache.wicket.markup.html.list.ListItem; import org.apache.wicket.markup.html.list.ListView; import org.apache.wicket.markup.html.panel.Panel; import org.apache.wicket.model.IModel; import org.apache.wicket.request.mapper.parameter.PageParameters; import org.eclipse.core.runtime.CoreException; import org.eclipse.core.runtime.IConfigurationElement; import org.eclipse.core.runtime.RegistryFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import de.jutzig.jabylon.rest.ui.wicket.BasicPanel; import de.jutzig.jabylon.rest.ui.wicket.PanelFactory; import de.jutzig.jabylon.rest.ui.wicket.pages.WelcomePage; public class NavbarPanel<T> extends BasicPanel<T> { private static final long serialVersionUID = 1L; private static final Logger logger = LoggerFactory.getLogger(NavbarPanel.class); public NavbarPanel(String id, IModel<T> model, PageParameters parameters) { super(id, model, parameters); - add(new BookmarkablePageLink<String>("Jabylon",WelcomePage.class)); //$NON-NLS-1$ + add(new BookmarkablePageLink<String>("jabylon",WelcomePage.class)); //$NON-NLS-1$ Map<PanelFactory, Boolean> data = loadNavBarExtensions(); List<PanelFactory> items = new ArrayList<PanelFactory>(); List<PanelFactory> rightAligned = new ArrayList<PanelFactory>(); for (Entry<PanelFactory, Boolean> entry : data.entrySet()) { if(entry.getValue()) rightAligned.add(entry.getKey()); else items.add(entry.getKey()); } ListView<PanelFactory> listView = new ListView<PanelFactory>("items", items) { //$NON-NLS-1$ private static final long serialVersionUID = 1L; @Override protected void populateItem(ListItem<PanelFactory> item) { Panel newPanel = item.getModelObject().createPanel(getPageParameters(), NavbarPanel.this.getModel(), "content"); //$NON-NLS-1$ if(newPanel==null) item.add(new Label("content","NONE")); //$NON-NLS-1$ //$NON-NLS-2$ else item.add(newPanel); } }; listView.setRenderBodyOnly(true); add(listView); ListView<PanelFactory> rightListView = new ListView<PanelFactory>("right-items", rightAligned) { //$NON-NLS-1$ private static final long serialVersionUID = 1L; @Override protected void populateItem(ListItem<PanelFactory> item) { Panel newPanel = item.getModelObject().createPanel(getPageParameters(), NavbarPanel.this.getModel(), "content"); //$NON-NLS-1$ if(newPanel==null) item.add(new Label("content","NONE")); //$NON-NLS-1$ //$NON-NLS-2$ else item.add(newPanel); } }; rightListView.setRenderBodyOnly(true); add(rightListView); } private Map<PanelFactory,Boolean> loadNavBarExtensions() { Map<PanelFactory, Boolean> extensions = new LinkedHashMap<PanelFactory, Boolean>(); IConfigurationElement[] configurationElements = RegistryFactory.getRegistry().getConfigurationElementsFor( "de.jutzig.jabylon.rest.ui.navbarItem"); //$NON-NLS-1$ for (IConfigurationElement element : configurationElements) { try 
{ PanelFactory extension = (PanelFactory) element.createExecutableExtension("panel"); //$NON-NLS-1$ String pullRight = element.getAttribute("pullRight"); //$NON-NLS-1$ if(pullRight!=null && Boolean.valueOf(pullRight)) extensions.put(extension, true); else extensions.put(extension, false); } catch (CoreException e) { logger.error("Failed to load extension "+element,e); //$NON-NLS-1$ } } return extensions; } }
true
true
public NavbarPanel(String id, IModel<T> model, PageParameters parameters) { super(id, model, parameters); add(new BookmarkablePageLink<String>("Jabylon",WelcomePage.class)); //$NON-NLS-1$ Map<PanelFactory, Boolean> data = loadNavBarExtensions(); List<PanelFactory> items = new ArrayList<PanelFactory>(); List<PanelFactory> rightAligned = new ArrayList<PanelFactory>(); for (Entry<PanelFactory, Boolean> entry : data.entrySet()) { if(entry.getValue()) rightAligned.add(entry.getKey()); else items.add(entry.getKey()); } ListView<PanelFactory> listView = new ListView<PanelFactory>("items", items) { //$NON-NLS-1$ private static final long serialVersionUID = 1L; @Override protected void populateItem(ListItem<PanelFactory> item) { Panel newPanel = item.getModelObject().createPanel(getPageParameters(), NavbarPanel.this.getModel(), "content"); //$NON-NLS-1$ if(newPanel==null) item.add(new Label("content","NONE")); //$NON-NLS-1$ //$NON-NLS-2$ else item.add(newPanel); } }; listView.setRenderBodyOnly(true); add(listView); ListView<PanelFactory> rightListView = new ListView<PanelFactory>("right-items", rightAligned) { //$NON-NLS-1$ private static final long serialVersionUID = 1L; @Override protected void populateItem(ListItem<PanelFactory> item) { Panel newPanel = item.getModelObject().createPanel(getPageParameters(), NavbarPanel.this.getModel(), "content"); //$NON-NLS-1$ if(newPanel==null) item.add(new Label("content","NONE")); //$NON-NLS-1$ //$NON-NLS-2$ else item.add(newPanel); } }; rightListView.setRenderBodyOnly(true); add(rightListView); }
public NavbarPanel(String id, IModel<T> model, PageParameters parameters) { super(id, model, parameters); add(new BookmarkablePageLink<String>("jabylon",WelcomePage.class)); //$NON-NLS-1$ Map<PanelFactory, Boolean> data = loadNavBarExtensions(); List<PanelFactory> items = new ArrayList<PanelFactory>(); List<PanelFactory> rightAligned = new ArrayList<PanelFactory>(); for (Entry<PanelFactory, Boolean> entry : data.entrySet()) { if(entry.getValue()) rightAligned.add(entry.getKey()); else items.add(entry.getKey()); } ListView<PanelFactory> listView = new ListView<PanelFactory>("items", items) { //$NON-NLS-1$ private static final long serialVersionUID = 1L; @Override protected void populateItem(ListItem<PanelFactory> item) { Panel newPanel = item.getModelObject().createPanel(getPageParameters(), NavbarPanel.this.getModel(), "content"); //$NON-NLS-1$ if(newPanel==null) item.add(new Label("content","NONE")); //$NON-NLS-1$ //$NON-NLS-2$ else item.add(newPanel); } }; listView.setRenderBodyOnly(true); add(listView); ListView<PanelFactory> rightListView = new ListView<PanelFactory>("right-items", rightAligned) { //$NON-NLS-1$ private static final long serialVersionUID = 1L; @Override protected void populateItem(ListItem<PanelFactory> item) { Panel newPanel = item.getModelObject().createPanel(getPageParameters(), NavbarPanel.this.getModel(), "content"); //$NON-NLS-1$ if(newPanel==null) item.add(new Label("content","NONE")); //$NON-NLS-1$ //$NON-NLS-2$ else item.add(newPanel); } }; rightListView.setRenderBodyOnly(true); add(rightListView); }
diff --git a/src/com/android/settings/Settings.java b/src/com/android/settings/Settings.java index 83b06c3f7..982db18d8 100644 --- a/src/com/android/settings/Settings.java +++ b/src/com/android/settings/Settings.java @@ -1,869 +1,869 @@ /* * Copyright (C) 2008 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.settings; import com.android.internal.util.ArrayUtils; import com.android.settings.ChooseLockGeneric.ChooseLockGenericFragment; import com.android.settings.accounts.AccountSyncSettings; import com.android.settings.accounts.AuthenticatorHelper; import com.android.settings.accounts.ManageAccountsSettings; import com.android.settings.applications.InstalledAppDetails; import com.android.settings.applications.ManageApplications; import com.android.settings.bluetooth.BluetoothEnabler; import com.android.settings.deviceinfo.Memory; import com.android.settings.fuelgauge.PowerUsageSummary; import com.android.settings.profiles.ProfileEnabler; import com.android.settings.vpn2.VpnSettings; import com.android.settings.wifi.WifiEnabler; import android.accounts.Account; import android.accounts.AccountManager; import android.accounts.OnAccountsUpdateListener; import android.content.ComponentName; import android.content.Context; import android.content.Intent; import android.content.SharedPreferences; import android.content.pm.ActivityInfo; import android.content.pm.PackageManager; import android.content.pm.PackageManager.NameNotFoundException; import android.content.pm.ResolveInfo; import android.graphics.drawable.Drawable; import android.os.Bundle; import android.os.INetworkManagementService; import android.os.RemoteException; import android.os.ServiceManager; import android.os.UserHandle; import android.os.UserManager; import android.preference.Preference; import android.preference.PreferenceActivity; import android.preference.PreferenceFragment; import android.text.TextUtils; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.View.OnClickListener; import android.view.ViewGroup; import android.widget.ArrayAdapter; import android.widget.Button; import android.widget.ImageView; import android.widget.ListAdapter; import android.widget.Switch; import android.widget.TextView; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; /** * Top-level settings activity to handle single pane and double pane UI layout. 
*/ public class Settings extends PreferenceActivity implements ButtonBarHandler, OnAccountsUpdateListener { private static final String LOG_TAG = "Settings"; private static final String META_DATA_KEY_HEADER_ID = "com.android.settings.TOP_LEVEL_HEADER_ID"; private static final String META_DATA_KEY_FRAGMENT_CLASS = "com.android.settings.FRAGMENT_CLASS"; private static final String META_DATA_KEY_PARENT_TITLE = "com.android.settings.PARENT_FRAGMENT_TITLE"; private static final String META_DATA_KEY_PARENT_FRAGMENT_CLASS = "com.android.settings.PARENT_FRAGMENT_CLASS"; private static final String EXTRA_CLEAR_UI_OPTIONS = "settings:remove_ui_options"; private static final String SAVE_KEY_CURRENT_HEADER = "com.android.settings.CURRENT_HEADER"; private static final String SAVE_KEY_PARENT_HEADER = "com.android.settings.PARENT_HEADER"; private String mFragmentClass; private int mTopLevelHeaderId; private Header mFirstHeader; private Header mCurrentHeader; private Header mParentHeader; private boolean mInLocalHeaderSwitch; // Show only these settings for restricted users private int[] SETTINGS_FOR_RESTRICTED = { R.id.wireless_section, R.id.wifi_settings, R.id.bluetooth_settings, R.id.data_usage_settings, R.id.wireless_settings, R.id.device_section, R.id.sound_settings, R.id.display_settings, R.id.storage_settings, R.id.application_settings, R.id.battery_settings, R.id.personal_section, R.id.location_settings, R.id.security_settings, R.id.language_settings, R.id.user_settings, R.id.account_settings, R.id.account_add, R.id.system_section, R.id.date_time_settings, R.id.about_settings, R.id.accessibility_settings }; private SharedPreferences mDevelopmentPreferences; private SharedPreferences.OnSharedPreferenceChangeListener mDevelopmentPreferencesListener; // TODO: Update Call Settings based on airplane mode state. protected HashMap<Integer, Integer> mHeaderIndexMap = new HashMap<Integer, Integer>(); private AuthenticatorHelper mAuthenticatorHelper; private Header mLastHeader; private boolean mListeningToAccountUpdates; @Override protected void onCreate(Bundle savedInstanceState) { if (getIntent().getBooleanExtra(EXTRA_CLEAR_UI_OPTIONS, false)) { getWindow().setUiOptions(0); } mAuthenticatorHelper = new AuthenticatorHelper(); mAuthenticatorHelper.updateAuthDescriptions(this); mAuthenticatorHelper.onAccountsUpdated(this, null); mDevelopmentPreferences = getSharedPreferences(DevelopmentSettings.PREF_FILE, Context.MODE_PRIVATE); getMetaData(); mInLocalHeaderSwitch = true; super.onCreate(savedInstanceState); mInLocalHeaderSwitch = false; if (!onIsHidingHeaders() && onIsMultiPane()) { highlightHeader(mTopLevelHeaderId); // Force the title so that it doesn't get overridden by a direct launch of // a specific settings screen. 
setTitle(R.string.settings_label); } // Retrieve any saved state if (savedInstanceState != null) { mCurrentHeader = savedInstanceState.getParcelable(SAVE_KEY_CURRENT_HEADER); mParentHeader = savedInstanceState.getParcelable(SAVE_KEY_PARENT_HEADER); } // If the current header was saved, switch to it if (savedInstanceState != null && mCurrentHeader != null) { //switchToHeaderLocal(mCurrentHeader); showBreadCrumbs(mCurrentHeader.title, null); } if (mParentHeader != null) { setParentTitle(mParentHeader.title, null, new OnClickListener() { public void onClick(View v) { switchToParent(mParentHeader.fragment); } }); } // Override up navigation for multi-pane, since we handle it in the fragment breadcrumbs if (onIsMultiPane()) { getActionBar().setDisplayHomeAsUpEnabled(false); getActionBar().setHomeButtonEnabled(false); } } @Override protected void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); // Save the current fragment, if it is the same as originally launched if (mCurrentHeader != null) { outState.putParcelable(SAVE_KEY_CURRENT_HEADER, mCurrentHeader); } if (mParentHeader != null) { outState.putParcelable(SAVE_KEY_PARENT_HEADER, mParentHeader); } } @Override public void onResume() { super.onResume(); mDevelopmentPreferencesListener = new SharedPreferences.OnSharedPreferenceChangeListener() { @Override public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) { invalidateHeaders(); } }; mDevelopmentPreferences.registerOnSharedPreferenceChangeListener( mDevelopmentPreferencesListener); ListAdapter listAdapter = getListAdapter(); if (listAdapter instanceof HeaderAdapter) { ((HeaderAdapter) listAdapter).resume(); } invalidateHeaders(); } @Override public void onPause() { super.onPause(); ListAdapter listAdapter = getListAdapter(); if (listAdapter instanceof HeaderAdapter) { ((HeaderAdapter) listAdapter).pause(); } mDevelopmentPreferences.unregisterOnSharedPreferenceChangeListener( mDevelopmentPreferencesListener); mDevelopmentPreferencesListener = null; } @Override public void onDestroy() { super.onDestroy(); if (mListeningToAccountUpdates) { AccountManager.get(this).removeOnAccountsUpdatedListener(this); } } private void switchToHeaderLocal(Header header) { mInLocalHeaderSwitch = true; switchToHeader(header); mInLocalHeaderSwitch = false; } @Override public void switchToHeader(Header header) { if (!mInLocalHeaderSwitch) { mCurrentHeader = null; mParentHeader = null; } super.switchToHeader(header); } /** * Switch to parent fragment and store the grand parent's info * @param className name of the activity wrapper for the parent fragment. 
*/ private void switchToParent(String className) { final ComponentName cn = new ComponentName(this, className); try { final PackageManager pm = getPackageManager(); final ActivityInfo parentInfo = pm.getActivityInfo(cn, PackageManager.GET_META_DATA); if (parentInfo != null && parentInfo.metaData != null) { String fragmentClass = parentInfo.metaData.getString(META_DATA_KEY_FRAGMENT_CLASS); CharSequence fragmentTitle = parentInfo.loadLabel(pm); Header parentHeader = new Header(); parentHeader.fragment = fragmentClass; parentHeader.title = fragmentTitle; mCurrentHeader = parentHeader; switchToHeaderLocal(parentHeader); highlightHeader(mTopLevelHeaderId); mParentHeader = new Header(); mParentHeader.fragment = parentInfo.metaData.getString(META_DATA_KEY_PARENT_FRAGMENT_CLASS); mParentHeader.title = parentInfo.metaData.getString(META_DATA_KEY_PARENT_TITLE); } } catch (NameNotFoundException nnfe) { Log.w(LOG_TAG, "Could not find parent activity : " + className); } } @Override public void onNewIntent(Intent intent) { super.onNewIntent(intent); // If it is not launched from history, then reset to top-level if ((intent.getFlags() & Intent.FLAG_ACTIVITY_LAUNCHED_FROM_HISTORY) == 0) { if (mFirstHeader != null && !onIsHidingHeaders() && onIsMultiPane()) { switchToHeaderLocal(mFirstHeader); } getListView().setSelectionFromTop(0, 0); } } private void highlightHeader(int id) { if (id != 0) { Integer index = mHeaderIndexMap.get(id); if (index != null) { getListView().setItemChecked(index, true); if (isMultiPane()) { getListView().smoothScrollToPosition(index); } } } } @Override public Intent getIntent() { Intent superIntent = super.getIntent(); String startingFragment = getStartingFragmentClass(superIntent); // This is called from super.onCreate, isMultiPane() is not yet reliable // Do not use onIsHidingHeaders either, which relies itself on this method if (startingFragment != null && !onIsMultiPane()) { Intent modIntent = new Intent(superIntent); modIntent.putExtra(EXTRA_SHOW_FRAGMENT, startingFragment); Bundle args = superIntent.getExtras(); if (args != null) { args = new Bundle(args); } else { args = new Bundle(); } args.putParcelable("intent", superIntent); modIntent.putExtra(EXTRA_SHOW_FRAGMENT_ARGUMENTS, superIntent.getExtras()); return modIntent; } return superIntent; } /** * Checks if the component name in the intent is different from the Settings class and * returns the class name to load as a fragment. */ protected String getStartingFragmentClass(Intent intent) { if (mFragmentClass != null) return mFragmentClass; String intentClass = intent.getComponent().getClassName(); if (intentClass.equals(getClass().getName())) return null; if ("com.android.settings.ManageApplications".equals(intentClass) || "com.android.settings.RunningServices".equals(intentClass) || "com.android.settings.applications.StorageUse".equals(intentClass)) { // Old names of manage apps. intentClass = com.android.settings.applications.ManageApplications.class.getName(); } return intentClass; } /** * Override initial header when an activity-alias is causing Settings to be launched * for a specific fragment encoded in the android:name parameter. 
*/ @Override public Header onGetInitialHeader() { String fragmentClass = getStartingFragmentClass(super.getIntent()); if (fragmentClass != null) { Header header = new Header(); header.fragment = fragmentClass; header.title = getTitle(); header.fragmentArguments = getIntent().getExtras(); mCurrentHeader = header; return header; } return mFirstHeader; } @Override public Intent onBuildStartFragmentIntent(String fragmentName, Bundle args, int titleRes, int shortTitleRes) { Intent intent = super.onBuildStartFragmentIntent(fragmentName, args, titleRes, shortTitleRes); // some fragments want to avoid split actionbar if (DataUsageSummary.class.getName().equals(fragmentName) || PowerUsageSummary.class.getName().equals(fragmentName) || AccountSyncSettings.class.getName().equals(fragmentName) || UserDictionarySettings.class.getName().equals(fragmentName) || Memory.class.getName().equals(fragmentName) || ManageApplications.class.getName().equals(fragmentName) || WirelessSettings.class.getName().equals(fragmentName) || SoundSettings.class.getName().equals(fragmentName) || PrivacySettings.class.getName().equals(fragmentName) || ManageAccountsSettings.class.getName().equals(fragmentName) || VpnSettings.class.getName().equals(fragmentName) || SecuritySettings.class.getName().equals(fragmentName) || InstalledAppDetails.class.getName().equals(fragmentName) || ChooseLockGenericFragment.class.getName().equals(fragmentName)) { intent.putExtra(EXTRA_CLEAR_UI_OPTIONS, true); } intent.setClass(this, SubSettings.class); return intent; } /** * Populate the activity with the top-level headers. */ @Override public void onBuildHeaders(List<Header> headers) { loadHeadersFromResource(R.xml.settings_headers, headers); updateHeaderList(headers); } private void updateHeaderList(List<Header> target) { final boolean showDev = mDevelopmentPreferences.getBoolean( DevelopmentSettings.PREF_SHOW, android.os.Build.TYPE.equals("eng")); int i = 0; mHeaderIndexMap.clear(); while (i < target.size()) { Header header = target.get(i); // Ids are integers, so downcasting int id = (int) header.id; if (id == R.id.operator_settings || id == R.id.manufacturer_settings || id == R.id.advanced_settings) { Utils.updateHeaderToSpecificActivityFromMetaDataOrRemove(this, target, header); } else if (id == R.id.launcher_settings) { Intent launcherIntent = new Intent(Intent.ACTION_MAIN); launcherIntent.addCategory(Intent.CATEGORY_HOME); launcherIntent.addCategory(Intent.CATEGORY_DEFAULT); Intent launcherPreferencesIntent = new Intent(Intent.ACTION_MAIN); launcherPreferencesIntent.addCategory("com.cyanogenmod.category.LAUNCHER_PREFERENCES"); ActivityInfo defaultLauncher = getPackageManager().resolveActivity(launcherIntent, PackageManager.MATCH_DEFAULT_ONLY).activityInfo; launcherPreferencesIntent.setPackage(defaultLauncher.packageName); ResolveInfo launcherPreferences = getPackageManager().resolveActivity(launcherPreferencesIntent, 0); if (launcherPreferences != null) { header.intent = new Intent().setClassName(launcherPreferences.activityInfo.packageName, launcherPreferences.activityInfo.name); } else { target.remove(header); } } else if (id == R.id.wifi_settings) { // Remove WiFi Settings if WiFi service is not available. if (!getPackageManager().hasSystemFeature(PackageManager.FEATURE_WIFI)) { target.remove(i); } } else if (id == R.id.bluetooth_settings) { // Remove Bluetooth Settings if Bluetooth service is not available. 
if (!getPackageManager().hasSystemFeature(PackageManager.FEATURE_BLUETOOTH)) { target.remove(i); } } else if (id == R.id.data_usage_settings) { // Remove data usage when kernel module not enabled final INetworkManagementService netManager = INetworkManagementService.Stub .asInterface(ServiceManager.getService(Context.NETWORKMANAGEMENT_SERVICE)); try { if (!netManager.isBandwidthControlEnabled()) { target.remove(i); } } catch (RemoteException e) { // ignored } } else if (id == R.id.account_settings) { int headerIndex = i + 1; i = insertAccountsHeaders(target, headerIndex); } else if (id == R.id.user_settings) { if (!UserHandle.MU_ENABLED || !UserManager.supportsMultipleUsers() || Utils.isMonkeyRunning()) { target.remove(i); } } else if (id == R.id.development_settings || id == R.id.performance_settings) { if (!showDev) { target.remove(i); } } if (target.get(i) == header && UserHandle.MU_ENABLED && UserHandle.myUserId() != 0 && !ArrayUtils.contains(SETTINGS_FOR_RESTRICTED, id)) { target.remove(i); } // Increment if the current one wasn't removed by the Utils code. if (target.get(i) == header) { // Hold on to the first header, when we need to reset to the top-level if (mFirstHeader == null && HeaderAdapter.getHeaderType(header) != HeaderAdapter.HEADER_TYPE_CATEGORY) { mFirstHeader = header; } mHeaderIndexMap.put(id, i); i++; } } } private int insertAccountsHeaders(List<Header> target, int headerIndex) { String[] accountTypes = mAuthenticatorHelper.getEnabledAccountTypes(); List<Header> accountHeaders = new ArrayList<Header>(accountTypes.length); for (String accountType : accountTypes) { CharSequence label = mAuthenticatorHelper.getLabelForType(this, accountType); if (label == null) { continue; } Account[] accounts = AccountManager.get(this).getAccountsByType(accountType); boolean skipToAccount = accounts.length == 1 && !mAuthenticatorHelper.hasAccountPreferences(accountType); Header accHeader = new Header(); accHeader.title = label; if (accHeader.extras == null) { accHeader.extras = new Bundle(); } if (skipToAccount) { accHeader.breadCrumbTitleRes = R.string.account_sync_settings_title; accHeader.breadCrumbShortTitleRes = R.string.account_sync_settings_title; accHeader.fragment = AccountSyncSettings.class.getName(); accHeader.fragmentArguments = new Bundle(); // Need this for the icon accHeader.extras.putString(ManageAccountsSettings.KEY_ACCOUNT_TYPE, accountType); accHeader.extras.putParcelable(AccountSyncSettings.ACCOUNT_KEY, accounts[0]); accHeader.fragmentArguments.putParcelable(AccountSyncSettings.ACCOUNT_KEY, accounts[0]); } else { accHeader.breadCrumbTitle = label; accHeader.breadCrumbShortTitle = label; accHeader.fragment = ManageAccountsSettings.class.getName(); accHeader.fragmentArguments = new Bundle(); accHeader.extras.putString(ManageAccountsSettings.KEY_ACCOUNT_TYPE, accountType); accHeader.fragmentArguments.putString(ManageAccountsSettings.KEY_ACCOUNT_TYPE, accountType); if (!isMultiPane()) { accHeader.fragmentArguments.putString(ManageAccountsSettings.KEY_ACCOUNT_LABEL, label.toString()); } } accountHeaders.add(accHeader); } // Sort by label Collections.sort(accountHeaders, new Comparator<Header>() { @Override public int compare(Header h1, Header h2) { return h1.title.toString().compareTo(h2.title.toString()); } }); for (Header header : accountHeaders) { target.add(headerIndex++, header); } if (!mListeningToAccountUpdates) { AccountManager.get(this).addOnAccountsUpdatedListener(this, null, true); mListeningToAccountUpdates = true; } return headerIndex; } private void 
getMetaData() { try { ActivityInfo ai = getPackageManager().getActivityInfo(getComponentName(), PackageManager.GET_META_DATA); if (ai == null || ai.metaData == null) return; mTopLevelHeaderId = ai.metaData.getInt(META_DATA_KEY_HEADER_ID); mFragmentClass = ai.metaData.getString(META_DATA_KEY_FRAGMENT_CLASS); // Check if it has a parent specified and create a Header object final int parentHeaderTitleRes = ai.metaData.getInt(META_DATA_KEY_PARENT_TITLE); String parentFragmentClass = ai.metaData.getString(META_DATA_KEY_PARENT_FRAGMENT_CLASS); if (parentFragmentClass != null) { mParentHeader = new Header(); mParentHeader.fragment = parentFragmentClass; if (parentHeaderTitleRes != 0) { mParentHeader.title = getResources().getString(parentHeaderTitleRes); } } } catch (NameNotFoundException nnfe) { // No recovery } } @Override public boolean hasNextButton() { return super.hasNextButton(); } @Override public Button getNextButton() { return super.getNextButton(); } private static class HeaderAdapter extends ArrayAdapter<Header> { static final int HEADER_TYPE_CATEGORY = 0; static final int HEADER_TYPE_NORMAL = 1; static final int HEADER_TYPE_SWITCH = 2; private static final int HEADER_TYPE_COUNT = HEADER_TYPE_SWITCH + 1; private final WifiEnabler mWifiEnabler; private final BluetoothEnabler mBluetoothEnabler; private final ProfileEnabler mProfileEnabler; private AuthenticatorHelper mAuthHelper; private static class HeaderViewHolder { ImageView icon; TextView title; TextView summary; Switch switch_; } private LayoutInflater mInflater; static int getHeaderType(Header header) { if (header.fragment == null && header.intent == null) { return HEADER_TYPE_CATEGORY; } else if (header.id == R.id.wifi_settings || header.id == R.id.bluetooth_settings || header.id == R.id.profiles_settings) { return HEADER_TYPE_SWITCH; } else { return HEADER_TYPE_NORMAL; } } @Override public int getItemViewType(int position) { Header header = getItem(position); return getHeaderType(header); } @Override public boolean areAllItemsEnabled() { return false; // because of categories } @Override public boolean isEnabled(int position) { return getItemViewType(position) != HEADER_TYPE_CATEGORY; } @Override public int getViewTypeCount() { return HEADER_TYPE_COUNT; } @Override public boolean hasStableIds() { return true; } public HeaderAdapter(Context context, List<Header> objects, AuthenticatorHelper authenticatorHelper) { super(context, 0, objects); mAuthHelper = authenticatorHelper; mInflater = (LayoutInflater)context.getSystemService(Context.LAYOUT_INFLATER_SERVICE); // Temp Switches provided as placeholder until the adapter replaces these with actual // Switches inflated from their layouts. 
Must be done before adapter is set in super mWifiEnabler = new WifiEnabler(context, new Switch(context)); mBluetoothEnabler = new BluetoothEnabler(context, new Switch(context)); mProfileEnabler = new ProfileEnabler(context, null, new Switch(context)); } @Override public View getView(int position, View convertView, ViewGroup parent) { HeaderViewHolder holder; Header header = getItem(position); int headerType = getHeaderType(header); View view = null; - if (convertView == null) { + if (convertView == null || headerType == HEADER_TYPE_SWITCH) { holder = new HeaderViewHolder(); switch (headerType) { case HEADER_TYPE_CATEGORY: view = new TextView(getContext(), null, android.R.attr.listSeparatorTextViewStyle); holder.title = (TextView) view; break; case HEADER_TYPE_SWITCH: view = mInflater.inflate(R.layout.preference_header_switch_item, parent, false); holder.icon = (ImageView) view.findViewById(R.id.icon); holder.title = (TextView) view.findViewById(com.android.internal.R.id.title); holder.summary = (TextView) view.findViewById(com.android.internal.R.id.summary); holder.switch_ = (Switch) view.findViewById(R.id.switchWidget); break; case HEADER_TYPE_NORMAL: view = mInflater.inflate( R.layout.preference_header_item, parent, false); holder.icon = (ImageView) view.findViewById(R.id.icon); holder.title = (TextView) view.findViewById(com.android.internal.R.id.title); holder.summary = (TextView) view.findViewById(com.android.internal.R.id.summary); break; } view.setTag(holder); } else { view = convertView; holder = (HeaderViewHolder) view.getTag(); } // All view fields must be updated every time, because the view may be recycled switch (headerType) { case HEADER_TYPE_CATEGORY: holder.title.setText(header.getTitle(getContext().getResources())); break; case HEADER_TYPE_SWITCH: // Would need a different treatment if the main menu had more switches if (header.id == R.id.wifi_settings) { mWifiEnabler.setSwitch(holder.switch_); } else if (header.id == R.id.bluetooth_settings) { mBluetoothEnabler.setSwitch(holder.switch_); } else if (header.id == R.id.profiles_settings) { mProfileEnabler.setSwitch(holder.switch_); } // No break, fall through on purpose to update common fields //$FALL-THROUGH$ case HEADER_TYPE_NORMAL: if (header.extras != null && header.extras.containsKey(ManageAccountsSettings.KEY_ACCOUNT_TYPE)) { String accType = header.extras.getString( ManageAccountsSettings.KEY_ACCOUNT_TYPE); ViewGroup.LayoutParams lp = holder.icon.getLayoutParams(); lp.width = getContext().getResources().getDimensionPixelSize( R.dimen.header_icon_width); lp.height = lp.width; holder.icon.setLayoutParams(lp); Drawable icon = mAuthHelper.getDrawableForType(getContext(), accType); holder.icon.setImageDrawable(icon); } else { holder.icon.setImageResource(header.iconRes); } holder.title.setText(header.getTitle(getContext().getResources())); CharSequence summary = header.getSummary(getContext().getResources()); if (!TextUtils.isEmpty(summary)) { holder.summary.setVisibility(View.VISIBLE); holder.summary.setText(summary); } else { holder.summary.setVisibility(View.GONE); } break; } return view; } public void resume() { mWifiEnabler.resume(); mBluetoothEnabler.resume(); mProfileEnabler.resume(); } public void pause() { mWifiEnabler.pause(); mBluetoothEnabler.pause(); mProfileEnabler.pause(); } } @Override public void onHeaderClick(Header header, int position) { boolean revert = false; if (header.id == R.id.account_add) { revert = true; } super.onHeaderClick(header, position); if (revert && mLastHeader != null) { 
highlightHeader((int) mLastHeader.id); } else { mLastHeader = header; } } @Override public boolean onPreferenceStartFragment(PreferenceFragment caller, Preference pref) { // Override the fragment title for Wallpaper settings int titleRes = pref.getTitleRes(); if (pref.getFragment().equals(WallpaperTypeSettings.class.getName())) { titleRes = R.string.wallpaper_settings_fragment_title; } else if (pref.getFragment().equals(OwnerInfoSettings.class.getName()) && UserHandle.myUserId() != UserHandle.USER_OWNER) { titleRes = R.string.user_info_settings_title; } startPreferencePanel(pref.getFragment(), pref.getExtras(), titleRes, pref.getTitle(), null, 0); return true; } public boolean shouldUpRecreateTask(Intent targetIntent) { return super.shouldUpRecreateTask(new Intent(this, Settings.class)); } @Override public void setListAdapter(ListAdapter adapter) { if (adapter == null) { super.setListAdapter(null); } else { super.setListAdapter(new HeaderAdapter(this, getHeaders(), mAuthenticatorHelper)); } } @Override public void onAccountsUpdated(Account[] accounts) { // TODO: watch for package upgrades to invalidate cache; see 7206643 mAuthenticatorHelper.updateAuthDescriptions(this); mAuthenticatorHelper.onAccountsUpdated(this, accounts); invalidateHeaders(); } /* * Settings subclasses for launching independently. */ public static class BluetoothSettingsActivity extends Settings { /* empty */ } public static class WirelessSettingsActivity extends Settings { /* empty */ } public static class TetherSettingsActivity extends Settings { /* empty */ } public static class VpnSettingsActivity extends Settings { /* empty */ } public static class DateTimeSettingsActivity extends Settings { /* empty */ } public static class StorageSettingsActivity extends Settings { /* empty */ } public static class WifiSettingsActivity extends Settings { /* empty */ } public static class WifiP2pSettingsActivity extends Settings { /* empty */ } public static class InputMethodAndLanguageSettingsActivity extends Settings { /* empty */ } public static class KeyboardLayoutPickerActivity extends Settings { /* empty */ } public static class InputMethodAndSubtypeEnablerActivity extends Settings { /* empty */ } public static class SpellCheckersSettingsActivity extends Settings { /* empty */ } public static class LocalePickerActivity extends Settings { /* empty */ } public static class UserDictionarySettingsActivity extends Settings { /* empty */ } public static class SoundSettingsActivity extends Settings { /* empty */ } public static class DisplaySettingsActivity extends Settings { /* empty */ } public static class DeviceInfoSettingsActivity extends Settings { /* empty */ } public static class ApplicationSettingsActivity extends Settings { /* empty */ } public static class ManageApplicationsActivity extends Settings { /* empty */ } public static class StorageUseActivity extends Settings { /* empty */ } public static class DevelopmentSettingsActivity extends Settings { /* empty */ } public static class AccessibilitySettingsActivity extends Settings { /* empty */ } public static class SecuritySettingsActivity extends Settings { /* empty */ } public static class LocationSettingsActivity extends Settings { /* empty */ } public static class PrivacySettingsActivity extends Settings { /* empty */ } public static class RunningServicesActivity extends Settings { /* empty */ } public static class ManageAccountsSettingsActivity extends Settings { /* empty */ } public static class PowerUsageSummaryActivity extends Settings { /* empty */ } public 
static class AccountSyncSettingsActivity extends Settings { /* empty */ } public static class AccountSyncSettingsInAddAccountActivity extends Settings { /* empty */ } public static class CryptKeeperSettingsActivity extends Settings { /* empty */ } public static class DeviceAdminSettingsActivity extends Settings { /* empty */ } public static class DataUsageSummaryActivity extends Settings { /* empty */ } public static class AdvancedWifiSettingsActivity extends Settings { /* empty */ } public static class TextToSpeechSettingsActivity extends Settings { /* empty */ } public static class AndroidBeamSettingsActivity extends Settings { /* empty */ } public static class WifiDisplaySettingsActivity extends Settings { /* empty */ } public static class AnonymousStatsActivity extends Settings { /* empty */ } public static class ApnSettingsActivity extends Settings { /* empty */ } public static class ApnEditorActivity extends Settings { /* empty */ } }
true
true
public View getView(int position, View convertView, ViewGroup parent) { HeaderViewHolder holder; Header header = getItem(position); int headerType = getHeaderType(header); View view = null; if (convertView == null) { holder = new HeaderViewHolder(); switch (headerType) { case HEADER_TYPE_CATEGORY: view = new TextView(getContext(), null, android.R.attr.listSeparatorTextViewStyle); holder.title = (TextView) view; break; case HEADER_TYPE_SWITCH: view = mInflater.inflate(R.layout.preference_header_switch_item, parent, false); holder.icon = (ImageView) view.findViewById(R.id.icon); holder.title = (TextView) view.findViewById(com.android.internal.R.id.title); holder.summary = (TextView) view.findViewById(com.android.internal.R.id.summary); holder.switch_ = (Switch) view.findViewById(R.id.switchWidget); break; case HEADER_TYPE_NORMAL: view = mInflater.inflate( R.layout.preference_header_item, parent, false); holder.icon = (ImageView) view.findViewById(R.id.icon); holder.title = (TextView) view.findViewById(com.android.internal.R.id.title); holder.summary = (TextView) view.findViewById(com.android.internal.R.id.summary); break; } view.setTag(holder); } else { view = convertView; holder = (HeaderViewHolder) view.getTag(); } // All view fields must be updated every time, because the view may be recycled switch (headerType) { case HEADER_TYPE_CATEGORY: holder.title.setText(header.getTitle(getContext().getResources())); break; case HEADER_TYPE_SWITCH: // Would need a different treatment if the main menu had more switches if (header.id == R.id.wifi_settings) { mWifiEnabler.setSwitch(holder.switch_); } else if (header.id == R.id.bluetooth_settings) { mBluetoothEnabler.setSwitch(holder.switch_); } else if (header.id == R.id.profiles_settings) { mProfileEnabler.setSwitch(holder.switch_); } // No break, fall through on purpose to update common fields //$FALL-THROUGH$ case HEADER_TYPE_NORMAL: if (header.extras != null && header.extras.containsKey(ManageAccountsSettings.KEY_ACCOUNT_TYPE)) { String accType = header.extras.getString( ManageAccountsSettings.KEY_ACCOUNT_TYPE); ViewGroup.LayoutParams lp = holder.icon.getLayoutParams(); lp.width = getContext().getResources().getDimensionPixelSize( R.dimen.header_icon_width); lp.height = lp.width; holder.icon.setLayoutParams(lp); Drawable icon = mAuthHelper.getDrawableForType(getContext(), accType); holder.icon.setImageDrawable(icon); } else { holder.icon.setImageResource(header.iconRes); } holder.title.setText(header.getTitle(getContext().getResources())); CharSequence summary = header.getSummary(getContext().getResources()); if (!TextUtils.isEmpty(summary)) { holder.summary.setVisibility(View.VISIBLE); holder.summary.setText(summary); } else { holder.summary.setVisibility(View.GONE); } break; } return view; }
public View getView(int position, View convertView, ViewGroup parent) { HeaderViewHolder holder; Header header = getItem(position); int headerType = getHeaderType(header); View view = null; if (convertView == null || headerType == HEADER_TYPE_SWITCH) { holder = new HeaderViewHolder(); switch (headerType) { case HEADER_TYPE_CATEGORY: view = new TextView(getContext(), null, android.R.attr.listSeparatorTextViewStyle); holder.title = (TextView) view; break; case HEADER_TYPE_SWITCH: view = mInflater.inflate(R.layout.preference_header_switch_item, parent, false); holder.icon = (ImageView) view.findViewById(R.id.icon); holder.title = (TextView) view.findViewById(com.android.internal.R.id.title); holder.summary = (TextView) view.findViewById(com.android.internal.R.id.summary); holder.switch_ = (Switch) view.findViewById(R.id.switchWidget); break; case HEADER_TYPE_NORMAL: view = mInflater.inflate( R.layout.preference_header_item, parent, false); holder.icon = (ImageView) view.findViewById(R.id.icon); holder.title = (TextView) view.findViewById(com.android.internal.R.id.title); holder.summary = (TextView) view.findViewById(com.android.internal.R.id.summary); break; } view.setTag(holder); } else { view = convertView; holder = (HeaderViewHolder) view.getTag(); } // All view fields must be updated every time, because the view may be recycled switch (headerType) { case HEADER_TYPE_CATEGORY: holder.title.setText(header.getTitle(getContext().getResources())); break; case HEADER_TYPE_SWITCH: // Would need a different treatment if the main menu had more switches if (header.id == R.id.wifi_settings) { mWifiEnabler.setSwitch(holder.switch_); } else if (header.id == R.id.bluetooth_settings) { mBluetoothEnabler.setSwitch(holder.switch_); } else if (header.id == R.id.profiles_settings) { mProfileEnabler.setSwitch(holder.switch_); } // No break, fall through on purpose to update common fields //$FALL-THROUGH$ case HEADER_TYPE_NORMAL: if (header.extras != null && header.extras.containsKey(ManageAccountsSettings.KEY_ACCOUNT_TYPE)) { String accType = header.extras.getString( ManageAccountsSettings.KEY_ACCOUNT_TYPE); ViewGroup.LayoutParams lp = holder.icon.getLayoutParams(); lp.width = getContext().getResources().getDimensionPixelSize( R.dimen.header_icon_width); lp.height = lp.width; holder.icon.setLayoutParams(lp); Drawable icon = mAuthHelper.getDrawableForType(getContext(), accType); holder.icon.setImageDrawable(icon); } else { holder.icon.setImageResource(header.iconRes); } holder.title.setText(header.getTitle(getContext().getResources())); CharSequence summary = header.getSummary(getContext().getResources()); if (!TextUtils.isEmpty(summary)) { holder.summary.setVisibility(View.VISIBLE); holder.summary.setText(summary); } else { holder.summary.setVisibility(View.GONE); } break; } return view; }
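The fixed getView above differs from the buggy one only in its recycling condition: a row is rebuilt not just when convertView is null but also whenever the header is a switch-type header, so switch rows are never reused from the recycler. A framework-free sketch of that rule follows; RowRecyclingSketch, Row and RowType are invented names for illustration and stand in for Android's convertView mechanism rather than reproducing it.

public class RowRecyclingSketch {

    enum RowType { CATEGORY, NORMAL, SWITCH }

    static final class Row {
        final RowType type;
        Row(RowType type) { this.type = type; }
    }

    // Mirrors the guard in the fixed getView(): build a fresh row when there is
    // nothing to recycle, or whenever the item is a switch row.
    static Row getRow(RowType type, Row recycled) {
        if (recycled == null || type == RowType.SWITCH) {
            return new Row(type);
        }
        return recycled; // safe to reuse for category/normal rows
    }

    public static void main(String[] args) {
        Row normal = getRow(RowType.NORMAL, null);
        System.out.println(getRow(RowType.NORMAL, normal) == normal); // true: reused
        System.out.println(getRow(RowType.SWITCH, normal) == normal); // false: rebuilt
    }
}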
diff --git a/src/edu/vub/at/nfcpoker/ui/ClientActivity.java b/src/edu/vub/at/nfcpoker/ui/ClientActivity.java index 363770a..f8c5b5d 100644 --- a/src/edu/vub/at/nfcpoker/ui/ClientActivity.java +++ b/src/edu/vub/at/nfcpoker/ui/ClientActivity.java @@ -1,1470 +1,1470 @@ package edu.vub.at.nfcpoker.ui; import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Timer; import java.util.TimerTask; import java.util.UUID; import android.app.Activity; import android.app.AlertDialog; import android.app.Dialog; import android.app.PendingIntent; import android.app.ProgressDialog; import android.content.ActivityNotFoundException; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.IntentFilter; import android.content.IntentFilter.MalformedMimeTypeException; import android.content.pm.ActivityInfo; import android.hardware.Sensor; import android.hardware.SensorEvent; import android.hardware.SensorEventListener; import android.hardware.SensorManager; import android.media.AudioManager; import android.net.Uri; import android.nfc.NdefMessage; import android.nfc.NfcAdapter; import android.os.AsyncTask; import android.os.Bundle; import android.os.Vibrator; import android.speech.RecognizerIntent; import android.speech.tts.TextToSpeech; import android.text.InputType; import android.util.Log; import android.view.GestureDetector; import android.view.Gravity; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.MotionEvent; import android.view.View; import android.view.View.OnClickListener; import android.view.animation.AnimationUtils; import android.widget.Button; import android.widget.EditText; import android.widget.ImageButton; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.TextView; import android.widget.Toast; import com.esotericsoftware.kryonet.Client; import com.esotericsoftware.kryonet.Connection; import com.esotericsoftware.kryonet.Listener; import edu.vub.at.commlib.CommLib; import edu.vub.at.commlib.CommLibConnectionInfo; import edu.vub.at.nfcpoker.Card; import edu.vub.at.nfcpoker.Constants; import edu.vub.at.nfcpoker.PokerGameState; import edu.vub.at.nfcpoker.PlayerState; import edu.vub.at.nfcpoker.QRNFCFunctions; import edu.vub.at.nfcpoker.R; import edu.vub.at.nfcpoker.comm.GameServer; import edu.vub.at.nfcpoker.comm.Message.BigBlindMessage; import edu.vub.at.nfcpoker.comm.Message.CheatMessage; import edu.vub.at.nfcpoker.comm.Message.ClientAction; import edu.vub.at.nfcpoker.comm.Message.ClientActionMessage; import edu.vub.at.nfcpoker.comm.Message.ClientActionType; import edu.vub.at.nfcpoker.comm.Message.FutureMessage; import edu.vub.at.nfcpoker.comm.Message.SetClientParameterMessage; import edu.vub.at.nfcpoker.comm.Message.PoolMessage; import edu.vub.at.nfcpoker.comm.Message.ReceiveHoleCardsMessage; import edu.vub.at.nfcpoker.comm.Message.ReceivePublicCards; import edu.vub.at.nfcpoker.comm.Message.RequestClientActionFutureMessage; import edu.vub.at.nfcpoker.comm.Message.RoundWinnersDeclarationMessage; import edu.vub.at.nfcpoker.comm.Message.SetIDMessage; import edu.vub.at.nfcpoker.comm.Message.SmallBlindMessage; import edu.vub.at.nfcpoker.comm.Message.StateChangeMessage; import edu.vub.at.nfcpoker.settings.Settings; import edu.vub.at.nfcpoker.ui.tools.Levenshtein; import edu.vub.at.nfcpoker.ui.tools.PageProvider; import 
fi.harism.curl.CurlView; public class ClientActivity extends Activity implements OnClickListener { public class ConnectAsyncTask extends AsyncTask<Void, Void, Client> { private int port; private String address; private Listener listener; public ConnectAsyncTask(String address, int port, Listener listener) { this.address = address; this.port = port; this.listener = listener; } @Override protected void onPreExecute() { super.onPreExecute(); Log.v("wePoker - Client", "Connecting to "+address+" "+port); } @Override protected Client doInBackground(Void... params) { try { return CommLibConnectionInfo.connect(address, port, listener); } catch (IOException e) { Log.d("wePoker - Client", "Could not connect to server", e); } return null; } } // Game state private static int money = 2000; // Current money private int currentSelectedBet = 0; // Currently selected bet (in sync with visualisation) private int currentProcessedBet = 0; // Bet's forwarded to server private int minimumBet = 0; // Minimum bet private int totalBet = 0; // Total bet for this game // Server private static boolean isDedicated = false; private static boolean isServer = false; private static String serverIpAddress; private static int serverPort; private static String serverBroadcast; private static String serverWifiName; private static String serverWifiPassword; // Connectivity private static UUID pendingFuture; private static Connection serverConnection; private static int myClientID; // UI public static final int POKER_GREEN = 0xFF2C672E; private static int nextToReveal = 0; private static ReceiveHoleCardsMessage lastReceivedHoleCards; private CurlView mCardView1; private CurlView mCardView2; // Interactivity (Process dialog) private ProgressDialog barrier; // Interactivity (Incognito) public static boolean incognitoMode; private static final boolean useIncognitoMode = true; private static final boolean useIncognitoLight = false; private static final boolean useIncognitoProxmity = true; private long incognitoLight; private long incognitoProximity; private Timer incognitoDelay; private SensorManager sensorManager; // Interactivity(Fold&Gravity) private static final boolean useFoldOnGravity = true; private Timer foldDelay; private long foldProximity; private long foldGravity; // Interactivity(Gestures) private static final int SWIPE_MIN_DISTANCE = 120; private static final int SWIPE_MAX_OFF_PATH = 250; private static final int SWIPE_THRESHOLD_VELOCITY = 200; private GestureDetector gestureDetector; private View.OnTouchListener gestureListener; private int currentChipSwiped = 0; private boolean touchedCard = false; // Interactivity(Speech) private static final int RESULT_SPEECH = 1; // Interactivity(Audio) private static boolean audioFeedback = false; private TextToSpeech tts = null; private boolean ttsInitialised = false; // NFC private NfcAdapter nfcAdapter; private PendingIntent pendingIntent; private IntentFilter[] intentFiltersArray; // Help private static boolean firstSwipe = true; private Button bet; private Button check; private Button fold; private boolean allInEnabled; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); // Force portrait mode. Do this in code because Google TV does not like it in the manifest. 
setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT); // Connectivity serverIpAddress = getIntent().getStringExtra(Constants.INTENT_SERVER_IP); serverPort = getIntent().getIntExtra(Constants.INTENT_PORT, CommLib.SERVER_PORT); isDedicated = getIntent().getBooleanExtra(Constants.INTENT_IS_DEDICATED, true); isServer = getIntent().getBooleanExtra(Constants.INTENT_IS_SERVER, false); serverBroadcast = getIntent().getStringExtra(Constants.INTENT_BROADCAST); serverWifiName = getIntent().getStringExtra(Constants.INTENT_WIFI_NAME); serverWifiPassword = getIntent().getStringExtra(Constants.INTENT_WIFI_PASSWORD); // Configure the Client Interface if (isDedicated && !audioFeedback) { setContentView(R.layout.activity_client_is_dedicated); } else { setContentView(R.layout.activity_client); } // Start server on a client if required if (isServer) { GameServer cps = new GameServer( new DummServerView(), false, serverIpAddress, serverBroadcast); cps.start(); } // Interactivity incognitoMode = false; incognitoLight = 0; incognitoProximity = 0; incognitoDelay = null; foldProximity = 0; foldGravity = 0; foldDelay = null; sensorManager = (SensorManager)getSystemService(Context.SENSOR_SERVICE); tts = new TextToSpeech(this, txtToSpeechListener); checkHeadset(); // NFC nfcAdapter = NfcAdapter.getDefaultAdapter(this); if (nfcAdapter != null) { pendingIntent = PendingIntent.getActivity( this, 0, new Intent(this, getClass()).addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP), 0); IntentFilter ndef = new IntentFilter(NfcAdapter.ACTION_NDEF_DISCOVERED); IntentFilter all = new IntentFilter(NfcAdapter.ACTION_TECH_DISCOVERED); try { // Handles all MIME based dispatches. You should specify only the ones that you need. ndef.addDataType("*/*"); } catch (MalformedMimeTypeException e) { throw new RuntimeException("fail", e); } intentFiltersArray = new IntentFilter[] { ndef, all }; // Broadcast NFC Beam try { // SDK API 14 - Method setNdefPushMessage = nfcAdapter.getClass().getMethod("setNdefPushMessage", new Class[] { NdefMessage.class, Activity.class }); + Method setNdefPushMessage = nfcAdapter.getClass().getMethod("setNdefPushMessage", new Class[] { NdefMessage.class, Activity.class, Activity[].class}); setNdefPushMessage.invoke(nfcAdapter, QRNFCFunctions.getServerInfoNdefMessage( serverWifiName, serverWifiPassword, - serverIpAddress, serverPort, isDedicated), this); + serverIpAddress, serverPort, isDedicated), this, null); } catch (Exception e) { e.printStackTrace(); } } // Gesture detection gestureDetector = new GestureDetector(this, new MyGestureDetector()); gestureListener = new View.OnTouchListener() { @Override public boolean onTouch(View arg0, MotionEvent arg1) { if (firstSwipe) { firstSwipe = false; quickOutputMessage(ClientActivity.this, "Swipe up or down to add or remove money"); } int viewSwiped = arg0.getId(); switch(viewSwiped) { case R.id.whitechip: currentChipSwiped = 5; break; case R.id.redchip: currentChipSwiped = 10; break; case R.id.greenchip: currentChipSwiped = 20; break; case R.id.bluechip: currentChipSwiped = 50; break; case R.id.blackchip: currentChipSwiped = 100; break; case R.id.pCard1: touchedCard = true; break; case R.id.pCard2: touchedCard = true; break; default: Log.v("wePoker - Client", "wrong view swipped" + viewSwiped); touchedCard = false; } ImageView chip = (ImageView) findViewById(viewSwiped); chip.startAnimation(AnimationUtils.loadAnimation(ClientActivity.this, R.anim.rotate_full)); return gestureDetector.onTouchEvent(arg1); } }; final ImageView whitechip = (ImageView) 
findViewById(R.id.whitechip); whitechip.setOnClickListener(ClientActivity.this); whitechip.setOnTouchListener(gestureListener); final ImageView redchip = (ImageView) findViewById(R.id.redchip); redchip.setOnClickListener(ClientActivity.this); redchip.setOnTouchListener(gestureListener); final ImageView greenchip = (ImageView) findViewById(R.id.greenchip); greenchip.setOnClickListener(ClientActivity.this); greenchip.setOnTouchListener(gestureListener); final ImageView bluechip = (ImageView) findViewById(R.id.bluechip); bluechip.setOnClickListener(ClientActivity.this); bluechip.setOnTouchListener(gestureListener); final ImageView blackchip = (ImageView) findViewById(R.id.blackchip); blackchip.setOnClickListener(ClientActivity.this); blackchip.setOnTouchListener(gestureListener); mCardView1 = (CurlView) findViewById(R.id.pCard1); mCardView1.setPageProvider(new PageProvider(this, new int[] { R.drawable.backside, R.drawable.backside })); mCardView1.setCurrentIndex(0); mCardView1.setBackgroundColor(POKER_GREEN); mCardView1.setAllowLastPageCurl(false); mCardView2 = (CurlView) findViewById(R.id.pCard2); mCardView2.setPageProvider(new PageProvider(this, new int[] { R.drawable.backside, R.drawable.backside })); mCardView2.setCurrentIndex(0); mCardView2.setBackgroundColor(POKER_GREEN); mCardView2.setAllowLastPageCurl(false); bet = (Button) findViewById(R.id.Bet); check = (Button) findViewById(R.id.Check); fold = (Button) findViewById(R.id.Fold); bet.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performBet(); } }); check.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performCheck(); } }); fold.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performFold(); } }); currentSelectedBet = 0; currentProcessedBet = 0; minimumBet = 0; totalBet = 0; currentChipSwiped = 0; nextToReveal = 0; lastReceivedHoleCards = null; // Connect to the server new ConnectAsyncTask(serverIpAddress, serverPort, listener).execute(); showBarrier("Registering to server..."); // adding the hallo wePoker to the watch // clientGameState = ClientGameState.PLAYING; // Intent intent = new Intent(Control.Intents.CONTROL_START_REQUEST_INTENT); // intent.putExtra(Control.Intents.EXTRA_AEA_PACKAGE_NAME, this.getPackageName()); // intent.setPackage("com.sonyericsson.extras.smartwatch"); // sendBroadcast(intent, Registration.HOSTAPP_PERMISSION); // } protected void runOnNotUiThread(Runnable runnable) { new Thread(runnable).start(); } private void showBarrier(String cause) { if (barrier == null) { barrier = new ProgressDialog(ClientActivity.this); barrier.setTitle(cause); barrier.setCancelable(false); barrier.setMessage("Please wait"); barrier.show(); } else { barrier.setTitle(cause); } } private void hideBarrier() { if (barrier != null) { barrier.dismiss(); barrier = null; } } private void setServerConnection(Connection c) { serverConnection = c; } private void updateMoneyTitle() { if (totalBet > 0) { setTitle("wePoker (" +money+"\u20AC // "+(totalBet)+"\u20AC)"); } else { setTitle("wePoker (" +money+"\u20AC)"); } } private void toastSmallBlind(int amount) { quickOutputMessage(this, "Small blind for " + amount + " \u20AC"); disableActions(); } private void toastBigBlind(int amount) { quickOutputMessage(this, "Big blind for "+amount+" \u20AC"); disableActions(); } private void performBet() { if (!bet.isEnabled()) { quickOutputMessage(this, "Cannot bet or raise"); return; } if (currentSelectedBet < minimumBet) { quickOutputMessage(this, "At 
least bet "+minimumBet); return; } // TODO mimumbet en money check if setting currentSelectedBet if (money < currentSelectedBet) { quickOutputMessage(this, "Not enough money to place bet"); return; } final int diff = currentSelectedBet - currentProcessedBet; currentProcessedBet = currentSelectedBet; money -= diff; totalBet += diff; runOnNotUiThread(new Runnable() { public void run() { ClientAction ca = new ClientAction(ClientActionType.Bet, currentProcessedBet, diff); serverConnection.sendTCP(new FutureMessage(pendingFuture, ca)); } }); quickOutputMessage(this, "Bet "+currentProcessedBet); updateBetAmount(); updateMoneyTitle(); disableActions(); } private void performCheck() { if (!check.isEnabled()) { quickOutputMessage(this, "Cannot check or call"); return; } if (minimumBet >= currentProcessedBet + money) { performAllIn(); return; } final int diffMoney = minimumBet - currentProcessedBet; currentSelectedBet = minimumBet; currentProcessedBet = minimumBet; money -= diffMoney; totalBet += diffMoney; runOnNotUiThread(new Runnable() { public void run() { ClientAction ca = new ClientAction(ClientActionType.Check, currentProcessedBet, diffMoney); serverConnection.sendTCP(new FutureMessage(pendingFuture, ca)); } }); quickOutputMessage(this, "Following for "+currentProcessedBet); updateBetAmount(); updateMoneyTitle(); disableActions(); } private void performFold() { if (!fold.isEnabled()) { quickOutputMessage(this, "Cannot fold"); return; } runOnNotUiThread(new Runnable() { public void run() { ClientAction ca = new ClientAction(ClientActionType.Fold); serverConnection.sendTCP(new FutureMessage(pendingFuture, ca)); } }); quickOutputMessage(this, "Fold"); updateBetAmount(); updateMoneyTitle(); disableActions(); } // TODO: force all in if not enough money for blind / bet / ... 
private void performAllIn() { if (!allInEnabled) { quickOutputMessage(this, "Cannot perform all in"); return; } final int diffMoney = money - currentProcessedBet; currentSelectedBet = diffMoney; currentProcessedBet += diffMoney; money = 0; totalBet += diffMoney; runOnNotUiThread(new Runnable() { public void run() { ClientAction ca = new ClientAction(ClientActionType.AllIn, currentProcessedBet, diffMoney); serverConnection.sendTCP(new FutureMessage(pendingFuture, ca)); } }); quickOutputMessage(this, "All in for "+currentProcessedBet); updateBetAmount(); updateMoneyTitle(); disableActions(); } @SuppressWarnings("deprecation") private void checkHeadset() { AudioManager am = (AudioManager)getSystemService(AUDIO_SERVICE); Log.i("wePoker - Client", "Wifi headset: " + am.isWiredHeadsetOn()); audioFeedback = am.isWiredHeadsetOn(); } private void enableActions(final int round) { runOnUiThread(new Runnable() { @Override public void run() { if (round >= 1) { bet.setEnabled(false); if (minimumBet >= currentProcessedBet + money) { allInEnabled = true; } else { allInEnabled = false; } } else { bet.setEnabled(true); allInEnabled = true; } check.setEnabled(true); fold.setEnabled(true); // Interactivity(Fold&Gravity) - Reset foldProximity = 0; foldGravity = 0; // Interactivity(Vibrate) vibrate(VibrationType.Action); updateMoneyTitle(); updateCheckCallText(); } }); } private void updateCheckCallText() { if (minimumBet > 0) { if (minimumBet >= currentProcessedBet + money) { check.setText("All in"); } else if (minimumBet == currentProcessedBet) { check.setText("Check"); } else { check.setText("Call"); } } else { check.setText("Check"); } } private void disableActions() { runOnUiThread(new Runnable() { @Override public void run() { bet.setEnabled(false); check.setEnabled(false); fold.setEnabled(false); allInEnabled = false; updateMoneyTitle(); updateCheckCallText(); } }); } public void processStateChangeMessage(Connection c, Object m) { StateChangeMessage scm = (StateChangeMessage) m; PokerGameState newGameState = scm.newState; currentSelectedBet = 0; currentProcessedBet = 0; currentChipSwiped = 0; runOnUiThread(new Runnable() { public void run() { disableActions(); updateBetAmount(); updateMinBetAmount(0); checkHeadset(); }}); String toastToShow = null; switch (newGameState) { case STOPPED: Log.v("wePoker - Client", "Game state changed to STOPPED"); runOnUiThread(new Runnable() { public void run() { showBarrier("Waiting for server"); }}); break; case WAITING_FOR_PLAYERS: Log.v("wePoker - Client", "Game state changed to WAITING_FOR_PLAYERS"); runOnUiThread(new Runnable() { public void run() { showBarrier("Waiting for players"); hideCards(); }}); break; case PREFLOP: toastToShow = "Any preflop bet?"; Log.v("wePoker - Client", "Game state changed to PREFLOP"); runOnUiThread(new Runnable() { public void run() { hideBarrier(); if (lastReceivedHoleCards == null) { showBarrier("Waiting for next round"); } }}); break; case FLOP: toastToShow = "Flopping cards..."; Log.v("wePoker - Client", "Game state changed to FLOP"); break; case TURN: toastToShow = "Here is the turn"; Log.v("wePoker - Client", "Game state changed to TURN"); break; case RIVER: toastToShow = "River card visible"; Log.v("wePoker - Client", "Game state changed to RIVER"); break; case END_OF_ROUND: Log.v("wePoker - Client", "Game state changed to END_OF_ROUND"); currentSelectedBet = 0; currentProcessedBet = 0; totalBet = 0; currentChipSwiped = 0; nextToReveal = 0; lastReceivedHoleCards = null; runOnUiThread(new Runnable() { public void run() { 
updateMoneyTitle(); updateBetAmount(); updateMinBetAmount(0); }}); break; } if (toastToShow != null) { final String toastToShowFinal = toastToShow; runOnUiThread(new Runnable() { public void run() { quickOutputMessage(ClientActivity.this, toastToShowFinal); } }); } } Listener listener = new Listener() { @Override public void connected(Connection arg0) { super.connected(arg0); setServerConnection(arg0); Log.d("wePoker - Client","Connected to server!"); } @Override public void received(Connection c, Object m) { super.received(c, m); Log.v("wePoker - Client", "Received message " + m.toString()); if (m instanceof StateChangeMessage) { // Client view Log.v("wePoker - Client", "Procesing state message " + m.toString()); processStateChangeMessage(c, m); if (!isDedicated) { // Server view Log.v("wePoker - Client-Server", "Procesing state message " + m.toString()); final StateChangeMessage sm = (StateChangeMessage) m; if (sm.newState == PokerGameState.PREFLOP) { runOnUiThread(new Runnable() { @Override public void run() { serverHideCards(); }}); } } } if (m instanceof ReceivePublicCards) { ReceivePublicCards newPublicCards = (ReceivePublicCards) m; Log.v("wePoker - Client", "Received public cards: "); Card[] cards = newPublicCards.cards; for (int i = 0; i < cards.length; i++) { Log.v("wePoker - Client", cards[i].toString() + ", "); } if (!isDedicated) { // Server view Log.v("wePoker - Client-Server", "Procesing state message " + m.toString()); final ReceivePublicCards rc = (ReceivePublicCards) m; runOnUiThread(new Runnable() { @Override public void run() { serverRevealCards(rc.cards); }}); } } if (m instanceof ReceiveHoleCardsMessage) { final ReceiveHoleCardsMessage newHoleCards = (ReceiveHoleCardsMessage) m; Log.v("wePoker - Client", "Received hand cards: " + newHoleCards.toString()); lastReceivedHoleCards = newHoleCards; ClientActivity.this.runOnUiThread(new Runnable() { @Override public void run() { updateHandGui(newHoleCards); } }); } if (m instanceof ClientActionMessage) { final ClientActionMessage newClientActionMessage = (ClientActionMessage) m; final ClientAction action = newClientActionMessage.getClientAction(); Log.v("wePoker - Client", "Received client action message" + newClientActionMessage.toString()); final int amount = action.roundMoney; if (amount > minimumBet) { runOnUiThread(new Runnable() { public void run() { updateMinBetAmount(amount); } }); } } if (m instanceof RequestClientActionFutureMessage) { final RequestClientActionFutureMessage rcafm = (RequestClientActionFutureMessage) m; pendingFuture = rcafm.futureId; Log.d("wePoker - Client", "Pending future: " + pendingFuture); runOnUiThread(new Runnable() { public void run() { enableActions(rcafm.round); }}); } if (m instanceof PoolMessage) { final PoolMessage pm = (PoolMessage) m; runOnUiThread(new Runnable() { @Override public void run() { serverUpdatePoolMoney(pm.poolMoney); }}); } if (m instanceof SmallBlindMessage) { final SmallBlindMessage sbm = (SmallBlindMessage) m; if (sbm.clientId == myClientID) { currentProcessedBet = sbm.amount; runOnUiThread(new Runnable() { public void run() { toastSmallBlind(sbm.amount); } }); } if (sbm.amount > minimumBet) { runOnUiThread(new Runnable() { public void run() { updateMinBetAmount(sbm.amount); } }); } } if (m instanceof BigBlindMessage) { final BigBlindMessage bbm = (BigBlindMessage) m; if (bbm.clientId == myClientID) { currentProcessedBet = bbm.amount; runOnUiThread(new Runnable() { public void run() { toastBigBlind(bbm.amount); } }); } if (bbm.amount > minimumBet) { 
runOnUiThread(new Runnable() { public void run() { updateMinBetAmount(bbm.amount); } }); } } if (m instanceof SetIDMessage) { final SetIDMessage sidm = (SetIDMessage) m; myClientID = sidm.id; SetClientParameterMessage pm = new SetClientParameterMessage(Settings.nickname, Settings.avatar, money); serverConnection.sendTCP(pm); } if (m instanceof RoundWinnersDeclarationMessage) { final RoundWinnersDeclarationMessage rwdm = (RoundWinnersDeclarationMessage) m; final List<PlayerState> players = rwdm.bestPlayers; boolean iWon = false; for (PlayerState player : players) { if (player.clientId == myClientID) { iWon = true; break; } } if (iWon) { money += rwdm.chips / players.size(); runOnUiThread(new Runnable() { public void run() { updateMoneyTitle(); quickOutputMessage(ClientActivity.this, "Congratulations, you won!!"); vibrate(VibrationType.Win); }}); } else { runOnUiThread(new Runnable() { public void run() { quickOutputMessage(ClientActivity.this, "You lost..."); quickOutputMessage(ClientActivity.this, rwdm.winMessageString()); vibrate(VibrationType.Lose); if (rwdm.showCards) { if (rwdm.bestPlayers.size() == 1) { // Show the winning cards of the player showWinningCards(rwdm.bestPlayers.iterator().next().gameHoleCards, rwdm.winMessageString()); } else { // Show the best hand showWinningCards(rwdm.bestHand.cards, rwdm.winMessageString()); } } }}); } } } }; private void updateHandGui(ReceiveHoleCardsMessage cards) { int id1 = getResources().getIdentifier("edu.vub.at.nfcpoker:drawable/" + cards.card1.toString(), null, null); int[] bitmapIds1 = new int[] { R.drawable.backside, id1 }; mCardView1.setPageProvider(new PageProvider(this, bitmapIds1)); String vMsg1 = cards.card1.toString().replace("_", " "); mCardView1.setContentDescription(vMsg1); speakMessage(this, vMsg1); int id2 = getResources().getIdentifier("edu.vub.at.nfcpoker:drawable/" + cards.card2.toString(), null, null); int[] bitmapIds2 = new int[] { R.drawable.backside, id2 }; mCardView2.setPageProvider(new PageProvider(this, bitmapIds2)); String vMsg2 = cards.card2.toString().replace("_", " "); mCardView2.setContentDescription(vMsg2); speakMessage(this, vMsg2); updateMoneyTitle(); } @Override protected void onResume() { if (useIncognitoMode) { incognitoMode = false; incognitoLight = -1; incognitoProximity = -1; incognitoDelay = new Timer(); if (useIncognitoLight) { Sensor lightSensor = sensorManager.getDefaultSensor(Sensor.TYPE_LIGHT); if (lightSensor != null) { sensorManager.registerListener(incognitoSensorEventListener, lightSensor, SensorManager.SENSOR_DELAY_NORMAL); incognitoLight = 0; } } if (useIncognitoProxmity) { Sensor proximitySensor = sensorManager.getDefaultSensor(Sensor.TYPE_PROXIMITY); if (proximitySensor != null) { sensorManager.registerListener(incognitoSensorEventListener, proximitySensor, SensorManager.SENSOR_DELAY_NORMAL); incognitoProximity = 0; } } } if (useFoldOnGravity) { Sensor gravitySensor = sensorManager.getDefaultSensor(Sensor.TYPE_GRAVITY); if (gravitySensor != null) { sensorManager.registerListener(foldGravitySensorEventListener, gravitySensor, SensorManager.SENSOR_DELAY_NORMAL); } Sensor proximitySensor = sensorManager.getDefaultSensor(Sensor.TYPE_PROXIMITY); if (proximitySensor != null) { sensorManager.registerListener(foldGravitySensorEventListener, proximitySensor, SensorManager.SENSOR_DELAY_NORMAL); } } mCardView1.onResume(); mCardView2.onResume(); if (nfcAdapter != null) { nfcAdapter.enableForegroundDispatch(this, pendingIntent, intentFiltersArray, null); } super.onResume(); } @Override protected void 
onPause() { sensorManager.unregisterListener(incognitoSensorEventListener); sensorManager.unregisterListener(foldGravitySensorEventListener); mCardView1.onPause(); mCardView2.onPause(); if (nfcAdapter != null) { nfcAdapter.disableForegroundDispatch(this); } super.onPause(); } @Override public void onStop() { super.onStop(); Settings.saveSettings(this); } @Override public void onDestroy() { if (tts != null) { tts.stop(); tts.shutdown(); tts = null; ttsInitialised = false; } super.onDestroy(); } @Override public boolean onCreateOptionsMenu(Menu menu) { MenuInflater inflater = getMenuInflater(); if (isDedicated) { inflater.inflate(R.menu.activity_client_dedicated, menu); } else { inflater.inflate(R.menu.activity_client, menu); } return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { // Handle item selection switch (item.getItemId()) { case R.id.speech: askSpeechInput(); return true; case R.id.show_wifi_settings: showQrCode(); return true; case R.id.allIn: performAllIn(); return true; case R.id.itemSetName: askNickName(); return true; case R.id.itemAddMoney: askAddMoney(); return true; case R.id.itemAbout: launchMainWebsite(); return true; default: return super.onOptionsItemSelected(item); } } @Override public void onNewIntent(Intent intent) { QRNFCFunctions.lastSeenNFCTag = intent.getParcelableExtra(NfcAdapter.EXTRA_TAG); } TextToSpeech.OnInitListener txtToSpeechListener = new TextToSpeech.OnInitListener() { @Override public void onInit(int status) { ttsInitialised = true; if (status == TextToSpeech.SUCCESS) { int result = tts.setLanguage(Locale.US); if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) { Log.e("wePoker - TTS", "This Language is not supported"); tts = null; } } else { Log.e("wePoker - TTS", "Initilization Failed!"); tts = null; } } }; private enum VibrationType { Short, Action, Win, Lose } private static final int dot = 200; // Length of a Morse Code "dot" in milliseconds private static final int dash = 500; // Length of a Morse Code "dash" in milliseconds private static final int short_gap = 200; // Length of Gap Between dots/dashes @SuppressWarnings("unused") private static final int medium_gap = 500; // Length of Gap Between Letters @SuppressWarnings("unused") private static final int long_gap = 1000; // Length of Gap Between Words private void vibrate(VibrationType buzzType) { long[] pattern; switch (buzzType) { default: case Short: pattern = new long[]{ 0 }; break; case Action: pattern = new long[]{ 0, dot, dash }; // Requires action break; case Win: pattern = new long[]{ 0, dot, dash, dot, short_gap, dash }; // Win break; case Lose: pattern = new long[]{ 0, dot, dot, dot }; // Lose break; } Vibrator v = (Vibrator) getSystemService(Context.VIBRATOR_SERVICE); if (v == null) return; v.vibrate(pattern, -1); } private static void quickOutputMessage(ClientActivity ca, String msg) { Toast t = Toast.makeText(ca, msg, Toast.LENGTH_SHORT); t.setGravity(Gravity.CENTER_VERTICAL|Gravity.CENTER_HORIZONTAL, 0, 0); t.show(); speakMessage(ca, msg); } private static void speakMessage(ClientActivity ca, String msg) { if (!ClientActivity.audioFeedback) return; if (ca.tts == null) return; if (!ca.ttsInitialised) return; ca.tts.speak(msg, TextToSpeech.QUEUE_FLUSH, null); } private void askSpeechInput() { if (!fold.isEnabled()) return; Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH); intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, "en-US"); try { startActivityForResult(intent, RESULT_SPEECH); } catch 
(ActivityNotFoundException a) { quickOutputMessage(this, "Oops! Your device doesn't support Speech to Text"); } } private void showQrCode() { QRNFCFunctions.showWifiConnectionDialog(this, serverWifiName, serverWifiPassword, serverIpAddress, serverPort, true); } private int txtToInteger(String msg) { try { return Integer.parseInt(msg); } catch(Exception e) { return -1; } } @Override protected void onActivityResult(int requestCode, int resultCode, Intent data) { super.onActivityResult(requestCode, resultCode, data); switch (requestCode) { case RESULT_SPEECH: { if (resultCode == RESULT_OK && null != data) { ArrayList<String> candidates = data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS); Log.d("wePoker - Text2Speech", "Got: " + candidates); double bestScore = 1; int bestActionI = -1; int amount = 0; for (int i = 0; i < candidates.size(); i++) { String candidate[] = candidates.get(i).split(" "); if (candidate.length <= 0) return; if (candidate.length > 3) return; String msg = candidate[0]; for (int j = 1; j < candidate.length; j++) { amount = txtToInteger(candidate[j]); if (amount >= 0) { break; } else { msg += " " + candidate[j]; } } String actions[] = { "bet", "check", "call", "fold", "all in" }; for (int j = 0; j < actions.length; j++) { double score = Levenshtein.ratioScore(actions[j], msg); if (score < 0.4 && score < bestScore) { // 40% of the chars do not match bestScore = score; bestActionI = j; } } } switch (bestActionI) { case 0: // Bet currentSelectedBet = amount; performBet(); break; case 1: // Check case 2: // Call performCheck(); break; case 3: // Fold performFold(); break; case 4: // All in performAllIn(); break; default: Log.d("wePoker - Text2Speech", "No action found"); quickOutputMessage(this, "No command recognised."); break; } } break; } } } private void askNickName() { final Context ctx = this; final Dialog moneyDialog; AlertDialog.Builder builder = new AlertDialog.Builder(this); final EditText input = new EditText(this); input.setInputType(InputType.TYPE_CLASS_TEXT); input.setText(Settings.nickname); builder.setView(input); builder.setPositiveButton("Set", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface di, int arg1) { try { Settings.nickname = input.getText().toString(); Settings.saveSettings(ctx); SetClientParameterMessage ca = new SetClientParameterMessage(Settings.nickname, Settings.avatar, money); serverConnection.sendTCP(new FutureMessage(pendingFuture, ca)); } catch (Exception e) { } } }); builder.setCancelable(true); moneyDialog = builder.create(); moneyDialog.show(); } private void askAddMoney() { final Dialog moneyDialog; AlertDialog.Builder builder = new AlertDialog.Builder(this); final EditText input = new EditText(this); input.setInputType(InputType.TYPE_CLASS_NUMBER | InputType.TYPE_NUMBER_FLAG_SIGNED); builder.setView(input); builder.setPositiveButton("Add Chips", new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface di, int arg1) { try { int extra = Integer.parseInt(input.getText().toString()); money += extra; // TODO Server: User X added #{extra} CheatMessage ca = new CheatMessage(extra); serverConnection.sendTCP(new FutureMessage(pendingFuture, ca)); } catch (Exception e) { } } }); builder.setCancelable(true); moneyDialog = builder.create(); moneyDialog.show(); } private void launchMainWebsite() { try { Intent intent = new Intent(Intent.ACTION_VIEW); intent.setData(Uri.parse(Splash.WEPOKER_WEBSITE)); startActivity(intent); } catch (Exception e) { e.printStackTrace(); } } enum 
ToastBetAmount { Positive, Negative, OutOfMoney, MinimumBet }; // Game private void updateBetAmount(int value) { ToastBetAmount toast = ToastBetAmount.Positive; if (value < 0) toast = ToastBetAmount.Negative; currentSelectedBet += value; if (minimumBet > currentSelectedBet) { currentSelectedBet = minimumBet; toast = ToastBetAmount.MinimumBet; } if (currentSelectedBet > money) { currentSelectedBet = money; toast = ToastBetAmount.OutOfMoney; } switch (toast) { case Positive: case Negative: break; case MinimumBet: quickOutputMessage(ClientActivity.this, "Minimum bet required"); break; case OutOfMoney: quickOutputMessage(ClientActivity.this, "Out of money !!"); break; } updateMoneyTitle(); updateBetAmount(); updateCheckCallText(); } private void updateBetAmount() { final TextView currentBet = (TextView) findViewById(R.id.currentBet); currentBet.setText(" \u20AC" + currentSelectedBet + " (\u20AC"+currentProcessedBet+")"); updateCheckCallText(); } private void updateMinBetAmount(int value) { minimumBet = value; final TextView textCurrentBet = (TextView) findViewById(R.id.minBet); textCurrentBet.setText(" " + minimumBet); updateMoneyTitle(); updateCheckCallText(); } // Interactivity SensorEventListener foldGravitySensorEventListener = new SensorEventListener() { @Override public void onSensorChanged(SensorEvent event) { if (event.sensor.getType()==Sensor.TYPE_GRAVITY) { // final float g = SensorManager.GRAVITY_EARTH; // Log.d("wePoker - foldGravitySensorEventListener", String.format("g_vec: (%f,%f,%f)", event.values[0], event.values[1], event.values[2])); float dx = event.values[2]; if (dx < -9) { if (foldGravity == 0) foldGravity = System.currentTimeMillis(); Log.d("wePoker - foldGravitySensorEventListener", "Phone on its back"); } else { foldGravity = 0; } } if (event.sensor.getType()==Sensor.TYPE_PROXIMITY) { float currentReading = event.values[0]; if (currentReading < 1) { if (foldProximity == 0) foldProximity = System.currentTimeMillis(); Log.d("wePoker - foldGravitySensorEventListener", "I found a table!" + currentReading); } else { foldProximity = 0; Log.d("wePoker - foldGravitySensorEventListener", "All clear!" + currentReading); } } if ((foldProximity != 0) && (foldGravity != 0)) { if (foldDelay != null) return; foldDelay = new Timer(); foldDelay.schedule(new TimerTask() { public void run() { runOnUiThread(new Runnable() { @Override public void run() { Log.d("wePoker - foldGravitySensorEventListener", "Folding!"); performFold(); } }); }}, 750); } else { if (foldDelay != null) { foldDelay.cancel(); foldDelay = null; } } } @Override public void onAccuracyChanged(Sensor arg0, int arg1) { } }; // Interactivity SensorEventListener incognitoSensorEventListener = new SensorEventListener() { @Override public void onSensorChanged(SensorEvent event) { if (event.sensor.getType()==Sensor.TYPE_LIGHT) { float currentReading = event.values[0]; if (currentReading < 10) { if (incognitoLight == 0) incognitoLight = System.currentTimeMillis(); Log.d("wePoker - incognitoSensorEventListener", "It's dark!" + currentReading); } else { incognitoLight = 0; Log.d("wePoker - incognitoSensorEventListener", "It's bright!" + currentReading); } } if (event.sensor.getType()==Sensor.TYPE_PROXIMITY) { float currentReading = event.values[0]; if (currentReading < 1) { if (incognitoProximity == 0) incognitoProximity = System.currentTimeMillis(); Log.d("wePoker - incognitoSensorEventListener", "I found a hand!" + currentReading); } else { incognitoProximity = 0; Log.d("wePoker - incognitoSensorEventListener", "All clear!" 
+ currentReading); } } if ((incognitoLight != 0) && (incognitoProximity != 0)) { if (!incognitoMode) { incognitoMode = true; incognitoDelay = new Timer(); incognitoDelay.schedule(new TimerTask() { public void run() { runOnUiThread(new Runnable() { @Override public void run() { Log.d("wePoker - incognitoSensorEventListener", "Showing cards!"); showCards(); } }); }}, 750); } } else { if (incognitoDelay != null) { incognitoDelay.cancel(); incognitoDelay = null; } if (incognitoMode) { incognitoMode = false; runOnUiThread(new Runnable() { @Override public void run() { hideCards(); } }); } } } @Override public void onAccuracyChanged(Sensor sensor, int accuracy) { } }; // UI private void showCards() { mCardView1.setCurrentIndex(1); mCardView2.setCurrentIndex(1); } private void showWinningCards(Card[] cards, String winMessage) { if (cards == null) return; int id1 = getResources().getIdentifier("edu.vub.at.nfcpoker:drawable/" + cards[0].toString(), null, null); int[] bitmapIds1 = new int[] { R.drawable.backside, id1 }; mCardView1.setPageProvider(new PageProvider(this, bitmapIds1)); mCardView1.setContentDescription(cards[0].toString().replace("_", " ")); int id2 = getResources().getIdentifier("edu.vub.at.nfcpoker:drawable/" + cards[1].toString(), null, null); int[] bitmapIds2 = new int[] { R.drawable.backside, id2 }; mCardView2.setPageProvider(new PageProvider(this, bitmapIds2)); mCardView2.setContentDescription(cards[1].toString().replace("_", " ")); showCards(); } private void hideCards() { mCardView1.setCurrentIndex(0); mCardView2.setCurrentIndex(0); } public void resetCards() { Log.d("wePoker - Server", "Hiding cards again"); nextToReveal = 0; runOnUiThread(new Runnable() { public void run() { LinearLayout ll = (LinearLayout) findViewById(R.id.cards); for (int i = 0; i < 5; i++) { final ImageButton ib = (ImageButton) ll.getChildAt(i); CardAnimation.setCardImage(ib, R.drawable.backside); } } }); } @Override public void onClick(View v) { // Filter f = (Filter) v.getTag(); // FilterFullscreenActivity.show(this, input, f); } class MyGestureDetector extends GestureDetector.SimpleOnGestureListener { @Override public boolean onDoubleTap(MotionEvent e) { if (touchedCard) { // Double tap on cards means check touchedCard = false; performCheck(); } return true; } @Override public boolean onFling(MotionEvent e1, MotionEvent e2, float velocityX, float velocityY) { try { if (Math.abs(e1.getX() - e2.getX()) > SWIPE_MAX_OFF_PATH) return false; // right to left swipe if(e1.getY() - e2.getY() > SWIPE_MIN_DISTANCE && Math.abs(velocityY) > SWIPE_THRESHOLD_VELOCITY) { updateBetAmount(currentChipSwiped); } else if (e2.getY() - e1.getY() > SWIPE_MIN_DISTANCE && Math.abs(velocityY) > SWIPE_THRESHOLD_VELOCITY) { updateBetAmount(-currentChipSwiped); } } catch (Exception e) { // nothing } return false; } } private void serverUpdatePoolMoney(int poolMoney) { if (isDedicated) return; final TextView textPool = (TextView) findViewById(R.id.pool); if (textPool == null) return; textPool.setText(" " + poolMoney); } private int cardToResourceID(Card c) { return getResources().getIdentifier("edu.vub.at.nfcpoker:drawable/" + c.toString(), null, null); } public void serverHideCards() { if (isDedicated) return; final ImageView card1 = (ImageView) findViewById(R.id.card1); card1.setImageResource(R.drawable.backside); card1.setContentDescription("No card yet"); final ImageView card2 = (ImageView) findViewById(R.id.card2); card2.setImageResource(R.drawable.backside); card2.setContentDescription("No card yet"); final ImageView card3 = 
(ImageView) findViewById(R.id.card3); card3.setImageResource(R.drawable.backside); card3.setContentDescription("No card yet"); final ImageView card4 = (ImageView) findViewById(R.id.card4); card4.setImageResource(R.drawable.backside); card4.setContentDescription("No card yet"); final ImageView card5 = (ImageView) findViewById(R.id.card5); card5.setImageResource(R.drawable.backside); card5.setContentDescription("No card yet"); } public void serverRevealCards(final Card[] cards) { if (cards.length > 1) { speakMessage(this, "Revealing card"); } else { speakMessage(this, "Revealing cards"); } for (Card c : cards) { Log.d("wePoker - Client-Server", "Revealing card " + c); LinearLayout ll = (LinearLayout) findViewById(R.id.cards); ImageButton ib = (ImageButton) ll.getChildAt(nextToReveal++); CardAnimation.setCardImage(ib, cardToResourceID(c)); String vMsg = c.toString().replace("_", " "); ib.setContentDescription(vMsg); speakMessage(this, vMsg); } } }
false
true
public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); // Force portrait mode. Do this in code because Google TV does not like it in the manifest. setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT); // Connectivity serverIpAddress = getIntent().getStringExtra(Constants.INTENT_SERVER_IP); serverPort = getIntent().getIntExtra(Constants.INTENT_PORT, CommLib.SERVER_PORT); isDedicated = getIntent().getBooleanExtra(Constants.INTENT_IS_DEDICATED, true); isServer = getIntent().getBooleanExtra(Constants.INTENT_IS_SERVER, false); serverBroadcast = getIntent().getStringExtra(Constants.INTENT_BROADCAST); serverWifiName = getIntent().getStringExtra(Constants.INTENT_WIFI_NAME); serverWifiPassword = getIntent().getStringExtra(Constants.INTENT_WIFI_PASSWORD); // Configure the Client Interface if (isDedicated && !audioFeedback) { setContentView(R.layout.activity_client_is_dedicated); } else { setContentView(R.layout.activity_client); } // Start server on a client if required if (isServer) { GameServer cps = new GameServer( new DummServerView(), false, serverIpAddress, serverBroadcast); cps.start(); } // Interactivity incognitoMode = false; incognitoLight = 0; incognitoProximity = 0; incognitoDelay = null; foldProximity = 0; foldGravity = 0; foldDelay = null; sensorManager = (SensorManager)getSystemService(Context.SENSOR_SERVICE); tts = new TextToSpeech(this, txtToSpeechListener); checkHeadset(); // NFC nfcAdapter = NfcAdapter.getDefaultAdapter(this); if (nfcAdapter != null) { pendingIntent = PendingIntent.getActivity( this, 0, new Intent(this, getClass()).addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP), 0); IntentFilter ndef = new IntentFilter(NfcAdapter.ACTION_NDEF_DISCOVERED); IntentFilter all = new IntentFilter(NfcAdapter.ACTION_TECH_DISCOVERED); try { // Handles all MIME based dispatches. You should specify only the ones that you need. 
ndef.addDataType("*/*"); } catch (MalformedMimeTypeException e) { throw new RuntimeException("fail", e); } intentFiltersArray = new IntentFilter[] { ndef, all }; // Broadcast NFC Beam try { // SDK API 14 Method setNdefPushMessage = nfcAdapter.getClass().getMethod("setNdefPushMessage", new Class[] { NdefMessage.class, Activity.class }); setNdefPushMessage.invoke(nfcAdapter, QRNFCFunctions.getServerInfoNdefMessage( serverWifiName, serverWifiPassword, serverIpAddress, serverPort, isDedicated), this); } catch (Exception e) { e.printStackTrace(); } } // Gesture detection gestureDetector = new GestureDetector(this, new MyGestureDetector()); gestureListener = new View.OnTouchListener() { @Override public boolean onTouch(View arg0, MotionEvent arg1) { if (firstSwipe) { firstSwipe = false; quickOutputMessage(ClientActivity.this, "Swipe up or down to add or remove money"); } int viewSwiped = arg0.getId(); switch(viewSwiped) { case R.id.whitechip: currentChipSwiped = 5; break; case R.id.redchip: currentChipSwiped = 10; break; case R.id.greenchip: currentChipSwiped = 20; break; case R.id.bluechip: currentChipSwiped = 50; break; case R.id.blackchip: currentChipSwiped = 100; break; case R.id.pCard1: touchedCard = true; break; case R.id.pCard2: touchedCard = true; break; default: Log.v("wePoker - Client", "wrong view swipped" + viewSwiped); touchedCard = false; } ImageView chip = (ImageView) findViewById(viewSwiped); chip.startAnimation(AnimationUtils.loadAnimation(ClientActivity.this, R.anim.rotate_full)); return gestureDetector.onTouchEvent(arg1); } }; final ImageView whitechip = (ImageView) findViewById(R.id.whitechip); whitechip.setOnClickListener(ClientActivity.this); whitechip.setOnTouchListener(gestureListener); final ImageView redchip = (ImageView) findViewById(R.id.redchip); redchip.setOnClickListener(ClientActivity.this); redchip.setOnTouchListener(gestureListener); final ImageView greenchip = (ImageView) findViewById(R.id.greenchip); greenchip.setOnClickListener(ClientActivity.this); greenchip.setOnTouchListener(gestureListener); final ImageView bluechip = (ImageView) findViewById(R.id.bluechip); bluechip.setOnClickListener(ClientActivity.this); bluechip.setOnTouchListener(gestureListener); final ImageView blackchip = (ImageView) findViewById(R.id.blackchip); blackchip.setOnClickListener(ClientActivity.this); blackchip.setOnTouchListener(gestureListener); mCardView1 = (CurlView) findViewById(R.id.pCard1); mCardView1.setPageProvider(new PageProvider(this, new int[] { R.drawable.backside, R.drawable.backside })); mCardView1.setCurrentIndex(0); mCardView1.setBackgroundColor(POKER_GREEN); mCardView1.setAllowLastPageCurl(false); mCardView2 = (CurlView) findViewById(R.id.pCard2); mCardView2.setPageProvider(new PageProvider(this, new int[] { R.drawable.backside, R.drawable.backside })); mCardView2.setCurrentIndex(0); mCardView2.setBackgroundColor(POKER_GREEN); mCardView2.setAllowLastPageCurl(false); bet = (Button) findViewById(R.id.Bet); check = (Button) findViewById(R.id.Check); fold = (Button) findViewById(R.id.Fold); bet.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performBet(); } }); check.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performCheck(); } }); fold.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performFold(); } }); currentSelectedBet = 0; currentProcessedBet = 0; minimumBet = 0; totalBet = 0; currentChipSwiped = 0; nextToReveal = 0; lastReceivedHoleCards = null; // Connect 
to the server new ConnectAsyncTask(serverIpAddress, serverPort, listener).execute(); showBarrier("Registering to server..."); // adding the hallo wePoker to the watch // clientGameState = ClientGameState.PLAYING; // Intent intent = new Intent(Control.Intents.CONTROL_START_REQUEST_INTENT); // intent.putExtra(Control.Intents.EXTRA_AEA_PACKAGE_NAME, this.getPackageName()); // intent.setPackage("com.sonyericsson.extras.smartwatch"); // sendBroadcast(intent, Registration.HOSTAPP_PERMISSION); // }
public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); // Force portrait mode. Do this in code because Google TV does not like it in the manifest. setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT); // Connectivity serverIpAddress = getIntent().getStringExtra(Constants.INTENT_SERVER_IP); serverPort = getIntent().getIntExtra(Constants.INTENT_PORT, CommLib.SERVER_PORT); isDedicated = getIntent().getBooleanExtra(Constants.INTENT_IS_DEDICATED, true); isServer = getIntent().getBooleanExtra(Constants.INTENT_IS_SERVER, false); serverBroadcast = getIntent().getStringExtra(Constants.INTENT_BROADCAST); serverWifiName = getIntent().getStringExtra(Constants.INTENT_WIFI_NAME); serverWifiPassword = getIntent().getStringExtra(Constants.INTENT_WIFI_PASSWORD); // Configure the Client Interface if (isDedicated && !audioFeedback) { setContentView(R.layout.activity_client_is_dedicated); } else { setContentView(R.layout.activity_client); } // Start server on a client if required if (isServer) { GameServer cps = new GameServer( new DummServerView(), false, serverIpAddress, serverBroadcast); cps.start(); } // Interactivity incognitoMode = false; incognitoLight = 0; incognitoProximity = 0; incognitoDelay = null; foldProximity = 0; foldGravity = 0; foldDelay = null; sensorManager = (SensorManager)getSystemService(Context.SENSOR_SERVICE); tts = new TextToSpeech(this, txtToSpeechListener); checkHeadset(); // NFC nfcAdapter = NfcAdapter.getDefaultAdapter(this); if (nfcAdapter != null) { pendingIntent = PendingIntent.getActivity( this, 0, new Intent(this, getClass()).addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP), 0); IntentFilter ndef = new IntentFilter(NfcAdapter.ACTION_NDEF_DISCOVERED); IntentFilter all = new IntentFilter(NfcAdapter.ACTION_TECH_DISCOVERED); try { // Handles all MIME based dispatches. You should specify only the ones that you need. 
ndef.addDataType("*/*"); } catch (MalformedMimeTypeException e) { throw new RuntimeException("fail", e); } intentFiltersArray = new IntentFilter[] { ndef, all }; // Broadcast NFC Beam try { // SDK API 14 Method setNdefPushMessage = nfcAdapter.getClass().getMethod("setNdefPushMessage", new Class[] { NdefMessage.class, Activity.class, Activity[].class}); setNdefPushMessage.invoke(nfcAdapter, QRNFCFunctions.getServerInfoNdefMessage( serverWifiName, serverWifiPassword, serverIpAddress, serverPort, isDedicated), this, null); } catch (Exception e) { e.printStackTrace(); } } // Gesture detection gestureDetector = new GestureDetector(this, new MyGestureDetector()); gestureListener = new View.OnTouchListener() { @Override public boolean onTouch(View arg0, MotionEvent arg1) { if (firstSwipe) { firstSwipe = false; quickOutputMessage(ClientActivity.this, "Swipe up or down to add or remove money"); } int viewSwiped = arg0.getId(); switch(viewSwiped) { case R.id.whitechip: currentChipSwiped = 5; break; case R.id.redchip: currentChipSwiped = 10; break; case R.id.greenchip: currentChipSwiped = 20; break; case R.id.bluechip: currentChipSwiped = 50; break; case R.id.blackchip: currentChipSwiped = 100; break; case R.id.pCard1: touchedCard = true; break; case R.id.pCard2: touchedCard = true; break; default: Log.v("wePoker - Client", "wrong view swipped" + viewSwiped); touchedCard = false; } ImageView chip = (ImageView) findViewById(viewSwiped); chip.startAnimation(AnimationUtils.loadAnimation(ClientActivity.this, R.anim.rotate_full)); return gestureDetector.onTouchEvent(arg1); } }; final ImageView whitechip = (ImageView) findViewById(R.id.whitechip); whitechip.setOnClickListener(ClientActivity.this); whitechip.setOnTouchListener(gestureListener); final ImageView redchip = (ImageView) findViewById(R.id.redchip); redchip.setOnClickListener(ClientActivity.this); redchip.setOnTouchListener(gestureListener); final ImageView greenchip = (ImageView) findViewById(R.id.greenchip); greenchip.setOnClickListener(ClientActivity.this); greenchip.setOnTouchListener(gestureListener); final ImageView bluechip = (ImageView) findViewById(R.id.bluechip); bluechip.setOnClickListener(ClientActivity.this); bluechip.setOnTouchListener(gestureListener); final ImageView blackchip = (ImageView) findViewById(R.id.blackchip); blackchip.setOnClickListener(ClientActivity.this); blackchip.setOnTouchListener(gestureListener); mCardView1 = (CurlView) findViewById(R.id.pCard1); mCardView1.setPageProvider(new PageProvider(this, new int[] { R.drawable.backside, R.drawable.backside })); mCardView1.setCurrentIndex(0); mCardView1.setBackgroundColor(POKER_GREEN); mCardView1.setAllowLastPageCurl(false); mCardView2 = (CurlView) findViewById(R.id.pCard2); mCardView2.setPageProvider(new PageProvider(this, new int[] { R.drawable.backside, R.drawable.backside })); mCardView2.setCurrentIndex(0); mCardView2.setBackgroundColor(POKER_GREEN); mCardView2.setAllowLastPageCurl(false); bet = (Button) findViewById(R.id.Bet); check = (Button) findViewById(R.id.Check); fold = (Button) findViewById(R.id.Fold); bet.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performBet(); } }); check.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performCheck(); } }); fold.setOnClickListener(new OnClickListener() { @Override public void onClick(View v) { performFold(); } }); currentSelectedBet = 0; currentProcessedBet = 0; minimumBet = 0; totalBet = 0; currentChipSwiped = 0; nextToReveal = 0; 
lastReceivedHoleCards = null; // Connect to the server new ConnectAsyncTask(serverIpAddress, serverPort, listener).execute(); showBarrier("Registering to server..."); // adding the hallo wePoker to the watch // clientGameState = ClientGameState.PLAYING; // Intent intent = new Intent(Control.Intents.CONTROL_START_REQUEST_INTENT); // intent.putExtra(Control.Intents.EXTRA_AEA_PACKAGE_NAME, this.getPackageName()); // intent.setPackage("com.sonyericsson.extras.smartwatch"); // sendBroadcast(intent, Registration.HOSTAPP_PERMISSION); // }
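The fix in this row reworks a reflective call to NfcAdapter.setNdefPushMessage, adding Activity[].class to the method lookup and a trailing null to the invoke, because that platform method ends in an Activity... varargs parameter. A small standalone sketch of why a varargs method has to be handled that way through reflection; Beam and push(...) are made-up stand-ins with no Android dependency.

// Standalone illustration of reflecting on a varargs method; Beam and push(...)
// are hypothetical stand-ins, not part of the poker code or the Android SDK.
import java.lang.reflect.Method;

public class VarargsReflectionSketch {

    public static class Beam {
        // A varargs parameter compiles to a trailing array parameter.
        public void push(String message, Object activity, Object... extras) {
            System.out.println("pushed \"" + message + "\" for " + activity);
        }
    }

    public static void main(String[] args) throws Exception {
        Beam beam = new Beam();

        // Looking the method up without the trailing array type fails: reflectively
        // there is no push(String, Object), only push(String, Object, Object[]).
        try {
            Beam.class.getMethod("push", String.class, Object.class);
        } catch (NoSuchMethodException expected) {
            System.out.println("lookup without the varargs slot fails: " + expected);
        }

        // The working lookup names the varargs slot as an array class, and invoke()
        // must supply a value for it -- null is acceptable when no extras are needed.
        Method push = Beam.class.getMethod("push", String.class, Object.class, Object[].class);
        push.invoke(beam, "hello", "mainActivity", null);
    }
}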
diff --git a/lucene/src/java/org/apache/lucene/util/ArrayUtil.java b/lucene/src/java/org/apache/lucene/util/ArrayUtil.java index b79c64ad4..1e66e7d44 100644 --- a/lucene/src/java/org/apache/lucene/util/ArrayUtil.java +++ b/lucene/src/java/org/apache/lucene/util/ArrayUtil.java @@ -1,441 +1,441 @@ package org.apache.lucene.util; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.util.Collection; /** * Methods for manipulating arrays. * * @lucene.internal */ public final class ArrayUtil { /** * @deprecated This constructor was not intended to be public and should not be used. * This class contains solely a static utility methods. * It will be made private in Lucene 4.0 */ // make private in 4.0! @Deprecated public ArrayUtil() {} // no instance /* Begin Apache Harmony code Revision taken on Friday, June 12. https://svn.apache.org/repos/asf/harmony/enhanced/classlib/archive/java6/modules/luni/src/main/java/java/lang/Integer.java */ /** * Parses the string argument as if it was an int value and returns the * result. Throws NumberFormatException if the string does not represent an * int quantity. * * @param chars a string representation of an int quantity. * @return int the value represented by the argument * @throws NumberFormatException if the argument could not be parsed as an int quantity. */ public static int parseInt(char[] chars) throws NumberFormatException { return parseInt(chars, 0, chars.length, 10); } /** * Parses a char array into an int. * @param chars the character array * @param offset The offset into the array * @param len The length * @return the int * @throws NumberFormatException if it can't parse */ public static int parseInt(char[] chars, int offset, int len) throws NumberFormatException { return parseInt(chars, offset, len, 10); } /** * Parses the string argument as if it was an int value and returns the * result. Throws NumberFormatException if the string does not represent an * int quantity. The second argument specifies the radix to use when parsing * the value. * * @param chars a string representation of an int quantity. * @param radix the base to use for conversion. * @return int the value represented by the argument * @throws NumberFormatException if the argument could not be parsed as an int quantity. 
*/ public static int parseInt(char[] chars, int offset, int len, int radix) throws NumberFormatException { if (chars == null || radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) { throw new NumberFormatException(); } int i = 0; if (len == 0) { throw new NumberFormatException("chars length is 0"); } boolean negative = chars[offset + i] == '-'; if (negative && ++i == len) { throw new NumberFormatException("can't convert to an int"); } if (negative == true){ offset++; len--; } return parse(chars, offset, len, radix, negative); } private static int parse(char[] chars, int offset, int len, int radix, boolean negative) throws NumberFormatException { int max = Integer.MIN_VALUE / radix; int result = 0; for (int i = 0; i < len; i++){ int digit = Character.digit(chars[i + offset], radix); if (digit == -1) { throw new NumberFormatException("Unable to parse"); } if (max > result) { throw new NumberFormatException("Unable to parse"); } int next = result * radix - digit; if (next > result) { throw new NumberFormatException("Unable to parse"); } result = next; } /*while (offset < len) { }*/ if (!negative) { result = -result; if (result < 0) { throw new NumberFormatException("Unable to parse"); } } return result; } /* END APACHE HARMONY CODE */ /** Returns an array size >= minTargetSize, generally * over-allocating exponentially to achieve amortized * linear-time cost as the array grows. * * NOTE: this was originally borrowed from Python 2.4.2 * listobject.c sources (attribution in LICENSE.txt), but * has now been substantially changed based on * discussions from java-dev thread with subject "Dynamic * array reallocation algorithms", started on Jan 12 * 2010. * * @param minTargetSize Minimum required value to be returned. * @param bytesPerElement Bytes used by each element of * the array. See constants in {@link RamUsageEstimator}. * * @lucene.internal */ public static int oversize(int minTargetSize, int bytesPerElement) { if (minTargetSize < 0) { // catch usage that accidentally overflows int throw new IllegalArgumentException("invalid array size " + minTargetSize); } if (minTargetSize == 0) { // wait until at least one element is requested return 0; } // asymptotic exponential growth by 1/8th, favors - // spending a bit more CPU to not tye up too much wasted + // spending a bit more CPU to not tie up too much wasted // RAM: int extra = minTargetSize >> 3; if (extra < 3) { // for very small arrays, where constant overhead of // realloc is presumably relatively high, we grow // faster extra = 3; } int newSize = minTargetSize + extra; // add 7 to allow for worst case byte alignment addition below: if (newSize+7 < 0) { // int overflowed -- return max allowed array size return Integer.MAX_VALUE; } if (Constants.JRE_IS_64BIT) { // round up to 8 byte alignment in 64bit env switch(bytesPerElement) { case 4: // round up to multiple of 2 return (newSize + 1) & 0x7ffffffe; case 2: // round up to multiple of 4 return (newSize + 3) & 0x7ffffffc; case 1: // round up to multiple of 8 return (newSize + 7) & 0x7ffffff8; case 8: // no rounding default: // odd (invalid?) size return newSize; } } else { // round up to 4 byte alignment in 64bit env switch(bytesPerElement) { case 2: // round up to multiple of 2 return (newSize + 1) & 0x7ffffffe; case 1: // round up to multiple of 4 return (newSize + 3) & 0x7ffffffc; case 4: case 8: // no rounding default: // odd (invalid?) 
size return newSize; } } } public static int getShrinkSize(int currentSize, int targetSize, int bytesPerElement) { final int newSize = oversize(targetSize, bytesPerElement); // Only reallocate if we are "substantially" smaller. // This saves us from "running hot" (constantly making a // bit bigger then a bit smaller, over and over): if (newSize < currentSize / 2) return newSize; else return currentSize; } public static short[] grow(short[] array, int minSize) { if (array.length < minSize) { short[] newArray = new short[oversize(minSize, RamUsageEstimator.NUM_BYTES_SHORT)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static short[] grow(short[] array) { return grow(array, 1 + array.length); } public static short[] shrink(short[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_SHORT); if (newSize != array.length) { short[] newArray = new short[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } public static int[] grow(int[] array, int minSize) { if (array.length < minSize) { int[] newArray = new int[oversize(minSize, RamUsageEstimator.NUM_BYTES_INT)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static int[] grow(int[] array) { return grow(array, 1 + array.length); } public static int[] shrink(int[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_INT); if (newSize != array.length) { int[] newArray = new int[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } public static long[] grow(long[] array, int minSize) { if (array.length < minSize) { long[] newArray = new long[oversize(minSize, RamUsageEstimator.NUM_BYTES_LONG)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static long[] grow(long[] array) { return grow(array, 1 + array.length); } public static long[] shrink(long[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_LONG); if (newSize != array.length) { long[] newArray = new long[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } public static byte[] grow(byte[] array, int minSize) { if (array.length < minSize) { byte[] newArray = new byte[oversize(minSize, 1)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static byte[] grow(byte[] array) { return grow(array, 1 + array.length); } public static byte[] shrink(byte[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize, 1); if (newSize != array.length) { byte[] newArray = new byte[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } public static char[] grow(char[] array, int minSize) { if (array.length < minSize) { char[] newArray = new char[oversize(minSize, RamUsageEstimator.NUM_BYTES_CHAR)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static char[] grow(char[] array) { return grow(array, 1 + array.length); } public static char[] shrink(char[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_CHAR); if (newSize != array.length) { char[] newArray = new char[newSize]; 
System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } /** * Returns hash of chars in range start (inclusive) to * end (inclusive) */ public static int hashCode(char[] array, int start, int end) { int code = 0; for (int i = end - 1; i >= start; i--) code = code * 31 + array[i]; return code; } /** * Returns hash of chars in range start (inclusive) to * end (inclusive) */ public static int hashCode(byte[] array, int start, int end) { int code = 0; for (int i = end - 1; i >= start; i--) code = code * 31 + array[i]; return code; } // Since Arrays.equals doesn't implement offsets for equals /** * See if two array slices are the same. * * @param left The left array to compare * @param offsetLeft The offset into the array. Must be positive * @param right The right array to compare * @param offsetRight the offset into the right array. Must be positive * @param length The length of the section of the array to compare * @return true if the two arrays, starting at their respective offsets, are equal * * @see java.util.Arrays#equals(char[], char[]) */ public static boolean equals(char[] left, int offsetLeft, char[] right, int offsetRight, int length) { if ((offsetLeft + length <= left.length) && (offsetRight + length <= right.length)) { for (int i = 0; i < length; i++) { if (left[offsetLeft + i] != right[offsetRight + i]) { return false; } } return true; } return false; } // Since Arrays.equals doesn't implement offsets for equals /** * See if two array slices are the same. * * @param left The left array to compare * @param offsetLeft The offset into the array. Must be positive * @param right The right array to compare * @param offsetRight the offset into the right array. Must be positive * @param length The length of the section of the array to compare * @return true if the two arrays, starting at their respective offsets, are equal * * @see java.util.Arrays#equals(char[], char[]) */ public static boolean equals(int[] left, int offsetLeft, int[] right, int offsetRight, int length) { if ((offsetLeft + length <= left.length) && (offsetRight + length <= right.length)) { for (int i = 0; i < length; i++) { if (left[offsetLeft + i] != right[offsetRight + i]) { return false; } } return true; } return false; } public static int[] toIntArray(Collection<Integer> ints) { final int[] result = new int[ints.size()]; int upto = 0; for(int v : ints) { result[upto++] = v; } // paranoia: assert upto == result.length; return result; } }
true
true
public static int oversize(int minTargetSize, int bytesPerElement) { if (minTargetSize < 0) { // catch usage that accidentally overflows int throw new IllegalArgumentException("invalid array size " + minTargetSize); } if (minTargetSize == 0) { // wait until at least one element is requested return 0; } // asymptotic exponential growth by 1/8th, favors // spending a bit more CPU to not tye up too much wasted // RAM: int extra = minTargetSize >> 3; if (extra < 3) { // for very small arrays, where constant overhead of // realloc is presumably relatively high, we grow // faster extra = 3; } int newSize = minTargetSize + extra; // add 7 to allow for worst case byte alignment addition below: if (newSize+7 < 0) { // int overflowed -- return max allowed array size return Integer.MAX_VALUE; } if (Constants.JRE_IS_64BIT) { // round up to 8 byte alignment in 64bit env switch(bytesPerElement) { case 4: // round up to multiple of 2 return (newSize + 1) & 0x7ffffffe; case 2: // round up to multiple of 4 return (newSize + 3) & 0x7ffffffc; case 1: // round up to multiple of 8 return (newSize + 7) & 0x7ffffff8; case 8: // no rounding default: // odd (invalid?) size return newSize; } } else { // round up to 4 byte alignment in 64bit env switch(bytesPerElement) { case 2: // round up to multiple of 2 return (newSize + 1) & 0x7ffffffe; case 1: // round up to multiple of 4 return (newSize + 3) & 0x7ffffffc; case 4: case 8: // no rounding default: // odd (invalid?) size return newSize; } } }
public static int oversize(int minTargetSize, int bytesPerElement) { if (minTargetSize < 0) { // catch usage that accidentally overflows int throw new IllegalArgumentException("invalid array size " + minTargetSize); } if (minTargetSize == 0) { // wait until at least one element is requested return 0; } // asymptotic exponential growth by 1/8th, favors // spending a bit more CPU to not tie up too much wasted // RAM: int extra = minTargetSize >> 3; if (extra < 3) { // for very small arrays, where constant overhead of // realloc is presumably relatively high, we grow // faster extra = 3; } int newSize = minTargetSize + extra; // add 7 to allow for worst case byte alignment addition below: if (newSize+7 < 0) { // int overflowed -- return max allowed array size return Integer.MAX_VALUE; } if (Constants.JRE_IS_64BIT) { // round up to 8 byte alignment in 64bit env switch(bytesPerElement) { case 4: // round up to multiple of 2 return (newSize + 1) & 0x7ffffffe; case 2: // round up to multiple of 4 return (newSize + 3) & 0x7ffffffc; case 1: // round up to multiple of 8 return (newSize + 7) & 0x7ffffff8; case 8: // no rounding default: // odd (invalid?) size return newSize; } } else { // round up to 4 byte alignment in 64bit env switch(bytesPerElement) { case 2: // round up to multiple of 2 return (newSize + 1) & 0x7ffffffe; case 1: // round up to multiple of 4 return (newSize + 3) & 0x7ffffffc; case 4: case 8: // no rounding default: // odd (invalid?) size return newSize; } } }
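A minimal, self-contained sketch, not part of the corpus record above: the class and method names are invented, and it re-creates only the "grow by roughly 1/8th" policy that the oversize() javadoc describes, to show why repeated grow() calls stay amortized linear instead of reallocating on every append.

public final class OversizeDemo {
    // Grow the requested size by roughly 1/8th, with a small floor for tiny arrays.
    static int oversize(int minTargetSize) {
        if (minTargetSize < 0) throw new IllegalArgumentException("invalid array size " + minTargetSize);
        if (minTargetSize == 0) return 0;
        int extra = minTargetSize >> 3;
        if (extra < 3) extra = 3;                 // avoid churning on very small arrays
        int newSize = minTargetSize + extra;
        return newSize < 0 ? Integer.MAX_VALUE : newSize;  // clamp on int overflow
    }

    public static void main(String[] args) {
        int[] data = new int[0];
        int reallocations = 0;
        for (int i = 0; i < 1_000_000; i++) {
            if (i >= data.length) {               // grow only when the array is full
                int[] bigger = new int[oversize(i + 1)];
                System.arraycopy(data, 0, bigger, 0, data.length);
                data = bigger;
                reallocations++;
            }
            data[i] = i;
        }
        // With ~1/8th over-allocation a million appends cost on the order of a
        // hundred reallocations, not one per append.
        System.out.println("reallocations = " + reallocations);
    }
}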
diff --git a/Chat/src/Utilities/Parser.java b/Chat/src/Utilities/Parser.java index 14f8b2c..43bba9f 100644 --- a/Chat/src/Utilities/Parser.java +++ b/Chat/src/Utilities/Parser.java @@ -1,103 +1,104 @@ package Utilities; import Messages.*; public class Parser { int op; long length; long reserved; String options; boolean needsMore=false; public int parse(byte[] message){//returns the number of body bytes it needs System.out.println("Parsing"); if(needsMore){ return -2; } needsMore=true; op=(int)fromByteArray(new byte[]{message[0]}); length=fromByteArray(new byte[]{message[1],message[2]}); reserved=fromByteArray(new byte[]{message[3]}); byte[] optArray=new byte[message.length-4]; options=new String(optArray,0,optArray.length); switch(op){ case 1: //udp broadcast return (int) UDPBroadcastMessage.minSize; case 2:// server handshake start return (int) ServerHandShakeMessage.minSize; case 3:// client handshake start return (int) ClientHandShakeMessage.minSize; case 4://server accept return 0; case 5://client accept return 0; case 6://client request update return (int) ClientRequestUpdateMessage.minSize; case 7://server confirmation of update return (int) ServerConfirmationUpdateMessage.minSize; case 8://client request info from server return (int) ClientRequestInfoMessage.minSize; case 9://server sends requested info return (int) ServerSendsInfoMessage.minSize; case 10://Decline to connect Message return 0; case 11: //chat message if(length-132<0){ return -1; } else{ return (int) (length-132); } case 12: //lookup failed return (int) LookupFailedMessage.minSize; case 13://Error return 0; case 14://name collision return (int) NameCollisionMessage.minSize; default: + System.out.println("Error op code was "+Integer.toString(op)); return -1; } } public Message addBody(byte[] body){ System.out.println("Adding body"); needsMore=false; switch(op){ case 1: //udp broadcast return new UDPBroadcastMessage(op,length,reserved,options,body); case 2:// server handshake start return new ServerHandShakeMessage(op,length,reserved,options,body); case 3:// client handshake start return new ClientHandShakeMessage(op,length,reserved,options,body); case 4://server accept return new ServerAcceptMessage(op,length,reserved,options,body); case 5://client accept return new ClientAcceptMessage(op,length,reserved,options,body); case 6://client request update return new ClientRequestUpdateMessage(op,length,reserved,options,body); case 7://server confirmation of update return new ServerConfirmationUpdateMessage(op,length,reserved,options,body); case 8://client request info from server return new ClientRequestInfoMessage(op,length,reserved,options,body); case 9://server sends requested info return new ServerSendsInfoMessage(op,length,reserved,options,body); case 10://Decline to connect Message return new DeclineConnectMessage(op,length,reserved,options,body); case 11: //chat message return new ChatMsgMessage(op,length,reserved,options,body); case 12: //lookup failed return new LookupFailedMessage(op,length,reserved,options,body); case 13://Error return new ErrorMessage(op,length,reserved,options,body); case 14://name collision return new NameCollisionMessage(op,length,reserved,options,body); default: return null; } } protected long fromByteArray(byte[] bytes) { long total=0; for(int i=0;i<bytes.length;i++){ total+=(long)i*Math.pow(2, 8*i); } return total; } }
true
true
public int parse(byte[] message){//returns the number of body bytes it needs System.out.println("Parsing"); if(needsMore){ return -2; } needsMore=true; op=(int)fromByteArray(new byte[]{message[0]}); length=fromByteArray(new byte[]{message[1],message[2]}); reserved=fromByteArray(new byte[]{message[3]}); byte[] optArray=new byte[message.length-4]; options=new String(optArray,0,optArray.length); switch(op){ case 1: //udp broadcast return (int) UDPBroadcastMessage.minSize; case 2:// server handshake start return (int) ServerHandShakeMessage.minSize; case 3:// client handshake start return (int) ClientHandShakeMessage.minSize; case 4://server accept return 0; case 5://client accept return 0; case 6://client request update return (int) ClientRequestUpdateMessage.minSize; case 7://server confirmation of update return (int) ServerConfirmationUpdateMessage.minSize; case 8://client request info from server return (int) ClientRequestInfoMessage.minSize; case 9://server sends requested info return (int) ServerSendsInfoMessage.minSize; case 10://Decline to connect Message return 0; case 11: //chat message if(length-132<0){ return -1; } else{ return (int) (length-132); } case 12: //lookup failed return (int) LookupFailedMessage.minSize; case 13://Error return 0; case 14://name collision return (int) NameCollisionMessage.minSize; default: return -1; } }
public int parse(byte[] message){//returns the number of body bytes it needs System.out.println("Parsing"); if(needsMore){ return -2; } needsMore=true; op=(int)fromByteArray(new byte[]{message[0]}); length=fromByteArray(new byte[]{message[1],message[2]}); reserved=fromByteArray(new byte[]{message[3]}); byte[] optArray=new byte[message.length-4]; options=new String(optArray,0,optArray.length); switch(op){ case 1: //udp broadcast return (int) UDPBroadcastMessage.minSize; case 2:// server handshake start return (int) ServerHandShakeMessage.minSize; case 3:// client handshake start return (int) ClientHandShakeMessage.minSize; case 4://server accept return 0; case 5://client accept return 0; case 6://client request update return (int) ClientRequestUpdateMessage.minSize; case 7://server confirmation of update return (int) ServerConfirmationUpdateMessage.minSize; case 8://client request info from server return (int) ClientRequestInfoMessage.minSize; case 9://server sends requested info return (int) ServerSendsInfoMessage.minSize; case 10://Decline to connect Message return 0; case 11: //chat message if(length-132<0){ return -1; } else{ return (int) (length-132); } case 12: //lookup failed return (int) LookupFailedMessage.minSize; case 13://Error return 0; case 14://name collision return (int) NameCollisionMessage.minSize; default: System.out.println("Error op code was "+Integer.toString(op)); return -1; } }
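A hypothetical standalone sketch, not taken from the Chat project: it only illustrates how the 4-byte header that parse() reads above (op, two-byte length, reserved) can be decoded, and what the returned body-byte count means for the chat-message op code. The little-endian byte order and the masking in fromBytesLE are assumptions based on the intent of the project's fromByteArray() helper; the project's Message classes are not used here.

public final class HeaderDecodeDemo {
    // Decode a little-endian unsigned value: bytes[0] is the least significant byte.
    static long fromBytesLE(byte[] bytes) {
        long total = 0;
        for (int i = 0; i < bytes.length; i++) {
            total += (bytes[i] & 0xFFL) << (8 * i);   // mask to avoid sign extension
        }
        return total;
    }

    public static void main(String[] args) {
        // header: op = 11 (chat message), length = 200 in two bytes, reserved = 0
        byte[] header = { 11, (byte) 200, 0, 0 };
        int op = (int) fromBytesLE(new byte[] { header[0] });
        long length = fromBytesLE(new byte[] { header[1], header[2] });
        long reserved = fromBytesLE(new byte[] { header[3] });
        // For op 11 the parser above asks for (length - 132) additional body bytes.
        System.out.println("op=" + op + " length=" + length + " reserved=" + reserved
                + " bodyBytesNeeded=" + (length - 132));
    }
}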
diff --git a/target_explorer/plugins/org.eclipse.tcf.te.tests/src/org/eclipse/tcf/te/tests/tcf/launch/TcfLaunchTests.java b/target_explorer/plugins/org.eclipse.tcf.te.tests/src/org/eclipse/tcf/te/tests/tcf/launch/TcfLaunchTests.java index 8486faa87..e5ee2dbef 100644 --- a/target_explorer/plugins/org.eclipse.tcf.te.tests/src/org/eclipse/tcf/te/tests/tcf/launch/TcfLaunchTests.java +++ b/target_explorer/plugins/org.eclipse.tcf.te.tests/src/org/eclipse/tcf/te/tests/tcf/launch/TcfLaunchTests.java @@ -1,118 +1,118 @@ /******************************************************************************* * Copyright (c) 2012 Wind River Systems, Inc. and others. All rights reserved. * This program and the accompanying materials are made available under the terms * of the Eclipse Public License v1.0 which accompanies this distribution, and is * available at http://www.eclipse.org/legal/epl-v10.html * * Contributors: * Wind River Systems - initial API and implementation *******************************************************************************/ package org.eclipse.tcf.te.tests.tcf.launch; import junit.framework.Test; import junit.framework.TestSuite; import org.eclipse.core.runtime.IPath; import org.eclipse.core.runtime.NullProgressMonitor; import org.eclipse.core.runtime.Path; import org.eclipse.core.runtime.Platform; import org.eclipse.debug.core.ILaunchConfiguration; import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy; import org.eclipse.debug.core.ILaunchManager; import org.eclipse.tcf.te.launch.core.lm.LaunchManager; import org.eclipse.tcf.te.launch.core.lm.LaunchSpecification; import org.eclipse.tcf.te.launch.core.lm.interfaces.ILaunchSpecification; import org.eclipse.tcf.te.launch.core.persistence.filetransfer.FileTransfersPersistenceDelegate; import org.eclipse.tcf.te.launch.core.persistence.launchcontext.LaunchContextsPersistenceDelegate; import org.eclipse.tcf.te.runtime.model.interfaces.IModelNode; import org.eclipse.tcf.te.runtime.services.filetransfer.FileTransferItem; import org.eclipse.tcf.te.runtime.services.interfaces.filetransfer.IFileTransferItem; import org.eclipse.tcf.te.runtime.utils.Host; import org.eclipse.tcf.te.tcf.launch.core.interfaces.ILaunchTypes; import org.eclipse.tcf.te.tcf.launch.core.interfaces.IRemoteAppLaunchAttributes; import org.eclipse.tcf.te.tests.tcf.TcfTestCase; /** * TCF Launch tests. */ public class TcfLaunchTests extends TcfTestCase { /** * Provides a test suite to the caller which combines all single * test bundled within this category. * * @return Test suite containing all test for this test category. */ public static Test getTestSuite() { TestSuite testSuite = new TestSuite("TCF Launch tests"); //$NON-NLS-1$ // add ourself to the test suite testSuite.addTestSuite(TcfLaunchTests.class); return testSuite; } public void testRemoteAppLaunch() { final ILaunchSpecification spec = new LaunchSpecification(ILaunchTypes.REMOTE_APPLICATION, ILaunchManager.RUN_MODE); LaunchContextsPersistenceDelegate.setLaunchContexts(spec, new IModelNode[]{peerModel}); IPath helloWorldLocation = getHelloWorldLocation(); assertTrue("Missing hello world example for current OS and Arch:" + Platform.getOS() + "/" + Platform.getOSArch(), //$NON-NLS-1$ //$NON-NLS-2$ helloWorldLocation != null && helloWorldLocation.toFile().exists() && - helloWorldLocation.toFile().canExecute()); + helloWorldLocation.toFile().canRead()); String temp = System.getProperty("java.io.tmpdir"); //$NON-NLS-1$ IPath tempDir = temp != null ? 
new Path(temp) : null; assertNotNull("Missing java temp directory", tempDir); //$NON-NLS-1$ IPath tempHelloWorld = tempDir.append(helloWorldLocation.lastSegment()); if (tempHelloWorld.toFile().exists()) { tempHelloWorld.toFile().delete(); } assertFalse("Cannot delete process image " + tempHelloWorld.toOSString(), tempHelloWorld.toFile().exists()); //$NON-NLS-1$ IPath outFile = tempDir.append("/HelloWorld.out"); //$NON-NLS-1$ if (outFile.toFile().exists()) { outFile.toFile().delete(); } assertFalse("Cannot delete console output file " + outFile.toOSString(), outFile.toFile().exists()); //$NON-NLS-1$ FileTransfersPersistenceDelegate.setFileTransfers(spec, new IFileTransferItem[]{new FileTransferItem(helloWorldLocation, tempDir)}); spec.addAttribute(IRemoteAppLaunchAttributes.ATTR_PROCESS_IMAGE, tempHelloWorld.toOSString()); ILaunchConfiguration config = null; try { config = LaunchManager.getInstance().getLaunchConfiguration(spec, true); ILaunchConfigurationWorkingCopy wc = config.getWorkingCopy(); wc.setAttribute("org.eclipse.debug.ui.ATTR_CONSOLE_OUTPUT_ON", false); //$NON-NLS-1$ wc.setAttribute("org.eclipse.debug.ui.ATTR_CAPTURE_IN_FILE", outFile.toOSString()); //$NON-NLS-1$ config = wc.doSave(); } catch (Exception e) { assertNull("Unexpected exception when creating launch: " + e, e); //$NON-NLS-1$ } try { LaunchManager.getInstance().launch(config, ILaunchManager.RUN_MODE, false, new NullProgressMonitor()); } catch (Exception e) { assertNull("Unexpected exception when launching hello world: " + e, e); //$NON-NLS-1$ } assertTrue("Missing console output file", outFile.toFile().exists() && outFile.toFile().length() > 0); //$NON-NLS-1$ } private IPath getHelloWorldLocation() { IPath path = getDataLocation("helloWorld", true, true); //$NON-NLS-1$ if (path != null) { path = path.append("HelloWorld"); //$NON-NLS-1$ if (Host.isWindowsHost()) { path = path.addFileExtension("exe"); //$NON-NLS-1$ } } return path; } }
true
true
public void testRemoteAppLaunch() { final ILaunchSpecification spec = new LaunchSpecification(ILaunchTypes.REMOTE_APPLICATION, ILaunchManager.RUN_MODE); LaunchContextsPersistenceDelegate.setLaunchContexts(spec, new IModelNode[]{peerModel}); IPath helloWorldLocation = getHelloWorldLocation(); assertTrue("Missing hello world example for current OS and Arch:" + Platform.getOS() + "/" + Platform.getOSArch(), //$NON-NLS-1$ //$NON-NLS-2$ helloWorldLocation != null && helloWorldLocation.toFile().exists() && helloWorldLocation.toFile().canExecute()); String temp = System.getProperty("java.io.tmpdir"); //$NON-NLS-1$ IPath tempDir = temp != null ? new Path(temp) : null; assertNotNull("Missing java temp directory", tempDir); //$NON-NLS-1$ IPath tempHelloWorld = tempDir.append(helloWorldLocation.lastSegment()); if (tempHelloWorld.toFile().exists()) { tempHelloWorld.toFile().delete(); } assertFalse("Cannot delete process image " + tempHelloWorld.toOSString(), tempHelloWorld.toFile().exists()); //$NON-NLS-1$ IPath outFile = tempDir.append("/HelloWorld.out"); //$NON-NLS-1$ if (outFile.toFile().exists()) { outFile.toFile().delete(); } assertFalse("Cannot delete console output file " + outFile.toOSString(), outFile.toFile().exists()); //$NON-NLS-1$ FileTransfersPersistenceDelegate.setFileTransfers(spec, new IFileTransferItem[]{new FileTransferItem(helloWorldLocation, tempDir)}); spec.addAttribute(IRemoteAppLaunchAttributes.ATTR_PROCESS_IMAGE, tempHelloWorld.toOSString()); ILaunchConfiguration config = null; try { config = LaunchManager.getInstance().getLaunchConfiguration(spec, true); ILaunchConfigurationWorkingCopy wc = config.getWorkingCopy(); wc.setAttribute("org.eclipse.debug.ui.ATTR_CONSOLE_OUTPUT_ON", false); //$NON-NLS-1$ wc.setAttribute("org.eclipse.debug.ui.ATTR_CAPTURE_IN_FILE", outFile.toOSString()); //$NON-NLS-1$ config = wc.doSave(); } catch (Exception e) { assertNull("Unexpected exception when creating launch: " + e, e); //$NON-NLS-1$ } try { LaunchManager.getInstance().launch(config, ILaunchManager.RUN_MODE, false, new NullProgressMonitor()); } catch (Exception e) { assertNull("Unexpected exception when launching hello world: " + e, e); //$NON-NLS-1$ } assertTrue("Missing console output file", outFile.toFile().exists() && outFile.toFile().length() > 0); //$NON-NLS-1$ }
public void testRemoteAppLaunch() { final ILaunchSpecification spec = new LaunchSpecification(ILaunchTypes.REMOTE_APPLICATION, ILaunchManager.RUN_MODE); LaunchContextsPersistenceDelegate.setLaunchContexts(spec, new IModelNode[]{peerModel}); IPath helloWorldLocation = getHelloWorldLocation(); assertTrue("Missing hello world example for current OS and Arch:" + Platform.getOS() + "/" + Platform.getOSArch(), //$NON-NLS-1$ //$NON-NLS-2$ helloWorldLocation != null && helloWorldLocation.toFile().exists() && helloWorldLocation.toFile().canRead()); String temp = System.getProperty("java.io.tmpdir"); //$NON-NLS-1$ IPath tempDir = temp != null ? new Path(temp) : null; assertNotNull("Missing java temp directory", tempDir); //$NON-NLS-1$ IPath tempHelloWorld = tempDir.append(helloWorldLocation.lastSegment()); if (tempHelloWorld.toFile().exists()) { tempHelloWorld.toFile().delete(); } assertFalse("Cannot delete process image " + tempHelloWorld.toOSString(), tempHelloWorld.toFile().exists()); //$NON-NLS-1$ IPath outFile = tempDir.append("/HelloWorld.out"); //$NON-NLS-1$ if (outFile.toFile().exists()) { outFile.toFile().delete(); } assertFalse("Cannot delete console output file " + outFile.toOSString(), outFile.toFile().exists()); //$NON-NLS-1$ FileTransfersPersistenceDelegate.setFileTransfers(spec, new IFileTransferItem[]{new FileTransferItem(helloWorldLocation, tempDir)}); spec.addAttribute(IRemoteAppLaunchAttributes.ATTR_PROCESS_IMAGE, tempHelloWorld.toOSString()); ILaunchConfiguration config = null; try { config = LaunchManager.getInstance().getLaunchConfiguration(spec, true); ILaunchConfigurationWorkingCopy wc = config.getWorkingCopy(); wc.setAttribute("org.eclipse.debug.ui.ATTR_CONSOLE_OUTPUT_ON", false); //$NON-NLS-1$ wc.setAttribute("org.eclipse.debug.ui.ATTR_CAPTURE_IN_FILE", outFile.toOSString()); //$NON-NLS-1$ config = wc.doSave(); } catch (Exception e) { assertNull("Unexpected exception when creating launch: " + e, e); //$NON-NLS-1$ } try { LaunchManager.getInstance().launch(config, ILaunchManager.RUN_MODE, false, new NullProgressMonitor()); } catch (Exception e) { assertNull("Unexpected exception when launching hello world: " + e, e); //$NON-NLS-1$ } assertTrue("Missing console output file", outFile.toFile().exists() && outFile.toFile().length() > 0); //$NON-NLS-1$ }
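A tiny illustrative sketch of the distinction this fix relies on, java.io.File#canRead() versus #canExecute(): a binary copied out of test data often has the read bit set but not the execute bit, so asserting canExecute() can fail spuriously. The temporary file below is just a stand-in, not the actual HelloWorld test binary.

import java.io.File;
import java.io.IOException;

public final class CanReadVsCanExecute {
    public static void main(String[] args) throws IOException {
        File f = File.createTempFile("helloWorld", ".bin");   // hypothetical stand-in file
        f.deleteOnExit();
        System.out.println("exists=" + f.exists()
                + " canRead=" + f.canRead()
                + " canExecute=" + f.canExecute());
    }
}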
diff --git a/java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java b/java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java index 26864b737..289d335dc 100644 --- a/java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java +++ b/java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java @@ -1,3430 +1,3430 @@ /* Derby - Class org.apache.derby.impl.store.raw.data.FileContainer Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.impl.store.raw.data; import org.apache.derby.iapi.reference.Property; import org.apache.derby.iapi.reference.Limits; import org.apache.derby.iapi.reference.SQLState; import org.apache.derby.impl.store.raw.data.BaseContainer; import org.apache.derby.impl.store.raw.data.BaseContainerHandle; import org.apache.derby.impl.store.raw.data.BasePage; import org.apache.derby.impl.store.raw.data.PageVersion; import org.apache.derby.iapi.services.cache.Cacheable; import org.apache.derby.iapi.services.cache.CacheManager; import org.apache.derby.iapi.services.context.ContextService; import org.apache.derby.iapi.services.daemon.DaemonService; import org.apache.derby.iapi.services.daemon.Serviceable; import org.apache.derby.iapi.services.monitor.Monitor; import org.apache.derby.iapi.services.sanity.SanityManager; import org.apache.derby.iapi.services.io.FormatIdUtil; import org.apache.derby.iapi.services.io.FormatIdOutputStream; import org.apache.derby.iapi.services.io.StoredFormatIds; import org.apache.derby.iapi.services.io.TypedFormat; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.store.raw.ContainerHandle; import org.apache.derby.iapi.store.raw.ContainerKey; import org.apache.derby.iapi.store.raw.LockingPolicy; import org.apache.derby.iapi.store.raw.Loggable; import org.apache.derby.iapi.store.raw.Page; import org.apache.derby.iapi.store.raw.PageKey; import org.apache.derby.iapi.store.raw.PageTimeStamp; import org.apache.derby.iapi.store.raw.RecordHandle; import org.apache.derby.iapi.store.raw.RawStoreFactory; import org.apache.derby.iapi.store.raw.Transaction; import org.apache.derby.iapi.store.raw.log.LogInstant; import org.apache.derby.iapi.store.raw.xact.RawTransaction; import org.apache.derby.iapi.store.access.TransactionController; import org.apache.derby.iapi.store.access.AccessFactory; import org.apache.derby.iapi.store.access.SpaceInfo; import org.apache.derby.iapi.services.io.ArrayInputStream; import org.apache.derby.iapi.services.io.ArrayOutputStream; import org.apache.derby.iapi.services.property.PropertyUtil; import org.apache.derby.iapi.util.ByteArray; import java.io.IOException; import java.io.DataInput; import java.io.DataOutput; import java.util.Properties; import java.util.zip.CRC32; /** FileContainer is an abstract base class for containers which are based on files. 
This class extends BaseContainer and implements Cacheable and TypedFormat */ abstract class FileContainer extends BaseContainer implements Cacheable, TypedFormat { /* * typed format */ protected static final int formatIdInteger = StoredFormatIds.RAW_STORE_SINGLE_CONTAINER_FILE; // format Id must fit in 4 bytes /** Return my format identifier. */ public int getTypeFormatId() { return StoredFormatIds.RAW_STORE_SINGLE_CONTAINER_FILE; } /* ** Immutable fields */ protected final CacheManager pageCache; // my page's cache protected final CacheManager containerCache; // cache I am in. protected final BaseDataFileFactory dataFactory; // creating factory /* ** Fields that are mutable only during identity changes */ protected int pageSize; // size of my pages protected int spareSpace; // % space kept free on page in inserts protected int minimumRecordSize; // minimum space a record should // occupy on the page. protected short initialPages; // initial number of pages preallocated // to the container when created protected boolean canUpdate; // can I be written to? private int PreAllocThreshold; // how many pages before preallocation // kicks in, only stored in memory private int PreAllocSize; // how many pages to preallocate at once // only stored in memory private boolean bulkIncreaseContainerSize;// if true, the next addPage will // attempt to preallocate a larger // than normal number of pages. // // preallocation parameters private static final int PRE_ALLOC_THRESHOLD = 8; private static final int MIN_PRE_ALLOC_SIZE = 1; private static final int DEFAULT_PRE_ALLOC_SIZE = 8; private static final int MAX_PRE_ALLOC_SIZE = 1000; /* ** Mutable fields, only valid when the identity is valid. */ // RESOLVE: if we run out of bytes in the container, we can change // containerVersion from a long to an int because this number is only // bumped when the container is dropped (and rolled back), so it is almost // impossible for the containverVersion to get beyond a short, let alone // and int - someone will have to write an application that attempt to drop // the container 2 billion times for that to happen. protected long firstAllocPageNumber; // first alloc page number protected long firstAllocPageOffset; // first alloc page offset protected long containerVersion; // the logged version number protected long estimatedRowCount; // value is changed unlogged protected LogInstant lastLogInstant; // last time this container // object was touched. /** * The sequence number for reusable recordIds . * As long as this number does not change, recordIds will be stable within * the container. **/ private long reusableRecordIdSequenceNumber; /** The page that was last inserted into. Use this for getPageForInsert. Remember the last allocated non-overflow page, and remember it in memory only. Use Get/Set method to access this field except when we know it is being single thread access. */ private long lastInsertedPage[]; private int lastInsertedPage_index; /** The last unfilled page found. Use this for getPageForInsert. Remember the last unfilled page found, and remember it in memory only. Use Get/Set method to access this field except when we know it is being single thread access. */ private long lastUnfilledPage; /** The last allocated page. This global var is access *without* synchronization. It is used as a hint for page allocation to find the next reusable page. */ private long lastAllocatedPage; /** An estimated page count. Use this for getEstimatedPagecount. Remember it in memory only. 
*/ private long estimatedPageCount; // The isDirty flag indicates if the container has been modified. The // preDirty flag indicates that the container is about to be modified. The // reason for these 2 flags instead of just one is to accomodate // checkpoint. After a clean container sends a log record to the log // stream but before that conatiner is dirtied by the log operation, a // checkpoint could be taken. If so, then the redoLWM will be after the // log record but, without preDirty, the cache cleaning will not have // waited for the change. So the preDirty bit is to stop the cache // cleaning from skipping over this container even though it has not really // been modified yet. protected boolean preDirty; protected boolean isDirty; /* allocation information cached by the container object. <P>MT - Access to the allocation cache MUST be synchronized on the allocCache object. FileContainer manages all MT issue w/r to AllocationCache. The AllocationCache object itself is not MT-safe. <P> The protocol for accessing both the allocation cache and the alloc page is: get the alloc cache semaphore, then get the alloc page. Once both are held, they can be released in any order. <BR> It is legal to get one or the other, i.e, it is legal to only get the alloc cache semaphore without latching the alloc page, and it is legal to get the alloc page latch without the alloc cache semaphore. <BR> it is illegal to hold alloc page latch and then get the allocation cache semaphore <PRE> Writer to alloc Page (to invalidate alloc cache) 1) synchronized(allocCache) 2) invalidate cache 3) get latch on alloc Page 4) release synchonized(allocCache) Reader: 1) synchronized(allocCache) 2) if valid, read value and release synchronized(allocCache) 3) if cache is invalid, get latch on alloc page 4) validate cache 5) release alloc page latch 6) read value 7) release synchonized(allocCache) </PRE> */ protected AllocationCache allocCache; /* * array to store persistently stored fields */ byte[] containerInfo; private CRC32 checksum; // holder for the checksum /* ** buffer for encryption/decryption */ private byte[] encryptionBuffer; /* * constants */ /** the container format must fit in this many bytes */ private static final int CONTAINER_FORMAT_ID_SIZE = 4; /* the checksum size */ protected static final int CHECKSUM_SIZE = 8; /** The size of the persistently stored container info ContainerHeader contains the following information: 4 bytes int FormatId 4 bytes int status 4 bytes int pageSize 4 bytes int spareSpace 4 bytes int minimumRecordSize 2 bytes short initialPages 2 bytes short spare1 8 bytes long first Allocation page number 8 bytes long first Allocation page offset 8 bytes long container version 8 bytes long estimated number of rows 8 bytes long reusable recordId sequence number 8 bytes long spare3 8 bytes long checksum container info size is 80 bytes, with 10 bytes of spare space */ protected static final int CONTAINER_INFO_SIZE = CONTAINER_FORMAT_ID_SIZE+4+4+4+4+2+2+8+8+8+8+CHECKSUM_SIZE+8+8; /** the number of arguments we need to pass to alloc page for create */ protected static final int STORED_PAGE_ARG_NUM = 5; protected static final int ALLOC_PAGE_ARG_NUM = 6; /** * where the first alloc page is located - * the logical page number and the physical page offset * NOTE if it is not 0 this is not going to work for Stream * file which doesn't support seek */ public static final long FIRST_ALLOC_PAGE_NUMBER = 0L; public static final long FIRST_ALLOC_PAGE_OFFSET = 0L; // file status for persistent storage 
private static final int FILE_DROPPED = 0x1; private static final int FILE_COMMITTED_DROP = 0x2; // recordId in this container can be reused when a page is reused. private static final int FILE_REUSABLE_RECORDID = 0x8; protected static final String SPACE_TRACE = (SanityManager.DEBUG ? "SpaceTrace" : null); FileContainer(BaseDataFileFactory factory) { dataFactory = factory; pageCache = factory.getPageCache(); containerCache = factory.getContainerCache(); initContainerHeader(true); } /** Get information about space used by the container. **/ public SpaceInfo getSpaceInfo(BaseContainerHandle handle) throws StandardException { SpaceInformation spaceInfo; synchronized(allocCache) { spaceInfo = allocCache.getAllPageCounts(handle,firstAllocPageNumber); } spaceInfo.setPageSize(pageSize); return spaceInfo; } /* ** Methods of Cacheable ** ** getIdentity() and clearIdentity() are implemented by BaseContainer */ /** Containers */ /** Open the container. @return a valid object if the container was successfully opened, null if it does not exist. @exception StandardException Some problem in opening a container. @see Cacheable#setIdentity */ public Cacheable setIdentity(Object key) throws StandardException { return setIdent((ContainerKey) key); } /** * Open the container. * <p> * Open the container with key "newIdentity". * <p> * should be same name as setIdentity but seems to cause method resolution * ambiguities * * @exception StandardException Some problem in opening a container. * * @see Cacheable#setIdentity **/ protected Cacheable setIdent(ContainerKey newIdentity) throws StandardException { boolean ok = openContainer(newIdentity); initializeLastInsertedPage(1); lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; estimatedPageCount = -1; if (ok) { // set up our identity. // If we raise an exception after this we must clear our identity. fillInIdentity(newIdentity); return this; } else { return null; } } public Cacheable createIdentity(Object key, Object createParameter) throws StandardException { if (SanityManager.DEBUG) { SanityManager.ASSERT( !(key instanceof PageKey), "PageKey input to create container"); } return createIdent((ContainerKey) key, createParameter); } // should be same name as createIdentity but seems to cause method // resolution ambiguities protected Cacheable createIdent( ContainerKey newIdentity, Object createParameter) throws StandardException { // createParameter will be this object if this method is being called // from itself to re-initialize the container (only for tempRAF) // if createParameter == this, do not reinitialize the header, this // object is not being reused for another container if (createParameter != this) { initContainerHeader(true /* change to different container */); if (createParameter != null && (createParameter instanceof ByteArray)) { // this is called during load tran, the create container // Operation has a byte array created by logCreateContainerInfo // which contains all the information necessary to recreate the // container. Use that to recreate the container properties. 
createInfoFromLog((ByteArray)createParameter); } else { if (SanityManager.DEBUG) { if (createParameter != null && !(createParameter instanceof Properties)) { SanityManager.THROWASSERT( "Expecting a non-null createParameter to a " + "Properties instead of " + createParameter.getClass().getName()); } } createInfoFromProp((Properties)createParameter); } } else { // we don't need to completely re-initialize the header // just re-initialize the relavent fields initContainerHeader(false); } if (initialPages > 1) { PreAllocThreshold = 0; PreAllocSize = initialPages; bulkIncreaseContainerSize = true; } else { PreAllocThreshold = PRE_ALLOC_THRESHOLD; } createContainer(newIdentity); setDirty(true); // set up our identity. // If we raise an exception after this we must clear our identity. fillInIdentity(newIdentity); return this; } public void clearIdentity() { closeContainer(); initializeLastInsertedPage(1); lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; canUpdate = false; super.clearIdentity(); } /** We treat this container as dirty if it has the container file open. @see Cacheable#isDirty */ public boolean isDirty() { synchronized (this) { return isDirty; } } public void preDirty(boolean preDirtyOn) { synchronized (this) { if (preDirtyOn) { // prevent the cleaner from cleaning this container or skipping // over it until the operation which preDirtied it got a chance // to do the change. preDirty = true; } else { preDirty = false; // if a cleaner is waiting on the dirty bit, wake it up notifyAll(); } } } protected void setDirty(boolean dirty) { synchronized(this) { preDirty = false; isDirty = dirty; // if a cleaner is waiting on the dirty bit, wake it up notifyAll(); } } /* ** Container creation, opening, and closing */ /** * Create a new container. * <p> * Create a new container, all references to identity must be through the * passed in identity, this object will no identity until after this * method returns. * * @exception StandardException Cloudscape Standard error policy **/ abstract void createContainer(ContainerKey newIdentity) throws StandardException; /** * Open a container. * <p> * Longer descrption of routine. * <p> * Open a container. Open the file that maps to this container, if the * file does not exist then we assume the container was never created. * If the file exists but we have trouble opening it then we throw some * exception. * * <BR> MT - single thread required - Enforced by cache manager. * * @exception StandardException Standard exception policy. **/ abstract boolean openContainer(ContainerKey newIdentity) throws StandardException; abstract void closeContainer(); /** * Drop Container. * <p> * * @see Transaction#dropContainer * **/ protected void dropContainer( LogInstant instant, boolean isDropped) { synchronized(this) { setDroppedState(isDropped); setDirty(true); bumpContainerVersion(instant); } } /** increment the version by one and return the new version. <BR> MT - caller must synchronized this in the same sync block that modifies the container header. */ protected final void bumpContainerVersion(LogInstant instant) { lastLogInstant = instant; ++containerVersion; } protected long getContainerVersion() { // it is not really necessary to synchronized this because the only time the // container version is looked at is during recovery, which is single // threaded at the moment. 
Put it in an sync block anyway just in case // some other people want to look at this for some bizarre reasons synchronized(this) { return containerVersion; } } /** * Request the system properties associated with a container. * <p> * Request the value of properties that are associated with a container. * The following properties can be requested: * derby.storage.pageSize * derby.storage.pageReservedSpace * derby.storage.minimumRecordSize * derby.storage.reusableRecordId * cloudsacpe.storage.initialPages * <p> * To get the value of a particular property add it to the property list, * and on return the value of the property will be set to it's current * value. For example: * * get_prop(ConglomerateController cc) * { * Properties prop = new Properties(); * prop.put("derby.storage.pageSize", ""); * cc.getContainerProperties(prop); * * System.out.println( * "table's page size = " + * prop.getProperty("derby.storage.pageSize"); * } * * @param prop Property list to fill in. * * @exception StandardException Standard exception policy. **/ public void getContainerProperties(Properties prop) throws StandardException { // derby.storage.pageSize if (prop.getProperty(Property.PAGE_SIZE_PARAMETER) != null) { prop.put( Property.PAGE_SIZE_PARAMETER, Integer.toString(pageSize)); } // derby.storage.minimumRecordSize if (prop.getProperty(RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER) != null) { prop.put( RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER, Integer.toString(minimumRecordSize)); } // derby.storage.pageReservedSpace if (prop.getProperty(RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER) != null) { prop.put( RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER, Integer.toString(spareSpace)); } // derby.storage.reusableRecordId if (prop.getProperty(RawStoreFactory.PAGE_REUSABLE_RECORD_ID) != null) { Boolean bool = new Boolean(isReusableRecordId()); prop.put(RawStoreFactory.PAGE_REUSABLE_RECORD_ID, bool.toString()); } // derby.storage.initialPages if (prop.getProperty(RawStoreFactory.CONTAINER_INITIAL_PAGES) != null) { prop.put(RawStoreFactory.CONTAINER_INITIAL_PAGES, Integer.toString(initialPages)); } } /** Read the container's header. Assumes the input stream (fileData) is positioned at the beginning of the file. Subclass that implements openContainer is expected to manufacture a DataInput stream which is used here to read the header. <BR> MT - single thread required - Enforced by caller. @exception StandardException Cloudscape Standard error policy @exception IOException error in reading the header from file */ protected void readHeader(DataInput fileData) throws IOException, StandardException { // Always read the header from the input stread even if the alloc page may // still be in cache. This is because a stubbify operation only writes // the stub to disk, it did not get rid of any stale page from the page // cache. So if it so happen that the stubbified container object is // aged out of the container cache but the first alloc page hasn't, // then when any stale page of this container wants to be written out, // the container needs to be reopened, which is when this routine is // called. We must not get the alloc page in cache because it may be // stale page and it may still say the container has not been dropped. 
byte[] epage = getEmbryonicPage(fileData); // read persistent container header into containerInfo AllocPage.ReadContainerInfo(containerInfo, epage); // initialize header from information stored in containerInfo readHeaderFromArray(containerInfo); epage = null; } // initialize header information so this container object can be safely // reused as if this container object has just been new'ed private void initContainerHeader(boolean changeContainer) { if (containerInfo == null) containerInfo = new byte[CONTAINER_INFO_SIZE]; if (checksum == null) checksum = new CRC32(); else checksum.reset(); if (allocCache == null) allocCache = new AllocationCache(); else allocCache.reset(); if (changeContainer) { pageSize = 0; spareSpace = 0; minimumRecordSize = 0; } initialPages = 1; firstAllocPageNumber = ContainerHandle.INVALID_PAGE_NUMBER; firstAllocPageOffset = -1; containerVersion = 0; estimatedRowCount = 0; reusableRecordIdSequenceNumber = 0; setDroppedState(false); setCommittedDropState(false); setReusableRecordIdState(false); // instance variables that are not stored on disk lastLogInstant = null; initializeLastInsertedPage(1); lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; estimatedPageCount = -1; PreAllocThreshold = PRE_ALLOC_THRESHOLD; PreAllocSize = DEFAULT_PRE_ALLOC_SIZE; bulkIncreaseContainerSize = false; } /** Read containerInfo from a byte array The container Header array must be written by or of the same format as put together by writeHeaderFromArray. @exception StandardException Cloudscape Standard error policy @exception IOException error in reading the header from file */ private void readHeaderFromArray(byte[] a) throws StandardException, IOException { ArrayInputStream inStream = new ArrayInputStream(a); inStream.setLimit(0, CONTAINER_INFO_SIZE); int fid = inStream.readInt(); if (fid != formatIdInteger) { throw StandardException.newException( SQLState.DATA_UNKNOWN_CONTAINER_FORMAT, getIdentity(), new Long(fid)); } int status = inStream.readInt(); pageSize = inStream.readInt(); spareSpace = inStream.readInt(); minimumRecordSize = inStream.readInt(); initialPages = inStream.readShort(); PreAllocSize = inStream.readShort(); firstAllocPageNumber = inStream.readLong(); firstAllocPageOffset = inStream.readLong(); containerVersion = inStream.readLong(); estimatedRowCount = inStream.readLong(); reusableRecordIdSequenceNumber = inStream.readLong(); lastLogInstant = null; if (PreAllocSize == 0) // pre 2.0, we don't store this. PreAllocSize = DEFAULT_PRE_ALLOC_SIZE; long spare3 = inStream.readLong(); // read spare long // upgrade - if this is a container that was created before // initialPages was stored, it will have a zero value. Set it to the // default of 1. 
if (initialPages == 0) initialPages = 1; // container read in from disk, reset preAllocation values PreAllocThreshold = PRE_ALLOC_THRESHOLD; // validate checksum long onDiskChecksum = inStream.readLong(); checksum.reset(); checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE); if (onDiskChecksum != checksum.getValue()) { PageKey pk = new PageKey(identity, FIRST_ALLOC_PAGE_NUMBER); throw dataFactory.markCorrupt (StandardException.newException( SQLState.FILE_BAD_CHECKSUM, pk, new Long(checksum.getValue()), new Long(onDiskChecksum), org.apache.derby.iapi.util.StringUtil.hexDump(a))); } allocCache.reset(); // set the in memory state setDroppedState((status & FILE_DROPPED) != 0); setCommittedDropState((status & FILE_COMMITTED_DROP) != 0); setReusableRecordIdState((status & FILE_REUSABLE_RECORDID) != 0); } /** Write the container header to a page array (the first allocation page) @exception StandardException Cloudscape Standard error policy @exception IOException error in writing the header to file */ protected void writeHeader(byte[] pageData) throws StandardException, IOException { // write out the current containerInfo in the borrowed space to byte // array containerInfo writeHeaderToArray(containerInfo); AllocPage.WriteContainerInfo(containerInfo, pageData, false); } /** Write the container header directly to output stream (fileData). Assumes the output stream is positioned at the beginning of the file. Subclasses that can writes the container header is expected to manufacture a DataOutput stream which is used here. <BR> MT - single thread required - Enforced by caller @exception StandardException Cloudscape Standard error policy @exception IOException error in writing the header to file */ protected void writeHeader(DataOutput fileData, boolean create, byte[] epage) throws IOException, StandardException { // write out the current containerInfo in the borrowed space to byte // array containerInfo writeHeaderToArray(containerInfo); // RESOLVE: get no wait on the page cache to see if allocation page is // there, if so, use that instead of making a new array and a static // function. AllocPage.WriteContainerInfo(containerInfo, epage, create); // now epage has the containerInfo written inside it // force WAL - and check to see if database is corrupt or is frozen. dataFactory.flush(lastLogInstant); if (lastLogInstant != null) lastLogInstant = null; // write it out dataFactory.writeInProgress(); try { fileData.write(epage); } finally { dataFactory.writeFinished(); } } /** Get an embryonic page from the dataInput stream. If fileData is not null, then the embyronic page will be read in from the input stream (fileData), which is assumed to be positioned at the beginning of the first allocation page. if fileData is null, then just manufacture an array which is the size of an embryonic page. @exception IOException error in read the embryonic page from file */ protected byte[] getEmbryonicPage(DataInput fileData) throws IOException { byte[] epage = new byte[AllocPage.MAX_BORROWED_SPACE]; if (fileData != null) { fileData.readFully(epage); } return epage; } /** Write containerInfo into a byte array The container Header thus put together can be read by readHeaderFromArray. 
@exception IOException error in writing the header */ private void writeHeaderToArray(byte[] a) throws IOException { if (SanityManager.DEBUG) SanityManager.ASSERT(a.length >= CONTAINER_INFO_SIZE, "header won't fit in array"); ArrayOutputStream a_out = new ArrayOutputStream(a); FormatIdOutputStream outStream = new FormatIdOutputStream(a_out); int status = 0; if (getDroppedState()) status |= FILE_DROPPED; if (getCommittedDropState()) status |= FILE_COMMITTED_DROP; if (isReusableRecordId()) status |= FILE_REUSABLE_RECORDID; a_out.setPosition(0); a_out.setLimit(CONTAINER_INFO_SIZE); outStream.writeInt(formatIdInteger); outStream.writeInt(status); outStream.writeInt(pageSize); outStream.writeInt(spareSpace); outStream.writeInt(minimumRecordSize); outStream.writeShort(initialPages); outStream.writeShort(PreAllocSize); // write spare1 outStream.writeLong(firstAllocPageNumber); outStream.writeLong(firstAllocPageOffset); outStream.writeLong(containerVersion); outStream.writeLong(estimatedRowCount); outStream.writeLong(reusableRecordIdSequenceNumber); outStream.writeLong(0); //Write spare3 checksum.reset(); checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE); // write the checksum to the array outStream.writeLong(checksum.getValue()); a_out.clearLimit(); } /** Log all information on the container creation necessary to recreate the container during a load tran. @exception StandardException Cloudscape Standard error policy */ protected ByteArray logCreateContainerInfo() throws StandardException { // just write out the whole container header byte[] array = new byte[CONTAINER_INFO_SIZE]; if (array == null || array.length != CONTAINER_INFO_SIZE) { throw StandardException.newException( SQLState.DATA_OBJECT_ALLOCATION_FAILED, "byte[]"); } try { writeHeaderToArray(array); } catch (IOException ioe) { throw StandardException.newException( SQLState.DATA_UNEXPECTED_EXCEPTION, ioe); } return new ByteArray(array); } /** Set container properties from the passed in ByteArray, which is created by logCreateContainerInfo. This information is used to recreate the container during recovery load tran. 
The following container properties are set: pageSize spareSpace minimumRecordSize isReusableRecordId initialPages */ private void createInfoFromLog(ByteArray byteArray) throws StandardException { if (SanityManager.DEBUG) { SanityManager.ASSERT(byteArray != null, "setCreateContainerInfoFromLog: ByteArray is null"); SanityManager.ASSERT(byteArray.getLength() == CONTAINER_INFO_SIZE, "setCreateContainerInfoFromLog: ByteArrays.length() != CONTAINER_INFO_SIZE"); } byte[] array = byteArray.getArray(); // now extract the relavent information from array - basically // duplicate the code in readHeaderFromArray ArrayInputStream inStream = new ArrayInputStream(array); int status = 0; try { inStream.setLimit(0, CONTAINER_INFO_SIZE); int fid = inStream.readInt(); if (fid != formatIdInteger) { // RESOLVE: do something about this when we have > 1 container format throw StandardException.newException( SQLState.DATA_UNKNOWN_CONTAINER_FORMAT, getIdentity(), new Long(fid)); } status = inStream.readInt(); pageSize = inStream.readInt(); spareSpace = inStream.readInt(); minimumRecordSize = inStream.readInt(); initialPages = inStream.readShort(); } catch (IOException ioe) { throw StandardException.newException( SQLState.DATA_UNEXPECTED_EXCEPTION, ioe); } // set reusable record id property setReusableRecordIdState((status & FILE_REUSABLE_RECORDID) != 0); // sanity check to make sure we are not encoutering any // dropped Container if (SanityManager.DEBUG) { SanityManager.ASSERT((status & FILE_DROPPED) == 0 && (status & FILE_COMMITTED_DROP) == 0, "cannot load a dropped container"); } } /** Set container properties from the passed in createArgs. The following container properties are set: pageSize spareSpace minimumRecordSize isReusableRecordId initialPages RESOLVE - in the future setting parameters should be overridable by sub-class, e.g. one implementation of Container may require a minimum page size of 4k. */ private void createInfoFromProp(Properties createArgs) throws StandardException { // Need a TransactionController to get database/service wide properties. AccessFactory af = (AccessFactory) Monitor.getServiceModule(dataFactory, AccessFactory.MODULE); // RESOLVE: sku defectid 2014 TransactionController tc = (af == null) ? null : af.getTransaction( ContextService.getFactory().getCurrentContextManager()); pageSize = PropertyUtil.getServiceInt(tc, createArgs, Property.PAGE_SIZE_PARAMETER, Limits.DB2_MIN_PAGE_SIZE, Limits.DB2_MAX_PAGE_SIZE, RawStoreFactory.PAGE_SIZE_DEFAULT); // rather than throw error, just automatically set page size to // default if bad value given. if ((pageSize != 4096) && (pageSize != 8192) && (pageSize != 16384) && (pageSize != 32768)) { pageSize= RawStoreFactory.PAGE_SIZE_DEFAULT; } spareSpace = PropertyUtil.getServiceInt(tc, createArgs, RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER, 0, 100, 20); PreAllocSize = PropertyUtil.getServiceInt(tc, createArgs, RawStoreFactory.PRE_ALLOCATE_PAGE, MIN_PRE_ALLOC_SIZE, MAX_PRE_ALLOC_SIZE, DEFAULT_PRE_ALLOC_SIZE /* default */); // RESOLVE - in the future, we will allow user to set minimumRecordSize // to be larger than pageSize, when long rows are supported. if (createArgs == null) { // if the createArgs is null, then the following method call // will get the system properties from the appropriete places. // we want to make sure minimumRecrodSize is set to at least // the default value MINIMUM_RECORD_SIZE_DEFAULT (12) // as set in rawStoreFactory. 
minimumRecordSize = PropertyUtil.getServiceInt(tc, RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER, RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT, // this is different from the next call // reserving 100 bytes for record/field headers (pageSize * (1 - spareSpace/100) - 100), RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT); } else { // if the createArgs is not null, then it has already been set // by upper layer or create statement, then, we allow the minimum // value of this to be MINIMUM_RECORD_SIZE_MINIMUM (1). minimumRecordSize = PropertyUtil.getServiceInt(tc, createArgs, RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER, RawStoreFactory.MINIMUM_RECORD_SIZE_MINIMUM, // this is different from the last call // reserving 100 bytes for record/field headers (pageSize * (1 - spareSpace/100) - 100), RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT); } // For the following properties, do not check value set in global // properties, we only listen to what access has to say about them. // // whether or not container's recordIds can be reused // if container is to be created with a large number of pages if (createArgs != null) { String reusableRecordIdParameter = createArgs.getProperty(RawStoreFactory.PAGE_REUSABLE_RECORD_ID); if (reusableRecordIdParameter != null) { Boolean reusableRecordId = new Boolean(reusableRecordIdParameter); setReusableRecordIdState(reusableRecordId.booleanValue()); } String containerInitialPageParameter = createArgs.getProperty(RawStoreFactory.CONTAINER_INITIAL_PAGES); if (containerInitialPageParameter != null) { initialPages = Short.parseShort(containerInitialPageParameter); if (initialPages > 1) { if (initialPages > RawStoreFactory.MAX_CONTAINER_INITIAL_PAGES) initialPages = RawStoreFactory.MAX_CONTAINER_INITIAL_PAGES; } } } } /** */ protected boolean canUpdate() { return canUpdate; } /** Deallocate a page from the container. @param handle the container handle doing the deallocation @param page the page to be deallocated. It is latched upon entry and will be unlatched by the caller of this function @exception StandardException Cloudscape Standard error policy */ protected void deallocatePage(BaseContainerHandle handle, BasePage page) throws StandardException { if (SanityManager.DEBUG) { SanityManager.ASSERT(page.isLatched(), "page is not latched"); SanityManager.ASSERT(page.getPageNumber() != FIRST_ALLOC_PAGE_NUMBER, "cannot deallocate an alloc page"); } long pnum = page.getPageNumber(); // dealloc the page from the alloc page deallocatePagenum(handle, pnum); // mark the page as deallocated. Page should not be touched after this // the page latch is released by the BaseContainer upon return of this // method. Regardless of whether this operation is successful or not, // the page will be unlatched by BaseContainer. 
page.deallocatePage(); } /** deallocate the page from the alloc page */ private void deallocatePagenum(BaseContainerHandle handle, long pnum) throws StandardException { synchronized(allocCache) { long allocPageNum = allocCache.getAllocPageNumber(handle, pnum, firstAllocPageNumber); if (SanityManager.DEBUG) { if (allocPageNum == ContainerHandle.INVALID_PAGE_NUMBER) allocCache.dumpAllocationCache(); if (allocPageNum == ContainerHandle.INVALID_PAGE_NUMBER) SanityManager.THROWASSERT( "can't find alloc page for page number " + pnum); } // get the alloc page to deallocate this pnum AllocPage allocPage = (AllocPage)handle.getAllocPage(allocPageNum); if (allocPage == null) { PageKey pkey = new PageKey(identity, allocPageNum); throw StandardException.newException( SQLState.FILE_NO_ALLOC_PAGE, pkey); } try { allocCache.invalidate(allocPage, allocPageNum); // Unlatch alloc page. The page is protected by the dealloc // lock. allocPage.deallocatePage(handle, pnum); } finally { allocPage.unlatch(); } } // make sure this page gets looked at when someone needs a new page if (pnum <= lastAllocatedPage) { lastAllocatedPage = pnum - 1; } } /** Compress free space from container. <BR> MT - thread aware - It is assumed that our caller (our super class) has already arranged a logical lock on page allocation to only allow a single thread through here. Compressing free space is done in allocation page units, working it's way from the end of the container to the beginning. Each loop operates on the last allocation page in the container. Freeing space in the container page involves 2 transactions, an update to an allocation page, N data pages, and possibly the delete of the allocation page. The User Transaction (UT) initiated the compress call. The Nested Top Transaction (NTT) is the transaction started by RawStore inside the compress call. This NTT is committed before compress returns. The NTT is used to access high traffic data structures such as the AllocPage. This is outline of the algorithm used in compressing the container. Until a non free page is found loop, in each loop return to the OS all space at the end of the container occupied by free pages, including the allocation page itself if all of it's pages are free. 1) Find last 2 allocation pages in container (last if there is only one). 2) invalidate the allocation information cached by the container. Without the cache no page can be gotten from the container. Pages already in the page cache are not affected. Thus by latching the allocPage and invalidating the allocation cache, this NTT blocks out all page gets from this container until it commits. 3) the allocPage determines which pages can be released to the OS, mark that in its data structure (the alloc extent). Mark the contiguous block of nallocated/free pages at the end of the file as unallocated. This change is associated with the NTT. 4) The NTT calls the OS to deallocate the space from the file. Note that the system can handle being booted and asked to get an allocated page which is past end of file, it just extends the file automatically. 5) If freeing all space on the alloc page, and there is more than one alloc page, then free the alloc page - this requires an update to the previous alloc page which the loop has kept latched also. 6) if the last alloc page was deleted, restart loop at #1 All NTT latches are released before this routine returns. If we use an NTT, the caller has to commit the NTT to release the allocPage latch. 
If we don't use an NTT, the allocPage latch is released as this routine returns. @param ntt - the nested top transaction for the purpose of freeing space. If ntt is null, use the user transaction for allocation. #param allocHandle - the container handle opened by the ntt, use this to latch the alloc page @exception StandardException Standard Cloudscape error policy */ protected void compressContainer( RawTransaction ntt, BaseContainerHandle allocHandle) throws StandardException { AllocPage alloc_page = null; AllocPage prev_alloc_page = null; if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) { // no allocation pages in container, no work to do! return; } // make sure we don't execute redo recovery on any page // which is getting truncated. At this point we have an exclusive // table lock on the table, so after checkpoint no page change // can happen between checkpoint log record and compress of space. dataFactory.getRawStoreFactory().checkpoint(); // block the backup, If backup is already in progress wait // for the backup to finish. Otherwise restore from the backup // can start recovery at different checkpoint and possibly // do redo on pages that are going to get truncated. ntt.blockBackup(true); try { synchronized(allocCache) { // loop until last 2 alloc pages are reached. alloc_page = (AllocPage) allocHandle.getAllocPage(firstAllocPageNumber); while (!alloc_page.isLast()) { if (prev_alloc_page != null) { // there are more than 2 alloc pages, unlatch the // earliest one. prev_alloc_page.unlatch(); } prev_alloc_page = alloc_page; alloc_page = null; long nextAllocPageNumber = prev_alloc_page.getNextAllocPageNumber(); long nextAllocPageOffset = prev_alloc_page.getNextAllocPageOffset(); alloc_page = (AllocPage) allocHandle.getAllocPage(nextAllocPageNumber); } // invalidate cache before compress changes cached information, // while holding synchronization on cache and latch on // allocation page. This should guarantee that only new info // is seen after this operation completes. allocCache.invalidate(); // reset, as pages may not exist after compress lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; alloc_page.compress(ntt, this); } } finally { if (alloc_page != null) { alloc_page.unlatch(); alloc_page = null; } if (prev_alloc_page != null) { prev_alloc_page.unlatch(); prev_alloc_page = null; } // flush all changes to this file from cache. flushAll(); // make sure all truncated pages are removed from the cache, // as it will get confused in the future if we allocate the same // page again, but find an existing copy of it in the cache - // it expects to not find new pages in the cache. Could just // get rid of truncated pages, iterface allows one page or // all pages. pageCache.discard(identity); } } /** * Get the reusable RecordId sequence number for the container. * @see BaseContainer#getReusableRecordIdSequenceNumber * @return reusable RecordId sequence number for the container. */ public final long getReusableRecordIdSequenceNumber() { synchronized(this) { return reusableRecordIdSequenceNumber; } } /** * Increment the reusable RecordId version sequence number. */ protected final void incrementReusableRecordIdSequenceNumber() { final boolean readOnly = dataFactory.isReadOnly(); synchronized (this) { reusableRecordIdSequenceNumber++; if (!readOnly) { isDirty = true; } } } /** Create a new page in the container. 
<BR> MT - thread aware - It is assumed that our caller (our super class) has already arranged a logical lock on page allocation to only allow a single thread through here. Adding a new page involves 2 transactions and 2 pages. The User Transaction (UT) initiated the addPage call and expects a latched page (owns by the UT) to be returned. The Nested Top Transaction (NTT) is the transaction started by RawStore inside an addPage call. This NTT is committed before the page is returned. The NTT is used to accessed high traffic data structure such as the AllocPage. This is outline of the algorithm used in adding a page: 1) find or make an allocPage which can handle the addding of a new page. Latch the allocPage with the NTT. 2) invalidate the allocation information cached by the container. Without the cache no page can be gotten from the container. Pages already in the page cache is not affected. Thus by latching the allocPage and invalidating the allocation cache, this NTT blocks out all page gets from this container until it commits. 3) the allocPage determines which page can be allocated, mark that in its data structure (the alloc extent) and returns the page number of the new page. This change is associated with the NTT. 4) the NTT gets or creates the new page in the page cache (bypassing the lookup of the allocPage since that is already latched by the NTT and will deadlock). 5) the NTT initializes the page (mark it is being a VALID page). 6) the page latch is transfered to the UT from the NTT. 7) the new page is returned, latched by UT If we use an NTT, the caller has to commit the NTT to release the allocPage latch. If we don't use an NTT, the allocPage latch is released as this routine returns. @param userHandle - the container handle opened by the user transaction, use this to latch the new user page @param ntt - the nested top transaction for the purpose of allocating the new page If ntt is null, use the user transaction for allocation. #param allocHandle - the container handle opened by the ntt, use this to latch the alloc page @exception StandardException Standard Cloudscape error policy */ protected BasePage newPage(BaseContainerHandle userHandle, RawTransaction ntt, BaseContainerHandle allocHandle, boolean isOverflow) throws StandardException { // NOTE: we are single threaded thru this method, see MT comment boolean useNTT = (ntt != null); // if ntt is null, use user transaction if (!useNTT) ntt = userHandle.getTransaction(); long lastPage; // last allocated page long lastPreallocPage; // last pre-allcated page long pageNumber; // the page number of the new page PageKey pkey; // the identity of the new page boolean reuse; // if true, we are trying to reuse a page /* in case the page recommeded by allocPage is not committed yet, may /* need to retry a couple of times */ boolean retry; int numtries = 0; long startSearch = lastAllocatedPage; AllocPage allocPage = null; // the alloc page BasePage page = null; // the new page try { do { retry = false; // we don't expect we need to retry synchronized(allocCache) { if (SanityManager.DEBUG) { SanityManager.ASSERT( ntt.getId().equals( allocHandle.getTransaction().getId())); if (useNTT) SanityManager.ASSERT( !ntt.getId().equals( userHandle.getTransaction().getId())); } /* find an allocation page that can handle adding a new * page. * * allocPage is unlatched when the ntt commits. The new * page is initialized by the ntt but the latch is * transfered to the user transaction before the allocPage * is unlatched. 
The allocPage latch prevents almost any * other reader or writer from finding the new page until * the ntt is committed and the new page is latched by the * user transaction. * * (If the page is being reused, it is possible for another * xact which kept a handle on the reused page to find the * page during the transfer UT -> NTT. If this unlikely * even occurs and the transfer fails [see code relating * to transfer below], we retry from the beginning.) * * After the NTT commits a reader (getNextPageNumber) may * get the page number of the newly allocated page and it * will wait for the new page and latch it when the user * transaction commits, aborts or unlatches the new page. * Whether the user transaction commits or aborts, the new * page stay allocated. * * RESOLVE: before NTT rolls back (or commits) the latch is * released. To repopulate the allocation cache, need to * get either the container lock on add page, or get a per * allocation page lock. * * This blocks all page read (getPage) from accessing this * alloc page in this container until the alloc page is * unlatched. Those who already have a page handle into * this container are unaffected. * * In other words, allocation blocks out reader (of any * page that is managed by this alloc page) by the latch * on the allocation page. * * Note that write page can proceed as usual. */ allocPage = findAllocPageForAdd(allocHandle, ntt, startSearch); allocCache.invalidate(allocPage, allocPage.getPageNumber()); } if (SanityManager.DEBUG) { if (allocPage == null) allocCache.dumpAllocationCache(); SanityManager.ASSERT(allocPage != null, "findAllocPageForAdd returned a null alloc page"); } // // get the next free page's number. // for case 1, page number > lastPreallocPage // for case 2, page number <= lastPage // for case 3, lastPage < page number <= lastPreallocPage // pageNumber = allocPage.nextFreePageNumber(startSearch); // need to distinguish between the following 3 cases: // 1) the page has not been allocate or initalized. // Create it in the page cache and sync it to disk. // 2) the page is being re-allocated. // We need to read it in to re-initialize it // 3) the page has been preallocated. // Create it in the page cache and don't sync it to disk // // first find out the current last initialized page and // preallocated page before the new page is added lastPage = allocPage.getLastPagenum(); lastPreallocPage = allocPage.getLastPreallocPagenum(); reuse = pageNumber <= lastPage; // no address translation necessary pkey = new PageKey(identity, pageNumber); if (reuse) { // if re-useing a page, make sure the deallocLock on the new // page is not held. We only need a zero duration lock on // the new page because the allocPage is latched and this // is the only thread which can be looking at this // pageNumber. RecordHandle deallocLock = BasePage.MakeRecordHandle(pkey, RecordHandle.DEALLOCATE_PROTECTION_HANDLE); if (!getDeallocLock(allocHandle, deallocLock, false /* nowait */, true /* zeroDuration */)) { // The transaction which deallocated this page has not // committed yet. Try going to some other page. If // this is the first time we fail to get the dealloc // lock, try from the beginning of the allocated page. // If we already did that and still fail, keep going // until we get a brand new page. 
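// Concretely: the first failure restarts the free-page search from the
// beginning of the extent (startSearch = INVALID_PAGE_NUMBER) in case
// some other freed page has already committed; subsequent failures
// resume the search from the page just tried, so the search keeps
// advancing until it runs past the free pages and a brand new page
// number is handed out.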
if (numtries == 0) { startSearch = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = pageNumber; } else // continue from where we were startSearch = pageNumber; numtries++; // We have to unlatch the allocPage so that if that // transaction rolls back, it won't deadlock with this // transaction. allocPage.unlatch(); allocPage = null; retry = true; } else { // we got the lock, next time start from there lastAllocatedPage = pageNumber; } } else { // we got a new page, next time, start from beginning of // the bit map again if we suspect there are some some // deallocated pages if (numtries > 0) lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; else lastAllocatedPage = pageNumber; } // Retry from the beginning if necessary. if (retry) continue; // If we get past here must have (retry == false) if (SanityManager.DEBUG) { SanityManager.ASSERT(retry == false); } // Now we have verified that the allocPage is latched and we // can get the zeroDuration deallocLock nowait. This means the // transaction which freed the page has committed. Had that // transaction aborted, we would have retried. if (SanityManager.DEBUG) { // ASSERT lastPage <= lastPreallocPage if (lastPage > lastPreallocPage) { SanityManager.THROWASSERT("last page " + lastPage + " > lastPreallocPage " + lastPreallocPage); } } // No I/O at all if this new page is requested as part of a // create and load statement or this new page is in a temporary // container. // // In the former case, BaseContainer will allow the // MODE_UNLOGGED bit to go thru to the nested top transaction // alloc handle. In the later case, there is no nested top // transaction and the alloc handle is the user handle, which // is UNLOGGED. boolean noIO = (allocHandle.getMode() & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED; // If we do not need the I/O (either because we are in a // create_unlogged mode or we are dealing with a temp table), // don't do any preallocation. Otherwise, see if we should be // pre-Allocating page by now. We don't call it before // nextFreePageNumber because finding a reusable page may be // expensive and we don't want to start preAllocation unless // there is no more reusable page. Unless we are called // explicitly to bulk increase the container size in a preload // or in a create container. if (!noIO && (bulkIncreaseContainerSize || (pageNumber > lastPreallocPage && pageNumber > PreAllocThreshold))) { allocPage.preAllocatePage( this, PreAllocThreshold, PreAllocSize); } // update last preAllocated Page, it may have been changed by // the preAllocatePage call. We don't want to do the sync if // preAllocatePage already took care of it. lastPreallocPage = allocPage.getLastPreallocPagenum(); boolean prealloced = pageNumber <= lastPreallocPage; // Argument to the create is an array of ints. // The array is only used for new page creation or for creating // a preallocated page, not for reuse. // 0'th element is the page format // 1'st element is whether or not to sync the page to disk // 2'nd element is pagesize // 3'rd element is spareSpace int[] createPageArgs = new int[STORED_PAGE_ARG_NUM]; createPageArgs[0] = StoredPage.FORMAT_NUMBER; createPageArgs[1] = prealloced ? 0 : (noIO ? 
0 : CachedPage.WRITE_SYNC); createPageArgs[2] = pageSize; createPageArgs[3] = spareSpace; createPageArgs[4] = minimumRecordSize; // RESOLVE: right now, there is no re-mapping of pages, so // pageOffset = pageNumber*pageSize long pageOffset = pageNumber * pageSize; // initialize a new user page // we first use the NTT to initialize the new page - in case the // allocation failed, it is rolled back with the NTT. // Later, we transfer the latch to the userHandle so it won't be // released when the ntt commits try { page = initPage(allocHandle, pkey, createPageArgs, pageOffset, reuse, isOverflow); } catch (StandardException se) { if (SanityManager.DEBUG) { SanityManager.DEBUG_PRINT("FileContainer", "got exception from initPage:" + "\nreuse = " + reuse + "\ncreatePageArgs[1] = " + createPageArgs[1] + "\nallocPage = " + allocPage ); } allocCache.dumpAllocationCache(); throw se; } if (SanityManager.DEBUG) { SanityManager.ASSERT( page != null, "initPage returns null page"); SanityManager.ASSERT( page.isLatched(), "initPage returns unlatched page"); } // allocate the page in the allocation page bit map allocPage.addPage(this, pageNumber, ntt, userHandle); if (useNTT) { // transfer the page latch from NTT to UT. // // after the page is unlatched by NTT, it is still // protected from being found by almost everybody else // because the alloc page is still latched and the alloc // cache is invalidated. // - // However (beetle 3942) it is possible for the page to be + // However it is possible for the page to be // found by threads who specifically ask for this // pagenumber (e.g. HeapPostCommit). // We may find that such a thread has latched the page. // We shouldn't wait for it because we have the alloc page // latch, and this could cause deadlock (e.g. // HeapPostCommit might call removePage and this would wait // on the alloc page). // // We may instead find that we can latch the page, but that // another thread has managed to get hold of it during the - // transfer and either deallocate it or otherwise change it + // transfer and either deallocated it or otherwise change it // (add rows, delete rows etc.) // // Since this doesn't happen very often, we retry in these // 2 cases (we give up the alloc page and page and we start // this method from scratch). // // If the lock manager were changed to allow latches to be // transferred between transactions, wouldn't need to // unlatch to do the transfer, and would avoid having to - // retry in these cases (beetle 4011). + // retry in these cases (DERBY-2337). page.unlatch(); page = null; // need to find it in the cache again since unlatch also // unkept the page from the cache page = (BasePage)pageCache.find(pkey); page = latchPage( userHandle, page, false /* don't wait, it might deadlock */); if (page == null || // recordCount will only return true if there are no // rows (including deleted rows) page.recordCount() != 0 || page.getPageStatus() != BasePage.VALID_PAGE) { retry = true; if (page != null) { page.unlatch(); page = null; } allocPage.unlatch(); allocPage = null; } } // if ntt is null, no need to transfer. Page is latched by user // transaction already. Will be no need to retry. // the alloc page is unlatched in the finally block. 
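// Recap of the NTT -> UT transfer above: after re-finding the page in
// the cache we retry from scratch if the nowait latch attempt fails,
// if the page already contains (possibly deleted) rows, or if it is no
// longer marked VALID; in each case both the new page and the alloc
// page are released before looping back.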
} while (retry == true); // At this point, should have a page suitable for returning if (SanityManager.DEBUG) SanityManager.ASSERT(page.isLatched()); } catch (StandardException se) { if (page != null) page.unlatch(); page = null; throw se; // rethrow error } finally { if (!useNTT && allocPage != null) { allocPage.unlatch(); allocPage = null; } // NTT is committed by the caller } if (SanityManager.DEBUG) SanityManager.ASSERT(page.isLatched()); // if bulkIncreaseContainerSize is set, that means this newPage call // may have greatly expanded the container size due to preallocation. // Regardless of how many page it actually created, reset preAllocSize // to the default so we won't attempt to always preallocate 1000 pages // at a time in the future. if (bulkIncreaseContainerSize) { bulkIncreaseContainerSize = false; PreAllocSize = DEFAULT_PRE_ALLOC_SIZE; } if (!isOverflow && page != null) setLastInsertedPage(pageNumber); // increase estimated page count - without any synchronization or // logging, this is an estimate only if (estimatedPageCount >= 0) estimatedPageCount++; if (!this.identity.equals(page.getPageId().getContainerId())) { if (SanityManager.DEBUG) { SanityManager.THROWASSERT( "just created a new page from a different container" + "\n this.identity = " + this.identity + "\n page.getPageId().getContainerId() = " + page.getPageId().getContainerId() + "\n userHandle is: " + userHandle + "\n allocHandle is: " + allocHandle + "\n this container is: " + this); } throw StandardException.newException( SQLState.DATA_DIFFERENT_CONTAINER, this.identity, page.getPageId().getContainerId()); } return page; // return the newly added page } protected void clearPreallocThreshold() { // start life with preallocated page if possible PreAllocThreshold = 0; } protected void prepareForBulkLoad(BaseContainerHandle handle, int numPage) { clearPreallocThreshold(); RawTransaction tran = handle.getTransaction(); // find the last allocation page - do not invalidate the alloc cache, // we don't want to prevent other people from reading or writing // pages. AllocPage allocPage = findLastAllocPage(handle, tran); // preallocate numPages. Do whatever this allocPage can handle, if it // is full, too bad. We don't guarentee that we will preallocate this // many pages, we only promise to try. if (allocPage != null) { allocPage.preAllocatePage(this, 0, numPage); allocPage.unlatch(); } } private boolean pageValid(BaseContainerHandle handle, long pagenum) throws StandardException { boolean retval = false; synchronized(allocCache) { if (pagenum <= allocCache.getLastPageNumber(handle, firstAllocPageNumber) && allocCache.getPageStatus(handle, pagenum, firstAllocPageNumber) == AllocExtent.ALLOCATED_PAGE) retval = true; } return retval; } protected long getLastPageNumber(BaseContainerHandle handle) throws StandardException { long retval; synchronized(allocCache) { // check if the first alloc page number is valid, it is invalid // if some one attempts to access the container info before the // first alloc page got created. One such case is online backup. // If first alloc page itself is invalid, then there are no pages // on the disk yet for this container, just return // ContainerHandle.INVALID_PAGE_NUMBER, caller can decide what to // do. if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) { retval = ContainerHandle.INVALID_PAGE_NUMBER; } else { retval = allocCache.getLastPageNumber(handle, firstAllocPageNumber); } } return retval; } /* Find or allocate an allocation page which can handle adding a new page. 
Return a latched allocPage. <BR> MT - single thread required - called as part of add page */ private AllocPage findAllocPageForAdd(BaseContainerHandle allocHandle, RawTransaction ntt, long lastAllocatedPage) throws StandardException { AllocPage allocPage = null; AllocPage oldAllocPage = null; // in case we need to walk the alloc page chain boolean success = false; // set this for clean up try { if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) { // make and return a latched new allocation page allocPage = makeAllocPage(ntt, allocHandle, FIRST_ALLOC_PAGE_NUMBER, FIRST_ALLOC_PAGE_OFFSET, CONTAINER_INFO_SIZE); if (SanityManager.DEBUG) { SanityManager.ASSERT(firstAllocPageNumber == FIRST_ALLOC_PAGE_NUMBER, "first Alloc Page number is still not set"); SanityManager.ASSERT(firstAllocPageOffset == FIRST_ALLOC_PAGE_OFFSET, "first Alloc Page offset is still not set"); } } else { // an allocation page already exist, go get it allocPage = (AllocPage)allocHandle.getAllocPage(firstAllocPageNumber); } /* allocPage is latched by allocHandle */ if (!allocPage.canAddFreePage(lastAllocatedPage)) { // allocPage cannot manage the addition of one more page, walk the // alloc page chain till we find an allocPage that can // RESOLVE: always start with the first page for now... boolean found = false; // found an alloc page that can handle // adding a new page while(allocPage.isLast() != true) { long nextAllocPageNumber = allocPage.getNextAllocPageNumber(); long nextAllocPageOffset = allocPage.getNextAllocPageOffset(); // RESOLVE (future): chain this info to in memory structure so // getAllocPage can find this alloc page allocPage.unlatch(); allocPage = null; // the nextAllocPage is stable once set - even though it is // save to get the next page latch before releasing this // allocPage. allocPage = (AllocPage)allocHandle.getAllocPage(nextAllocPageNumber); if (allocPage.canAddFreePage(lastAllocatedPage)) { found = true; break; } } if (!found) { // allocPage is last and it is full oldAllocPage = allocPage; allocPage = null; if (SanityManager.DEBUG) SanityManager.ASSERT(oldAllocPage.getLastPagenum() == oldAllocPage.getMaxPagenum(), "expect allocpage to be full but last pagenum != maxpagenum"); long newAllocPageNum = oldAllocPage.getMaxPagenum() + 1; long newAllocPageOffset = newAllocPageNum; // no translation allocPage = makeAllocPage(ntt, allocHandle, newAllocPageNum, newAllocPageOffset, 0 /* no containerInfo */); // this writes out the new alloc page and return a latched page // nobody can find the new alloc page until oldAllocPage is unlatched. 
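// (Sketch of the resulting layout, assuming one extent per alloc page:
// the new alloc page sits at old-max-page-number + 1 and manages the
// page numbers that follow it, while the old alloc page keeps its own
// range and simply chains forward to the new last alloc page.)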
// oldAllocPage is no longer the last alloc page, // it has a pointer to the new last alloc page oldAllocPage.chainNewAllocPage(allocHandle, newAllocPageNum, newAllocPageOffset); oldAllocPage.unlatch(); oldAllocPage = null; } } /* no error handling necessary */ success = true; } finally // unlatch allocation page if any error happened { if (!success) { if (oldAllocPage != null) oldAllocPage.unlatch(); if (allocPage != null) allocPage.unlatch(); allocPage = null; } // if success drop out of finally block } return allocPage; } /** Find the last alloc page, returns null if no alloc page is found */ private AllocPage findLastAllocPage(BaseContainerHandle handle, RawTransaction tran) { AllocPage allocPage = null; AllocPage oldAllocPage = null; if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) return null; try { allocPage = (AllocPage)handle.getAllocPage(firstAllocPageNumber); while(!allocPage.isLast()) { long nextAllocPageNumber = allocPage.getNextAllocPageNumber(); long nextAllocPageOffset = allocPage.getNextAllocPageOffset(); allocPage.unlatch(); allocPage = null; allocPage = (AllocPage)handle.getAllocPage(nextAllocPageNumber); } } catch (StandardException se) { if (allocPage != null) allocPage.unlatch(); allocPage = null; } return allocPage; } /* Make a new alloc page, latch it with the passed in container handle. */ private AllocPage makeAllocPage(RawTransaction ntt, BaseContainerHandle handle, long pageNumber, long pageOffset, int containerInfoSize) throws StandardException { if (SanityManager.DEBUG) { if (containerInfoSize != 0 && containerInfoSize != CONTAINER_INFO_SIZE) SanityManager.THROWASSERT( "expect 0 or " + CONTAINER_INFO_SIZE + ", got " + containerInfoSize); if (pageNumber != FIRST_ALLOC_PAGE_NUMBER && containerInfoSize != 0) SanityManager.THROWASSERT( "Not first alloc page but container info size " + containerInfoSize); } // argument to the create is an array of ints // 0'th element is the page format // 1'st element is whether or not to sync the page to disk // 2'nd element is the pagesize // 3'rd element is spareSpace // 4'th element is number of bytes to reserve for the container header // 5'th element is the minimumRecordSize // NOTE: the arg list here must match the one in allocPage // No I/O at all if this new page is requested as part of a create // and load statement or this new alloc page is in a temporary // container. // In the former case, BaseContainer will allow the MODE_UNLOGGED // bit to go thru to the nested top transaction alloc handle. // In the later case, there is no nested top transaction and the // alloc handle is the user handle, which is UNLOGGED. boolean noIO = (handle.getMode() & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED; int[] createAllocPageArgs = new int[ALLOC_PAGE_ARG_NUM]; createAllocPageArgs[0] = AllocPage.FORMAT_NUMBER; createAllocPageArgs[1] = noIO ? 
0 : CachedPage.WRITE_SYNC; createAllocPageArgs[2] = pageSize; createAllocPageArgs[3] = 0; // allocation page has no need for spare createAllocPageArgs[4] = containerInfoSize; createAllocPageArgs[5] = minimumRecordSize; if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(SPACE_TRACE)) { SanityManager.DEBUG( SPACE_TRACE, "making new allocation page at " + pageNumber); } } if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) { // RESOLVE: make sure the following is true // // firstAllocPageNumber and Offset can be set and access without // synchronization since the first allocation page is // created as part of the container create, this value is set // before any other transaction has a chance to open the container. // Once set, the first allocation page does not move or change // position firstAllocPageNumber = pageNumber; firstAllocPageOffset = pageOffset; } PageKey pkey = new PageKey(identity, pageNumber); // return a latched new alloc page return (AllocPage)initPage(handle, pkey, createAllocPageArgs, pageOffset, false, /* not reuse */ false /* not overflow */); } /** Initialize a page @return a latched page that has been initialized. @param allochandle the contianer handle to initialize the page with - the ntt @param pkey the page number of the page to be initialized @param createArgs the int array for page creation @param reuse is true if we are reusing a page that has already been initialized once @exception StandardException Cloudscape Standard error policy */ protected BasePage initPage(BaseContainerHandle allochandle, PageKey pkey, int[] createArgs, long pageOffset, boolean reuse, boolean overflow) throws StandardException { BasePage page = null; boolean releasePage = true; try { if (reuse) // read the page in first { // Cannot go thru the container handle because all read pages are blocked. // do it underneath the handle and directly to the cache. // Nobody can get thru becuase getPage will block at getting the alloc page. if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(SPACE_TRACE)) { SanityManager.DEBUG( SPACE_TRACE, "reusing page " + pkey); } } page = (BasePage)pageCache.find(pkey); if (page == null) // hmmm? { throw StandardException.newException( SQLState.FILE_REUSE_PAGE_NOT_FOUND, pkey); } } else { if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON(SPACE_TRACE)) { SanityManager.DEBUG( SPACE_TRACE, "allocation new page " + pkey); } } // a brand new page, initialize and a new page in cache page = (BasePage) pageCache.create(pkey, createArgs); if (SanityManager.DEBUG) SanityManager.ASSERT(page != null, "page Cache create return a null page"); } releasePage = false; page = latchPage(allochandle, page, true /* may need to wait, track3822 */); if (page == null) { throw StandardException.newException( SQLState.FILE_NEW_PAGE_NOT_LATCHED, pkey); } // page is either brand new or is read from disk, in either case, // it knows how to get itself initialized. int initPageFlag = 0; if (reuse) initPageFlag |= BasePage.INIT_PAGE_REUSE; if (overflow) initPageFlag |= BasePage.INIT_PAGE_OVERFLOW; if (reuse && isReusableRecordId()) initPageFlag |= BasePage.INIT_PAGE_REUSE_RECORDID; page.initPage(initPageFlag, pageOffset); page.setContainerRowCount(estimatedRowCount); } finally { if (releasePage && page != null) { // release the new page from cache if it errors // out before the exclusive lock is set pageCache.release((Cacheable)page); page = null; } } return page; } /** Get a page in the container. Get User page is the generic base routine for all user (client to raw store) getPage. 
This routine coordinate with allocation/deallocation to ensure that no page can be gotten from the container while page is in the middle of being allocated or deallocated. This routine latches the page. @param handle the container handle @param pageNumber the page number of the page to get @param overflowOK if true then an overflow page is OK, if false, then only non-overflow page is OK @param wait if true then wait for a latch @return the latched page <BR> MT - thread safe @exception StandardException Standard Cloudscape error policy */ private BasePage getUserPage(BaseContainerHandle handle, long pageNumber, boolean overflowOK, boolean wait) throws StandardException { if (SanityManager.DEBUG) { SanityManager.ASSERT( pageNumber != FIRST_ALLOC_PAGE_NUMBER, "getUserPage trying to get an alloc page, pageNumber = " + pageNumber); if (pageNumber < ContainerHandle.FIRST_PAGE_NUMBER) SanityManager.THROWASSERT("pageNumber = " + pageNumber); } if (pageNumber < ContainerHandle.FIRST_PAGE_NUMBER) return null; if (getCommittedDropState()) // committed and dropped, cannot get a page return null; if (!pageValid(handle, pageNumber)) { return null; } // RESOLVE: no translation! PageKey pageSearch = new PageKey(identity, pageNumber); BasePage page = (BasePage)pageCache.find(pageSearch); if (page == null) { return page; } // latch the page if (latchPage(handle,page,wait) == null) { // page was already released from cache return null; } // double check for overflow and deallocated page // a page that was valid before maybe invalid by now if it was // deallocated in the interum. // a page that is invalid can also become valid in the interim, but // we do not handle that. The client must supply other locking // mechanism to prevent that (an allocatino happenning where there are // readers) if that is needed if ((page.isOverflowPage() && !overflowOK) || (page.getPageStatus() != BasePage.VALID_PAGE)) { // unlatch releases page from cache, see StoredPage.releaseExclusive() page.unlatch(); page = null; } return page; } protected void trackUnfilledPage(long pagenumber, boolean unfilled) { if (!dataFactory.isReadOnly()) allocCache.trackUnfilledPage(pagenumber, unfilled); } /** Get a valid (non-deallocated or free) page in the container. Overflow page is OK. Resulting page is latched. <BR> MT - thread safe @exception StandardException Standard Cloudscape error policy */ protected BasePage getPage(BaseContainerHandle handle, long pageNumber, boolean wait) throws StandardException { return getUserPage(handle, pageNumber, true /* overflow page OK */, wait); } /** Get any old page - turn off all validation @exception StandardException Cloudscape Standard error policy */ protected BasePage getAnyPage(BaseContainerHandle handle, long pageNumber) throws StandardException { // get AllocPage get a page without any validation (exception a // committed dropped container) if (getCommittedDropState()) // committed and dropped, cannot get a page return null; // make sure alloc cache has no stale info synchronized(allocCache) { allocCache.invalidate(); } PageKey pageSearch = new PageKey(identity, pageNumber); BasePage page = (BasePage) pageCache.find(pageSearch); return page; } /** * ReCreate a page for rollforward recovery. * <p> * During redo recovery it is possible for the system to try to redo * the creation of a page (ie. going from non-existence to version 0). 
* It first trys to read the page from disk, but a few different types * of errors can occur: * o the page does not exist at all on disk, this can happen during * rollforward recovery applied to a backup where the file was * copied and the page was added to the file during the time frame * of the backup but after the physical file was copied. * o space in the file exists, but it was never initalized. This * can happen if you happen to crash at just the right moment during * the allocation process. Also * on some OS's it is possible to read from a part of the file that * was not ever written - resulting in garbage from the store's * point of view (often the result is all 0's). * * All these errors are easy to recover from as the system can easily * create a version 0 from scratch and write it to disk. * * Because the system does not sync allocation of data pages, it is also * possible at this point that whlie writing the version 0 to disk to * create it we may encounter an out of disk space error (caught in this * routine as a StandardException from the create() call. We can't * recovery from this without help from outside, so the caught exception * is nested and a new exception thrown which the recovery system will * output to the user asking them to check their disk for space/errors. * * @exception StandardException Standard exception policy. **/ protected BasePage reCreatePageForRedoRecovery( BaseContainerHandle handle, int pageFormat, long pageNumber, long pageOffset) throws StandardException { // recreating a page should be done only if are in the middle of // rollforward recovery or if derby.storage.patchInitPageRecoverError // is set to true. //check if we are in rollforward recovery boolean rollForwardRecovery = ((RawTransaction)handle.getTransaction()).inRollForwardRecovery(); if (!rollForwardRecovery && !(PropertyUtil.getSystemBoolean( RawStoreFactory.PATCH_INITPAGE_RECOVER_ERROR))) { return null; } // RESOLVE: first need to verify that the page is really NOT in the // container! // no address translation necessary PageKey pkey = new PageKey(identity, pageNumber); int[] reCreatePageArgs = null; if (pageFormat == StoredPage.FORMAT_NUMBER) { reCreatePageArgs = new int[STORED_PAGE_ARG_NUM]; reCreatePageArgs[0] = pageFormat; reCreatePageArgs[1] = CachedPage.WRITE_SYNC; reCreatePageArgs[2] = pageSize; reCreatePageArgs[3] = spareSpace; reCreatePageArgs[4] = minimumRecordSize; } else if (pageFormat == AllocPage.FORMAT_NUMBER) { reCreatePageArgs = new int[ALLOC_PAGE_ARG_NUM]; // only the first allocation page have borrowed space for the // container info int containerInfoSize = 0; if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) { containerInfoSize = CONTAINER_INFO_SIZE; firstAllocPageNumber = pageNumber; firstAllocPageOffset = pageOffset; } reCreatePageArgs[0] = pageFormat; reCreatePageArgs[1] = CachedPage.WRITE_SYNC; reCreatePageArgs[2] = pageSize; reCreatePageArgs[3] = 0; // allocation page has no need for spare reCreatePageArgs[4] = containerInfoSize; reCreatePageArgs[5] = minimumRecordSize; } else { throw StandardException.newException( SQLState.DATA_UNKNOWN_PAGE_FORMAT, pkey); } if (SanityManager.DEBUG) { if (SanityManager.DEBUG_ON("LoadTran")) SanityManager.DEBUG_PRINT( "Trace", "recreating page " + pkey + " for load tran"); } // Can't just call initPage because that wants to log an initPage // operation, whereas we are here because of an initPage operation in // the log already. 
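// So the page image is built directly through pageCache.create() and
// latched here without writing any new log record; redo then replays
// the already-logged initPage work against this freshly created page.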
BasePage page = null; boolean releasePage = true; try { try { // a brand new page, initialize a new page in cache page = (BasePage) pageCache.create(pkey, reCreatePageArgs); } catch (StandardException se) { throw StandardException.newException( SQLState.FILE_NEW_PAGE_DURING_RECOVERY, se, pkey); } if (page != null) { releasePage = false; page = latchPage(handle, page, false /* never need to wait */); if (page == null) { throw StandardException.newException( SQLState.FILE_NEW_PAGE_NOT_LATCHED, pkey); } } else { throw StandardException.newException( SQLState.FILE_NEW_PAGE_DURING_RECOVERY, pkey); } } finally { if (releasePage && page != null) { // release the new page from cache if it errors out before // the exclusive lock is set error in roll forward recovery. // , we are doomed anyway pageCache.release((Cacheable)page); page = null; } } return page; } /** Get an alloc page - only accessible to the raw store (container and recovery) @exception StandardException Cloudscape Standard error policy */ protected BasePage getAllocPage(long pageNumber) throws StandardException { if (getCommittedDropState()) // committed and dropped, cannot get a page return null; PageKey pageSearch = new PageKey(identity, pageNumber); BasePage page = (BasePage) pageCache.find(pageSearch); if (SanityManager.DEBUG) { if (page == null) SanityManager.THROWASSERT( "getting a null alloc page page " + getIdentity() + pageNumber); if ( ! (page instanceof AllocPage)) SanityManager.THROWASSERT( "trying to get a user page as an alloc page " + getIdentity() + pageNumber); } // assuming that allocation page lives in the page cache... return page; } /** Get only a valid, non-overflow page. If page number is either invalid or overflow, returns null @exception StandardException Cloudscape Standard error policy */ protected BasePage getHeadPage(BaseContainerHandle handle, long pageNumber, boolean wait) throws StandardException { return getUserPage(handle, pageNumber, false /* overflow not ok */, wait); } /** Get the first valid page in the container @exception StandardException Cloudscape Standard error policy */ protected BasePage getFirstHeadPage(BaseContainerHandle handle, boolean wait) throws StandardException { return getNextHeadPage(handle, ContainerHandle.FIRST_PAGE_NUMBER-1, wait); } /** Get the next page in the container. @exception StandardException Standard Cloudscape error policy */ protected BasePage getNextHeadPage(BaseContainerHandle handle, long pageNumber, boolean wait) throws StandardException { long nextNumber; while(true) { synchronized(allocCache) { // ask the cache for the next pagenumber nextNumber = allocCache.getNextValidPage(handle, pageNumber, firstAllocPageNumber); } if (nextNumber == ContainerHandle.INVALID_PAGE_NUMBER) return null; // optimistically go for the next page BasePage p = getUserPage(handle, nextNumber, false /* no overflow page*/, wait); if (p != null) return p; pageNumber = nextNumber; } } private BasePage getInsertablePage(BaseContainerHandle handle, long pageNumber, boolean wait, boolean overflowOK) throws StandardException { if (pageNumber == ContainerHandle.INVALID_PAGE_NUMBER) return null; BasePage p = getUserPage(handle, pageNumber, overflowOK, wait); if (p != null) { // make sure the page is not too full if (!p.allowInsert()) { p.unlatch(); p = null; // it is too full, make sure we are tracking it so we won't // see it again. allocCache.trackUnfilledPage(pageNumber, false); } } /* RESOLVE track 3757 Need to check if this fix resolves the bug. 
This is commented out because we can't conclude here that this is not a user page, it may just be that we failed to get a latch on the page. In a high contention scenario this could cause alot of relatively empty pages to not be considered for insert. TODO May be a good idea to move the trackUnfilledPage call below to some of the lines in the getUserPage method. else { // it is not a user page, make sure we are tracking its fillness so // we won't consider it as a 1/2 filled page ever allocCache.trackUnfilledPage(pageNumber, false); } */ return p; } /** * Get candidate page to move a row for compressing the table. * <p> * The caller is moving rows from the end of the table toward the beginning, * with the goal of freeing up a block of empty pages at the end of the * container which can be returned to the OS. * <p> * On entry pageno will be latched by the caller. Only return pages with * numbers below pageno. Attempting to return pageno will result in a * latch/latch deadlock on the same thread. * * @exception StandardException Standard exception policy. **/ protected BasePage getPageForCompress( BaseContainerHandle handle, int flag, long pageno) throws StandardException { BasePage p = null; boolean getLastInserted = (flag & ContainerHandle.GET_PAGE_UNFILLED) == 0; if (getLastInserted) { // There is nothing protecting lastInsertePage from being changed // by another thread. Make a local copy. long localLastInsertedPage = getLastInsertedPage(); if ((localLastInsertedPage < pageno) && (localLastInsertedPage != ContainerHandle.INVALID_PAGE_NUMBER)) { // First try getting last inserted page. p = getInsertablePage( handle, localLastInsertedPage, true, /* wait */ false /* no overflow page */); // if localLastInsertedPage is not an insertable page, // don't waste time getting it again. if (p == null) { // There is a slight possibility that lastUnfilledPage and // lastInsertedPage will change between the if and the // assignment. The worse that will happen is we lose the // optimization. Don't want to slow down allocation by // adding more synchronization. if (localLastInsertedPage == getLastUnfilledPage()) setLastUnfilledPage( ContainerHandle.INVALID_PAGE_NUMBER); if (localLastInsertedPage == getLastInsertedPage()) setLastInsertedPage( ContainerHandle.INVALID_PAGE_NUMBER); } } } else { // get a relatively unfilled page that is not the last Inserted page long localLastUnfilledPage = getLastUnfilledPage(); if (localLastUnfilledPage == ContainerHandle.INVALID_PAGE_NUMBER || localLastUnfilledPage >= pageno || localLastUnfilledPage == getLastInsertedPage()) { // get an unfilled page, searching from beginning of container. localLastUnfilledPage = getUnfilledPageNumber(handle, 0); } if ((localLastUnfilledPage != ContainerHandle.INVALID_PAGE_NUMBER) && (localLastUnfilledPage < pageno)) { p = getInsertablePage( handle, localLastUnfilledPage, true, false); } // return this page for insert if (p != null) { setLastUnfilledPage(localLastUnfilledPage); setLastInsertedPage(localLastUnfilledPage); } } return p; } /** Get a potentially suitable page for insert and latch it. @exception StandardException Standard Cloudscape error policy */ protected BasePage getPageForInsert(BaseContainerHandle handle, int flag) throws StandardException { BasePage p = null; boolean getLastInserted = (flag & ContainerHandle.GET_PAGE_UNFILLED) == 0; if (getLastInserted) { // There is nothing protecting lastInsertePage from being changed // by another thread. Make a local copy. 
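// getLastInsertedPage() returns the single cached candidate in the
// common case; once the container has been switched into
// multi-insert-page mode it round-robins through a small array of
// candidate page numbers, spreading concurrent inserters across pages.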
long localLastInsertedPage = getLastInsertedPage(); if (localLastInsertedPage != ContainerHandle.INVALID_PAGE_NUMBER) { // First try getting last allocated page, NOWAIT p = getInsertablePage(handle, localLastInsertedPage, false, /* wait */ false /* no overflow page */); if (p == null) { // most likely we could not get the latch NOWAIT, try again // with a new page, and tell the system to switch to // multi-page mode. /* switchToMultiInsertPageMode(handle); */ localLastInsertedPage = getLastInsertedPage(); p = getInsertablePage(handle, localLastInsertedPage, true, /* wait */ false /* no overflow page */); } } // if lastUnfilledPage is not an insertable page, don't waste time // getting it again. if (p == null) { // There is a slight possibility that lastUnfilledPage and // lastInsertedPage will change between the if and the // assignment. The worse that will happen is we lose the // optimization. Don't want to slow down allocation by adding // more synchronization. if (localLastInsertedPage == getLastUnfilledPage()) setLastUnfilledPage(ContainerHandle.INVALID_PAGE_NUMBER); if (localLastInsertedPage == getLastInsertedPage()) setLastInsertedPage(ContainerHandle.INVALID_PAGE_NUMBER); } } else // get a relatively unfilled page that is not { // the last Inserted page long localLastUnfilledPage = getLastUnfilledPage(); if (localLastUnfilledPage == ContainerHandle.INVALID_PAGE_NUMBER || localLastUnfilledPage == getLastInsertedPage()) localLastUnfilledPage = getUnfilledPageNumber(handle, localLastUnfilledPage); if (localLastUnfilledPage != ContainerHandle.INVALID_PAGE_NUMBER) { // try the last unfilled page we found - this could be // different from lastInserted if the last unfilled one we // found does not have enough space for the insert and the // client wants to get a brand new page. p = getInsertablePage(handle, localLastUnfilledPage, true, false); // try again if (p == null) { localLastUnfilledPage = getUnfilledPageNumber(handle, localLastUnfilledPage); if (localLastUnfilledPage != ContainerHandle.INVALID_PAGE_NUMBER) { p = getInsertablePage(handle, localLastUnfilledPage, true, false); } } } // return this page for insert if (p != null) { setLastUnfilledPage(localLastUnfilledPage); setLastInsertedPage(localLastUnfilledPage); } } return p; } /** * Get a latched page. Incase of backup page Latch is necessary to * prevent modification to the page when it is being written to the backup. * Backup process relies on latches to get consistent snap * shot of the page , user level table/page/row locks are NOT * acquired by the online backup mechanism. * * @param handle the container handle used to latch the page * @param pageNumber the page number of the page to get * @return the latched page * @exception StandardException Standard Derby error policy */ protected BasePage getLatchedPage(BaseContainerHandle handle, long pageNumber) throws StandardException { PageKey pageKey = new PageKey(identity, pageNumber); BasePage page = (BasePage) pageCache.find(pageKey); if (SanityManager.DEBUG){ SanityManager.ASSERT(page != null, "page is not found :" + pageKey); } // latch the page page = latchPage(handle, page, true); if (SanityManager.DEBUG){ SanityManager.ASSERT(page.isLatched(), "page is not latched:" + pageKey); } return page; } private long getUnfilledPageNumber(BaseContainerHandle handle, long pagenum) throws StandardException { synchronized(allocCache) { return allocCache. 
getUnfilledPageNumber(handle, firstAllocPageNumber, pagenum); } } /* Cost estimates */ /** <BR>MT - this routine is NOT MT-safe and clients don't need to provide synchronization. @see ContainerHandle#getEstimatedRowCount */ public long getEstimatedRowCount(int flag) { return estimatedRowCount; } /** @see ContainerHandle#setEstimatedRowCount */ public void setEstimatedRowCount(long count, int flag) { boolean readOnly = dataFactory.isReadOnly(); synchronized(this) { estimatedRowCount = count; if (!readOnly) isDirty = true; } } /** Update estimated row count by page as it leaves the cache. The estimated row count is updated without logging! */ protected void updateEstimatedRowCount(int delta) { boolean readOnly = dataFactory.isReadOnly(); synchronized(this) { estimatedRowCount += delta; if (estimatedRowCount < 0) estimatedRowCount = 0; // mark the container as dirty without bumping the container // version because row count changes are not logged. if (!readOnly) isDirty = true; } } /** @see ContainerHandle#getEstimatedPageCount @exception StandardException Standard Cloudscape error policy */ public long getEstimatedPageCount(BaseContainerHandle handle, int flag) throws StandardException { // page count is set once per container materialization in cache if (estimatedPageCount < 0) { synchronized(allocCache) { estimatedPageCount = allocCache.getEstimatedPageCount(handle, firstAllocPageNumber); } } if (SanityManager.DEBUG) SanityManager.ASSERT(estimatedPageCount >= 0, "AllocCache returns negatie estimatedPageCount"); return estimatedPageCount; } /* ** Methods used solely by StoredPage */ /** Read a page into the supplied array. <BR> MT - thread safe @exception IOException error reading page @exception StandardException standard cloudscape error message */ protected abstract void readPage(long pageNumber, byte[] pageData) throws IOException, StandardException; /** Write a page from the supplied array. <BR> MT - thread safe @exception IOException error writing page @exception StandardException Standard Cloudscape error policy */ protected abstract void writePage(long pageNumber, byte[] pageData, boolean syncPage) throws IOException, StandardException; /* * Encryption/decryption */ /** Decrypts a page <BR>MT - MT safe. @exception StandardException Standard Cloudscape error policy */ protected void decryptPage(byte[] pageData, int pageSize) throws StandardException { // because all our page header looks identical, the // checksum is moved to the front so that it will hopefully // encrypt differently from page to page synchronized(this) { if (encryptionBuffer == null || encryptionBuffer.length < pageSize) encryptionBuffer = new byte[pageSize]; int len = dataFactory.decrypt(pageData, 0, pageSize, encryptionBuffer, 0); if (SanityManager.DEBUG) SanityManager.ASSERT(len == pageSize, "Encrypted page length != page length"); // put the checksum where it belongs System.arraycopy(encryptionBuffer, 8, pageData, 0, pageSize-8); System.arraycopy(encryptionBuffer, 0, pageData, pageSize-8, 8); } } /** Encrypts a page. <BR> MT - not safe, call within synchronized block and only use the returned byte array withing synchronized block. 
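<BR> Layout note: the 8-byte checksum that normally lives at the end of
the page is rotated to the front of the buffer before encryption (and
rotated back by decryptPage), so that pages with identical-looking
headers do not produce identical ciphertext prefixes.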
@exception StandardException Standard Cloudscape error policy */ protected byte[] encryptPage(byte[] pageData, int pageSize, byte[] encryptionBuffer, boolean newEngine) throws StandardException { // because all our page header looks identical, move the // checksum to the front so that it will hopefully encrypt // differently from page to page System.arraycopy(pageData, pageSize-8, encryptionBuffer, 0, 8); System.arraycopy(pageData, 0, encryptionBuffer, 8, pageSize-8); int len = dataFactory.encrypt(encryptionBuffer, 0, pageSize, encryptionBuffer, 0, newEngine); if (SanityManager.DEBUG) SanityManager.ASSERT(len == pageSize, "Encrypted page length != page length"); return encryptionBuffer; } /** * Get encryption buffer. * MT - not safe, call within synchronized block and only use the * returned byte array withing synchronized block. * @return byte array to be used for encryping a page. */ protected byte[] getEncryptionBuffer() { if (encryptionBuffer == null || encryptionBuffer.length < pageSize) encryptionBuffer = new byte[pageSize]; return encryptionBuffer; } /* * page preallocation */ /** preAllocate writes out the preallocated pages to disk if necessary. <BR>Make sure the container is large enough and the pages are well formatted. The only reason to do this is to save some I/O during page initialization. Once the initPage log record is written, it is expected that the page really do exist and is well formed or recovery will fail. However, we can gain some performance by writing a bunch of pages at a time rather than one at a time. <BR>If it doesn't make sense for the the implementation to have pre-allocation, just return 0. <BR>If the container is not being logged, don't actually do anything, just return 0. @return number of successfully preallocated page, or 0 if no page has been preallocated @param lastPreallocPagenum the last preallocated page number as known by the allocation page @param preAllocSize try to preallocate this page number of pages. Since only the container knows how many pages are actually on disk, it may determine that certain number of pages that the allocation page thinks need to be preallocated is already allocated, in those case, act as if the preallocation is successful. 
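<BR> For example, if the allocation page thinks the last preallocated
page is 10 and asks for 8 more, but the file on disk already extends
through page 14, an implementation may count pages 11-14 as already
done and only physically extend the file for the remaining 4 pages.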
*/ protected abstract int preAllocate(long lastPreallocPagenum, int preAllocSize); /** Preallocate the pages - actually doing it, called by subclass only */ protected int doPreAllocatePages(long lastPreallocPagenum, int preAllocSize) { if (SanityManager.DEBUG) SanityManager.ASSERT(!dataFactory.isReadOnly(), "how can we be Preallocating pages in a read only database?"); // initialize and a new page in cache int[] createArgs = new int[5]; createArgs[0] = StoredPage.FORMAT_NUMBER; // default is a stored page createArgs[1] = CachedPage.WRITE_NO_SYNC; // write it but no sync createArgs[2] = pageSize; createArgs[3] = spareSpace; createArgs[4] = minimumRecordSize; StoredPage page = new StoredPage(); page.setFactory(dataFactory); boolean error = false; int count = 0; while(count < preAllocSize) { PageKey pkey = new PageKey(identity, lastPreallocPagenum+count+1); try { // create Identity will do a writePage page.createIdentity(pkey, createArgs); // if create identity somehow failed to do a write page if (SanityManager.DEBUG) SanityManager.ASSERT(!page.isDirty(), "create identity failed to do a write page"); page.clearIdentity(); // ready the page for the next loop } catch (StandardException se) { // if something went wrong, stop and return how many we did // successfully error = true; } if (error) break; count++; } return count; } protected int getPageSize() { return pageSize; } protected int getSpareSpace() { return spareSpace; } protected int getMinimumRecordSize() { return minimumRecordSize; } private synchronized void switchToMultiInsertPageMode( BaseContainerHandle handle) throws StandardException { if (lastInsertedPage.length == 1) { long last = lastInsertedPage[0]; lastInsertedPage = new long[4]; lastInsertedPage[0] = last; for (int i = 3; i > 0; i--) { Page page = addPage(handle, false); lastInsertedPage[i] = page.getPageNumber(); page.unlatch(); } } } /* * Setting and getting lastInserted Page and lastUnfilledPage in a thead * safe manner. */ private synchronized long getLastInsertedPage() { if (lastInsertedPage.length == 1) { if (SanityManager.DEBUG) SanityManager.ASSERT(lastInsertedPage_index == 0); // optimize the usual case where no concurrent insert has kicked us // into multi-page mode - ie. only ONE last page. return(lastInsertedPage[0]); } else { long ret = lastInsertedPage[lastInsertedPage_index++]; if (lastInsertedPage_index > (lastInsertedPage.length - 1)) { lastInsertedPage_index = 0; } return(ret); } } private synchronized long getLastUnfilledPage() { return lastUnfilledPage; } private synchronized void initializeLastInsertedPage(int size) { lastInsertedPage = new long[size]; for (int i = lastInsertedPage.length - 1; i >= 0; i--) lastInsertedPage[i] = ContainerHandle.INVALID_PAGE_NUMBER; lastInsertedPage_index = 0; } private synchronized void setLastInsertedPage(long val) { lastInsertedPage[lastInsertedPage_index] = val; } private synchronized void setLastUnfilledPage(long val) { lastUnfilledPage = val; } /* ** Hide our super-classes methods to ensure that cache management ** is correct when the container is obtained and release. */ /** The container is kept by the find() in File.openContainer. 
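<BR> letGo therefore releases the container back to the container
cache after the superclass bookkeeping, balancing the keep taken by
that find().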
*/ protected void letGo(BaseContainerHandle handle) { super.letGo(handle); containerCache.release(this); } protected BasePage latchPage(BaseContainerHandle handle, BasePage foundPage, boolean wait) throws StandardException { if (foundPage == null) return null; BasePage ret = super.latchPage(handle, foundPage, wait); if (ret == null) { // page is still cached pageCache.release((Cacheable) foundPage); } return ret; } /** * backup the container. * * @param handle the container handle. * @param backupLocation location of the backup container. * @exception StandardException Standard Derby error policy */ protected abstract void backupContainer(BaseContainerHandle handle, String backupLocation) throws StandardException; }
false
true
protected BasePage newPage(BaseContainerHandle userHandle, RawTransaction ntt, BaseContainerHandle allocHandle, boolean isOverflow) throws StandardException { // NOTE: we are single threaded thru this method, see MT comment boolean useNTT = (ntt != null); // if ntt is null, use user transaction if (!useNTT) ntt = userHandle.getTransaction(); long lastPage; // last allocated page long lastPreallocPage; // last pre-allcated page long pageNumber; // the page number of the new page PageKey pkey; // the identity of the new page boolean reuse; // if true, we are trying to reuse a page /* in case the page recommeded by allocPage is not committed yet, may /* need to retry a couple of times */ boolean retry; int numtries = 0; long startSearch = lastAllocatedPage; AllocPage allocPage = null; // the alloc page BasePage page = null; // the new page try { do { retry = false; // we don't expect we need to retry synchronized(allocCache) { if (SanityManager.DEBUG) { SanityManager.ASSERT( ntt.getId().equals( allocHandle.getTransaction().getId())); if (useNTT) SanityManager.ASSERT( !ntt.getId().equals( userHandle.getTransaction().getId())); } /* find an allocation page that can handle adding a new * page. * * allocPage is unlatched when the ntt commits. The new * page is initialized by the ntt but the latch is * transfered to the user transaction before the allocPage * is unlatched. The allocPage latch prevents almost any * other reader or writer from finding the new page until * the ntt is committed and the new page is latched by the * user transaction. * * (If the page is being reused, it is possible for another * xact which kept a handle on the reused page to find the * page during the transfer UT -> NTT. If this unlikely * even occurs and the transfer fails [see code relating * to transfer below], we retry from the beginning.) * * After the NTT commits a reader (getNextPageNumber) may * get the page number of the newly allocated page and it * will wait for the new page and latch it when the user * transaction commits, aborts or unlatches the new page. * Whether the user transaction commits or aborts, the new * page stay allocated. * * RESOLVE: before NTT rolls back (or commits) the latch is * released. To repopulate the allocation cache, need to * get either the container lock on add page, or get a per * allocation page lock. * * This blocks all page read (getPage) from accessing this * alloc page in this container until the alloc page is * unlatched. Those who already have a page handle into * this container are unaffected. * * In other words, allocation blocks out reader (of any * page that is managed by this alloc page) by the latch * on the allocation page. * * Note that write page can proceed as usual. */ allocPage = findAllocPageForAdd(allocHandle, ntt, startSearch); allocCache.invalidate(allocPage, allocPage.getPageNumber()); } if (SanityManager.DEBUG) { if (allocPage == null) allocCache.dumpAllocationCache(); SanityManager.ASSERT(allocPage != null, "findAllocPageForAdd returned a null alloc page"); } // // get the next free page's number. // for case 1, page number > lastPreallocPage // for case 2, page number <= lastPage // for case 3, lastPage < page number <= lastPreallocPage // pageNumber = allocPage.nextFreePageNumber(startSearch); // need to distinguish between the following 3 cases: // 1) the page has not been allocate or initalized. // Create it in the page cache and sync it to disk. // 2) the page is being re-allocated. 
// We need to read it in to re-initialize it // 3) the page has been preallocated. // Create it in the page cache and don't sync it to disk // // first find out the current last initialized page and // preallocated page before the new page is added lastPage = allocPage.getLastPagenum(); lastPreallocPage = allocPage.getLastPreallocPagenum(); reuse = pageNumber <= lastPage; // no address translation necessary pkey = new PageKey(identity, pageNumber); if (reuse) { // if re-useing a page, make sure the deallocLock on the new // page is not held. We only need a zero duration lock on // the new page because the allocPage is latched and this // is the only thread which can be looking at this // pageNumber. RecordHandle deallocLock = BasePage.MakeRecordHandle(pkey, RecordHandle.DEALLOCATE_PROTECTION_HANDLE); if (!getDeallocLock(allocHandle, deallocLock, false /* nowait */, true /* zeroDuration */)) { // The transaction which deallocated this page has not // committed yet. Try going to some other page. If // this is the first time we fail to get the dealloc // lock, try from the beginning of the allocated page. // If we already did that and still fail, keep going // until we get a brand new page. if (numtries == 0) { startSearch = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = pageNumber; } else // continue from where we were startSearch = pageNumber; numtries++; // We have to unlatch the allocPage so that if that // transaction rolls back, it won't deadlock with this // transaction. allocPage.unlatch(); allocPage = null; retry = true; } else { // we got the lock, next time start from there lastAllocatedPage = pageNumber; } } else { // we got a new page, next time, start from beginning of // the bit map again if we suspect there are some some // deallocated pages if (numtries > 0) lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; else lastAllocatedPage = pageNumber; } // Retry from the beginning if necessary. if (retry) continue; // If we get past here must have (retry == false) if (SanityManager.DEBUG) { SanityManager.ASSERT(retry == false); } // Now we have verified that the allocPage is latched and we // can get the zeroDuration deallocLock nowait. This means the // transaction which freed the page has committed. Had that // transaction aborted, we would have retried. if (SanityManager.DEBUG) { // ASSERT lastPage <= lastPreallocPage if (lastPage > lastPreallocPage) { SanityManager.THROWASSERT("last page " + lastPage + " > lastPreallocPage " + lastPreallocPage); } } // No I/O at all if this new page is requested as part of a // create and load statement or this new page is in a temporary // container. // // In the former case, BaseContainer will allow the // MODE_UNLOGGED bit to go thru to the nested top transaction // alloc handle. In the later case, there is no nested top // transaction and the alloc handle is the user handle, which // is UNLOGGED. boolean noIO = (allocHandle.getMode() & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED; // If we do not need the I/O (either because we are in a // create_unlogged mode or we are dealing with a temp table), // don't do any preallocation. Otherwise, see if we should be // pre-Allocating page by now. We don't call it before // nextFreePageNumber because finding a reusable page may be // expensive and we don't want to start preAllocation unless // there is no more reusable page. Unless we are called // explicitly to bulk increase the container size in a preload // or in a create container. 
if (!noIO && (bulkIncreaseContainerSize || (pageNumber > lastPreallocPage && pageNumber > PreAllocThreshold))) { allocPage.preAllocatePage( this, PreAllocThreshold, PreAllocSize); } // update last preAllocated Page, it may have been changed by // the preAllocatePage call. We don't want to do the sync if // preAllocatePage already took care of it. lastPreallocPage = allocPage.getLastPreallocPagenum(); boolean prealloced = pageNumber <= lastPreallocPage; // Argument to the create is an array of ints. // The array is only used for new page creation or for creating // a preallocated page, not for reuse. // 0'th element is the page format // 1'st element is whether or not to sync the page to disk // 2'nd element is pagesize // 3'rd element is spareSpace int[] createPageArgs = new int[STORED_PAGE_ARG_NUM]; createPageArgs[0] = StoredPage.FORMAT_NUMBER; createPageArgs[1] = prealloced ? 0 : (noIO ? 0 : CachedPage.WRITE_SYNC); createPageArgs[2] = pageSize; createPageArgs[3] = spareSpace; createPageArgs[4] = minimumRecordSize; // RESOLVE: right now, there is no re-mapping of pages, so // pageOffset = pageNumber*pageSize long pageOffset = pageNumber * pageSize; // initialize a new user page // we first use the NTT to initialize the new page - in case the // allocation failed, it is rolled back with the NTT. // Later, we transfer the latch to the userHandle so it won't be // released when the ntt commits try { page = initPage(allocHandle, pkey, createPageArgs, pageOffset, reuse, isOverflow); } catch (StandardException se) { if (SanityManager.DEBUG) { SanityManager.DEBUG_PRINT("FileContainer", "got exception from initPage:" + "\nreuse = " + reuse + "\ncreatePageArgs[1] = " + createPageArgs[1] + "\nallocPage = " + allocPage ); } allocCache.dumpAllocationCache(); throw se; } if (SanityManager.DEBUG) { SanityManager.ASSERT( page != null, "initPage returns null page"); SanityManager.ASSERT( page.isLatched(), "initPage returns unlatched page"); } // allocate the page in the allocation page bit map allocPage.addPage(this, pageNumber, ntt, userHandle); if (useNTT) { // transfer the page latch from NTT to UT. // // after the page is unlatched by NTT, it is still // protected from being found by almost everybody else // because the alloc page is still latched and the alloc // cache is invalidated. // // However (beetle 3942) it is possible for the page to be // found by threads who specifically ask for this // pagenumber (e.g. HeapPostCommit). // We may find that such a thread has latched the page. // We shouldn't wait for it because we have the alloc page // latch, and this could cause deadlock (e.g. // HeapPostCommit might call removePage and this would wait // on the alloc page). // // We may instead find that we can latch the page, but that // another thread has managed to get hold of it during the // transfer and either deallocate it or otherwise change it // (add rows, delete rows etc.) // // Since this doesn't happen very often, we retry in these // 2 cases (we give up the alloc page and page and we start // this method from scratch). // // If the lock manager were changed to allow latches to be // transferred between transactions, wouldn't need to // unlatch to do the transfer, and would avoid having to // retry in these cases (beetle 4011). 
page.unlatch(); page = null; // need to find it in the cache again since unlatch also // unkept the page from the cache page = (BasePage)pageCache.find(pkey); page = latchPage( userHandle, page, false /* don't wait, it might deadlock */); if (page == null || // recordCount will only return true if there are no // rows (including deleted rows) page.recordCount() != 0 || page.getPageStatus() != BasePage.VALID_PAGE) { retry = true; if (page != null) { page.unlatch(); page = null; } allocPage.unlatch(); allocPage = null; } } // if ntt is null, no need to transfer. Page is latched by user // transaction already. Will be no need to retry. // the alloc page is unlatched in the finally block. } while (retry == true); // At this point, should have a page suitable for returning if (SanityManager.DEBUG) SanityManager.ASSERT(page.isLatched()); } catch (StandardException se) { if (page != null) page.unlatch(); page = null; throw se; // rethrow error } finally { if (!useNTT && allocPage != null) { allocPage.unlatch(); allocPage = null; } // NTT is committed by the caller } if (SanityManager.DEBUG) SanityManager.ASSERT(page.isLatched()); // if bulkIncreaseContainerSize is set, that means this newPage call // may have greatly expanded the container size due to preallocation. // Regardless of how many page it actually created, reset preAllocSize // to the default so we won't attempt to always preallocate 1000 pages // at a time in the future. if (bulkIncreaseContainerSize) { bulkIncreaseContainerSize = false; PreAllocSize = DEFAULT_PRE_ALLOC_SIZE; } if (!isOverflow && page != null) setLastInsertedPage(pageNumber); // increase estimated page count - without any synchronization or // logging, this is an estimate only if (estimatedPageCount >= 0) estimatedPageCount++; if (!this.identity.equals(page.getPageId().getContainerId())) { if (SanityManager.DEBUG) { SanityManager.THROWASSERT( "just created a new page from a different container" + "\n this.identity = " + this.identity + "\n page.getPageId().getContainerId() = " + page.getPageId().getContainerId() + "\n userHandle is: " + userHandle + "\n allocHandle is: " + allocHandle + "\n this container is: " + this); } throw StandardException.newException( SQLState.DATA_DIFFERENT_CONTAINER, this.identity, page.getPageId().getContainerId()); } return page; // return the newly added page }
protected BasePage newPage(BaseContainerHandle userHandle, RawTransaction ntt, BaseContainerHandle allocHandle, boolean isOverflow) throws StandardException { // NOTE: we are single threaded thru this method, see MT comment boolean useNTT = (ntt != null); // if ntt is null, use user transaction if (!useNTT) ntt = userHandle.getTransaction(); long lastPage; // last allocated page long lastPreallocPage; // last pre-allcated page long pageNumber; // the page number of the new page PageKey pkey; // the identity of the new page boolean reuse; // if true, we are trying to reuse a page /* in case the page recommeded by allocPage is not committed yet, may /* need to retry a couple of times */ boolean retry; int numtries = 0; long startSearch = lastAllocatedPage; AllocPage allocPage = null; // the alloc page BasePage page = null; // the new page try { do { retry = false; // we don't expect we need to retry synchronized(allocCache) { if (SanityManager.DEBUG) { SanityManager.ASSERT( ntt.getId().equals( allocHandle.getTransaction().getId())); if (useNTT) SanityManager.ASSERT( !ntt.getId().equals( userHandle.getTransaction().getId())); } /* find an allocation page that can handle adding a new * page. * * allocPage is unlatched when the ntt commits. The new * page is initialized by the ntt but the latch is * transfered to the user transaction before the allocPage * is unlatched. The allocPage latch prevents almost any * other reader or writer from finding the new page until * the ntt is committed and the new page is latched by the * user transaction. * * (If the page is being reused, it is possible for another * xact which kept a handle on the reused page to find the * page during the transfer UT -> NTT. If this unlikely * even occurs and the transfer fails [see code relating * to transfer below], we retry from the beginning.) * * After the NTT commits a reader (getNextPageNumber) may * get the page number of the newly allocated page and it * will wait for the new page and latch it when the user * transaction commits, aborts or unlatches the new page. * Whether the user transaction commits or aborts, the new * page stay allocated. * * RESOLVE: before NTT rolls back (or commits) the latch is * released. To repopulate the allocation cache, need to * get either the container lock on add page, or get a per * allocation page lock. * * This blocks all page read (getPage) from accessing this * alloc page in this container until the alloc page is * unlatched. Those who already have a page handle into * this container are unaffected. * * In other words, allocation blocks out reader (of any * page that is managed by this alloc page) by the latch * on the allocation page. * * Note that write page can proceed as usual. */ allocPage = findAllocPageForAdd(allocHandle, ntt, startSearch); allocCache.invalidate(allocPage, allocPage.getPageNumber()); } if (SanityManager.DEBUG) { if (allocPage == null) allocCache.dumpAllocationCache(); SanityManager.ASSERT(allocPage != null, "findAllocPageForAdd returned a null alloc page"); } // // get the next free page's number. // for case 1, page number > lastPreallocPage // for case 2, page number <= lastPage // for case 3, lastPage < page number <= lastPreallocPage // pageNumber = allocPage.nextFreePageNumber(startSearch); // need to distinguish between the following 3 cases: // 1) the page has not been allocate or initalized. // Create it in the page cache and sync it to disk. // 2) the page is being re-allocated. 
// We need to read it in to re-initialize it // 3) the page has been preallocated. // Create it in the page cache and don't sync it to disk // // first find out the current last initialized page and // preallocated page before the new page is added lastPage = allocPage.getLastPagenum(); lastPreallocPage = allocPage.getLastPreallocPagenum(); reuse = pageNumber <= lastPage; // no address translation necessary pkey = new PageKey(identity, pageNumber); if (reuse) { // if re-useing a page, make sure the deallocLock on the new // page is not held. We only need a zero duration lock on // the new page because the allocPage is latched and this // is the only thread which can be looking at this // pageNumber. RecordHandle deallocLock = BasePage.MakeRecordHandle(pkey, RecordHandle.DEALLOCATE_PROTECTION_HANDLE); if (!getDeallocLock(allocHandle, deallocLock, false /* nowait */, true /* zeroDuration */)) { // The transaction which deallocated this page has not // committed yet. Try going to some other page. If // this is the first time we fail to get the dealloc // lock, try from the beginning of the allocated page. // If we already did that and still fail, keep going // until we get a brand new page. if (numtries == 0) { startSearch = ContainerHandle.INVALID_PAGE_NUMBER; lastAllocatedPage = pageNumber; } else // continue from where we were startSearch = pageNumber; numtries++; // We have to unlatch the allocPage so that if that // transaction rolls back, it won't deadlock with this // transaction. allocPage.unlatch(); allocPage = null; retry = true; } else { // we got the lock, next time start from there lastAllocatedPage = pageNumber; } } else { // we got a new page, next time, start from beginning of // the bit map again if we suspect there are some some // deallocated pages if (numtries > 0) lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER; else lastAllocatedPage = pageNumber; } // Retry from the beginning if necessary. if (retry) continue; // If we get past here must have (retry == false) if (SanityManager.DEBUG) { SanityManager.ASSERT(retry == false); } // Now we have verified that the allocPage is latched and we // can get the zeroDuration deallocLock nowait. This means the // transaction which freed the page has committed. Had that // transaction aborted, we would have retried. if (SanityManager.DEBUG) { // ASSERT lastPage <= lastPreallocPage if (lastPage > lastPreallocPage) { SanityManager.THROWASSERT("last page " + lastPage + " > lastPreallocPage " + lastPreallocPage); } } // No I/O at all if this new page is requested as part of a // create and load statement or this new page is in a temporary // container. // // In the former case, BaseContainer will allow the // MODE_UNLOGGED bit to go thru to the nested top transaction // alloc handle. In the later case, there is no nested top // transaction and the alloc handle is the user handle, which // is UNLOGGED. boolean noIO = (allocHandle.getMode() & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED; // If we do not need the I/O (either because we are in a // create_unlogged mode or we are dealing with a temp table), // don't do any preallocation. Otherwise, see if we should be // pre-Allocating page by now. We don't call it before // nextFreePageNumber because finding a reusable page may be // expensive and we don't want to start preAllocation unless // there is no more reusable page. Unless we are called // explicitly to bulk increase the container size in a preload // or in a create container. 
if (!noIO && (bulkIncreaseContainerSize || (pageNumber > lastPreallocPage && pageNumber > PreAllocThreshold))) { allocPage.preAllocatePage( this, PreAllocThreshold, PreAllocSize); } // update last preAllocated Page, it may have been changed by // the preAllocatePage call. We don't want to do the sync if // preAllocatePage already took care of it. lastPreallocPage = allocPage.getLastPreallocPagenum(); boolean prealloced = pageNumber <= lastPreallocPage; // Argument to the create is an array of ints. // The array is only used for new page creation or for creating // a preallocated page, not for reuse. // 0'th element is the page format // 1'st element is whether or not to sync the page to disk // 2'nd element is pagesize // 3'rd element is spareSpace int[] createPageArgs = new int[STORED_PAGE_ARG_NUM]; createPageArgs[0] = StoredPage.FORMAT_NUMBER; createPageArgs[1] = prealloced ? 0 : (noIO ? 0 : CachedPage.WRITE_SYNC); createPageArgs[2] = pageSize; createPageArgs[3] = spareSpace; createPageArgs[4] = minimumRecordSize; // RESOLVE: right now, there is no re-mapping of pages, so // pageOffset = pageNumber*pageSize long pageOffset = pageNumber * pageSize; // initialize a new user page // we first use the NTT to initialize the new page - in case the // allocation failed, it is rolled back with the NTT. // Later, we transfer the latch to the userHandle so it won't be // released when the ntt commits try { page = initPage(allocHandle, pkey, createPageArgs, pageOffset, reuse, isOverflow); } catch (StandardException se) { if (SanityManager.DEBUG) { SanityManager.DEBUG_PRINT("FileContainer", "got exception from initPage:" + "\nreuse = " + reuse + "\ncreatePageArgs[1] = " + createPageArgs[1] + "\nallocPage = " + allocPage ); } allocCache.dumpAllocationCache(); throw se; } if (SanityManager.DEBUG) { SanityManager.ASSERT( page != null, "initPage returns null page"); SanityManager.ASSERT( page.isLatched(), "initPage returns unlatched page"); } // allocate the page in the allocation page bit map allocPage.addPage(this, pageNumber, ntt, userHandle); if (useNTT) { // transfer the page latch from NTT to UT. // // after the page is unlatched by NTT, it is still // protected from being found by almost everybody else // because the alloc page is still latched and the alloc // cache is invalidated. // // However it is possible for the page to be // found by threads who specifically ask for this // pagenumber (e.g. HeapPostCommit). // We may find that such a thread has latched the page. // We shouldn't wait for it because we have the alloc page // latch, and this could cause deadlock (e.g. // HeapPostCommit might call removePage and this would wait // on the alloc page). // // We may instead find that we can latch the page, but that // another thread has managed to get hold of it during the // transfer and either deallocated it or otherwise change it // (add rows, delete rows etc.) // // Since this doesn't happen very often, we retry in these // 2 cases (we give up the alloc page and page and we start // this method from scratch). // // If the lock manager were changed to allow latches to be // transferred between transactions, wouldn't need to // unlatch to do the transfer, and would avoid having to // retry in these cases (DERBY-2337). 
page.unlatch(); page = null; // need to find it in the cache again since unlatch also // unkept the page from the cache page = (BasePage)pageCache.find(pkey); page = latchPage( userHandle, page, false /* don't wait, it might deadlock */); if (page == null || // recordCount will only return true if there are no // rows (including deleted rows) page.recordCount() != 0 || page.getPageStatus() != BasePage.VALID_PAGE) { retry = true; if (page != null) { page.unlatch(); page = null; } allocPage.unlatch(); allocPage = null; } } // if ntt is null, no need to transfer. Page is latched by user // transaction already. Will be no need to retry. // the alloc page is unlatched in the finally block. } while (retry == true); // At this point, should have a page suitable for returning if (SanityManager.DEBUG) SanityManager.ASSERT(page.isLatched()); } catch (StandardException se) { if (page != null) page.unlatch(); page = null; throw se; // rethrow error } finally { if (!useNTT && allocPage != null) { allocPage.unlatch(); allocPage = null; } // NTT is committed by the caller } if (SanityManager.DEBUG) SanityManager.ASSERT(page.isLatched()); // if bulkIncreaseContainerSize is set, that means this newPage call // may have greatly expanded the container size due to preallocation. // Regardless of how many page it actually created, reset preAllocSize // to the default so we won't attempt to always preallocate 1000 pages // at a time in the future. if (bulkIncreaseContainerSize) { bulkIncreaseContainerSize = false; PreAllocSize = DEFAULT_PRE_ALLOC_SIZE; } if (!isOverflow && page != null) setLastInsertedPage(pageNumber); // increase estimated page count - without any synchronization or // logging, this is an estimate only if (estimatedPageCount >= 0) estimatedPageCount++; if (!this.identity.equals(page.getPageId().getContainerId())) { if (SanityManager.DEBUG) { SanityManager.THROWASSERT( "just created a new page from a different container" + "\n this.identity = " + this.identity + "\n page.getPageId().getContainerId() = " + page.getPageId().getContainerId() + "\n userHandle is: " + userHandle + "\n allocHandle is: " + allocHandle + "\n this container is: " + this); } throw StandardException.newException( SQLState.DATA_DIFFERENT_CONTAINER, this.identity, page.getPageId().getContainerId()); } return page; // return the newly added page }
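The newPage pair above differs only in its comments (the beetle 3942/4011 references become a DERBY-2337 pointer), but the rule those comments describe — never wait for the new page's latch while the allocation page is still latched, give up and retry instead — is the interesting part of the method. Below is a minimal, self-contained sketch of that deadlock-avoidance retry loop using java.util.concurrent locks; the class and lock names are illustrative stand-ins, not Derby APIs.

import java.util.concurrent.locks.ReentrantLock;

public class NonBlockingRelatchSketch {

    // Illustrative stand-ins for Derby's alloc-page latch and page latch;
    // these are not Derby classes.
    private static final ReentrantLock allocLatch = new ReentrantLock();
    private static final ReentrantLock pageLatch = new ReentrantLock();

    public static void main(String[] args) {
        boolean retry;
        do {
            retry = false;
            allocLatch.lock();            // analogous to latching the alloc page
            try {
                // Never *wait* for the page latch while the alloc latch is held:
                // blocking here is what could deadlock with a thread (such as a
                // post-commit worker) that already holds the page and now needs
                // the alloc page. Fail fast and retry from the top instead.
                if (pageLatch.tryLock()) {
                    try {
                        System.out.println("page latched safely; initialize it here");
                    } finally {
                        pageLatch.unlock();
                    }
                } else {
                    retry = true;
                }
            } finally {
                allocLatch.unlock();
            }
        } while (retry);
    }
}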
diff --git a/src/org/jruby/debug/Util.java b/src/org/jruby/debug/Util.java index 31448b6..8f56421 100644 --- a/src/org/jruby/debug/Util.java +++ b/src/org/jruby/debug/Util.java @@ -1,126 +1,130 @@ /* * header & license * Copyright (c) 2007-2008 Martin Krauskopf * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package org.jruby.debug; import java.io.File; import java.io.IOException; import java.util.logging.Logger; import org.jruby.RubyBoolean; import org.jruby.runtime.RubyEvent; import org.jruby.runtime.builtin.IRubyObject; import static org.jruby.runtime.RubyEvent.*; final class Util { private static final Logger LOGGER = Logger.getLogger(Util.class.getName()); private final static CharSequence JRUBY_BUILTIN_PATH_PART = "builtin" + File.separator + "javasupport"; private final static CharSequence JRUBY_JAR_PART = "lib" + File.separator + "jruby.jar!" + File.separator; private Util() {/* forbid instances */} /** * Convenient delegate to {@link RubyBoolean#newBoolean} using <em>ro</em>'s * runtime. */ static RubyBoolean toRBoolean(IRubyObject ro, boolean value) { return RubyBoolean.newBoolean(ro.getRuntime(), value); } /** * Convenient delegate to {@link org.jruby.Ruby#getNil} using * <em>recv</em>'s runtime. */ static IRubyObject nil(final IRubyObject ro) { return ro.getRuntime().getNil(); } static String relativizeToPWD(final String path) { return Util.relativizeFile(System.getProperty("user.dir"), path); } static String relativizeFile(final String base, final String filepath) { String result = filepath; if (filepath.startsWith(base)) { result = filepath.substring(base.length() + 1); } return result; } /** * Tests whether the give files, being in whatever form (relative, absolute, * containing '..', etc.) points to the same files, using canonical paths. 
* * @param first to be compared with second * @param second to be compared with first * @return if first and second are/points to the same files */ static boolean areSameFiles(String first, String second) { try { String firstF = new File(first).getCanonicalPath(); String secondF = new File(second).getCanonicalPath(); return firstF.equals(secondF); } catch (IOException ioe) { LOGGER.fine("Cannot resolve cannocical path (falling back to String comparison):" + "\n first: " + first + "\n second: " + second + "\n ioe:" + ioe); return first.equals(second); } } static void logEvent(RubyEvent event, String file, int line, String methodName, IRubyObject klass) { LOGGER.info(file + ":" + line + "[" + event + "]" +klass + "#" + methodName + "\n"); } static boolean isJRubyCore(final String file) { return file == null || file.contains(JRUBY_BUILTIN_PATH_PART) || file.contains(JRUBY_JAR_PART); } static boolean isLineEvent(String event) { return LINE.getName().equals(event); } static RubyEvent typeForEvent(final String event) { if ("line".equals(event)) { return LINE; } else if ("class".equals(event)) { return CLASS; } else if ("end".equals(event)) { return END; } else if ("call".equals(event)) { return CALL; } else if ("return".equals(event)) { return RETURN; } else if ("c-call".equals(event)) { return C_CALL; } else if ("c-return".equals(event)) { return C_RETURN; + } else if ("b-call".equals(event)) { + return C_CALL; + } else if ("b-return".equals(event)) { + return C_RETURN; } else if ("raise".equals(event)) { return RAISE; } else { throw new IllegalArgumentException("unknown event type: " + event); } } }
true
true
static RubyEvent typeForEvent(final String event) {
    if ("line".equals(event)) {
        return LINE;
    } else if ("class".equals(event)) {
        return CLASS;
    } else if ("end".equals(event)) {
        return END;
    } else if ("call".equals(event)) {
        return CALL;
    } else if ("return".equals(event)) {
        return RETURN;
    } else if ("c-call".equals(event)) {
        return C_CALL;
    } else if ("c-return".equals(event)) {
        return C_RETURN;
    } else if ("raise".equals(event)) {
        return RAISE;
    } else {
        throw new IllegalArgumentException("unknown event type: " + event);
    }
}
static RubyEvent typeForEvent(final String event) {
    if ("line".equals(event)) {
        return LINE;
    } else if ("class".equals(event)) {
        return CLASS;
    } else if ("end".equals(event)) {
        return END;
    } else if ("call".equals(event)) {
        return CALL;
    } else if ("return".equals(event)) {
        return RETURN;
    } else if ("c-call".equals(event)) {
        return C_CALL;
    } else if ("c-return".equals(event)) {
        return C_RETURN;
    } else if ("b-call".equals(event)) {
        return C_CALL;
    } else if ("b-return".equals(event)) {
        return C_RETURN;
    } else if ("raise".equals(event)) {
        return RAISE;
    } else {
        throw new IllegalArgumentException("unknown event type: " + event);
    }
}
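The Util.java patch above adds the block events "b-call" and "b-return" to typeForEvent so they no longer fall through to the IllegalArgumentException; note that the fix deliberately reuses the C_CALL/C_RETURN constants rather than introducing new ones. A table-driven sketch of the same dispatch is shown below; the Event enum is a local stand-in for JRuby's RubyEvent, not the real class.

import java.util.HashMap;
import java.util.Map;

public class EventTypeDemo {
    enum Event { LINE, CLASS, END, CALL, RETURN, C_CALL, C_RETURN, RAISE }

    // Table-driven version of the same dispatch; as in the patch, the block
    // events "b-call"/"b-return" map onto C_CALL/C_RETURN.
    private static final Map<String, Event> TYPES = new HashMap<>();
    static {
        TYPES.put("line", Event.LINE);
        TYPES.put("class", Event.CLASS);
        TYPES.put("end", Event.END);
        TYPES.put("call", Event.CALL);
        TYPES.put("return", Event.RETURN);
        TYPES.put("c-call", Event.C_CALL);
        TYPES.put("c-return", Event.C_RETURN);
        TYPES.put("b-call", Event.C_CALL);
        TYPES.put("b-return", Event.C_RETURN);
        TYPES.put("raise", Event.RAISE);
    }

    static Event typeForEvent(String event) {
        Event e = TYPES.get(event);
        if (e == null) {
            throw new IllegalArgumentException("unknown event type: " + event);
        }
        return e;
    }

    public static void main(String[] args) {
        System.out.println(typeForEvent("b-call"));   // C_CALL after the fix
        System.out.println(typeForEvent("line"));     // LINE
    }
}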
diff --git a/ee3_common/ee3/core/CraftingHandler.java b/ee3_common/ee3/core/CraftingHandler.java index 182947ca..973debce 100644 --- a/ee3_common/ee3/core/CraftingHandler.java +++ b/ee3_common/ee3/core/CraftingHandler.java @@ -1,35 +1,38 @@ package ee3.core; import ee3.core.interfaces.IProxy; import ee3.item.ItemPhilosopherStone; import ee3.item.ModItems; import net.minecraft.src.EntityPlayer; import net.minecraft.src.IInventory; import net.minecraft.src.ItemStack; import net.minecraft.src.ModLoader; import net.minecraft.src.forge.ICraftingHandler; public class CraftingHandler implements ICraftingHandler { @Override public void onTakenFromCrafting(EntityPlayer player, ItemStack stack, IInventory craftMatrix) { if (mod_EE3.proxy.isPortableCraftingGUIOpen()) { - player.inventory.getCurrentItem().damageItem(1, player); + ItemStack currentInventoryItem = player.inventory.getCurrentItem(); + if (currentInventoryItem != null) { + player.inventory.getCurrentItem().damageItem(1, player); + } } ItemStack currentItemStack; for (int i = 0; i < craftMatrix.getSizeInventory(); i++) { currentItemStack = craftMatrix.getStackInSlot(i); if (currentItemStack != null) { if (currentItemStack.itemID == ModItems.miniumStone.shiftedIndex) { currentItemStack.damageItem(1, player); currentItemStack.stackSize++; } else if (currentItemStack.itemID == ModItems.philStone.shiftedIndex) { currentItemStack.stackSize++; } } } } }
true
true
public void onTakenFromCrafting(EntityPlayer player, ItemStack stack, IInventory craftMatrix) {
    if (mod_EE3.proxy.isPortableCraftingGUIOpen()) {
        player.inventory.getCurrentItem().damageItem(1, player);
    }
    ItemStack currentItemStack;
    for (int i = 0; i < craftMatrix.getSizeInventory(); i++) {
        currentItemStack = craftMatrix.getStackInSlot(i);
        if (currentItemStack != null) {
            if (currentItemStack.itemID == ModItems.miniumStone.shiftedIndex) {
                currentItemStack.damageItem(1, player);
                currentItemStack.stackSize++;
            } else if (currentItemStack.itemID == ModItems.philStone.shiftedIndex) {
                currentItemStack.stackSize++;
            }
        }
    }
}
public void onTakenFromCrafting(EntityPlayer player, ItemStack stack, IInventory craftMatrix) {
    if (mod_EE3.proxy.isPortableCraftingGUIOpen()) {
        ItemStack currentInventoryItem = player.inventory.getCurrentItem();
        if (currentInventoryItem != null) {
            player.inventory.getCurrentItem().damageItem(1, player);
        }
    }
    ItemStack currentItemStack;
    for (int i = 0; i < craftMatrix.getSizeInventory(); i++) {
        currentItemStack = craftMatrix.getStackInSlot(i);
        if (currentItemStack != null) {
            if (currentItemStack.itemID == ModItems.miniumStone.shiftedIndex) {
                currentItemStack.damageItem(1, player);
                currentItemStack.stackSize++;
            } else if (currentItemStack.itemID == ModItems.philStone.shiftedIndex) {
                currentItemStack.stackSize++;
            }
        }
    }
}
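The CraftingHandler fix is a plain null guard: player.inventory.getCurrentItem() returns null when the player's hand is empty, so the unconditional damageItem call could throw a NullPointerException. A minimal sketch of the pattern, with a hypothetical Stack class standing in for Minecraft's ItemStack:

public class NullGuardDemo {
    /** Minimal stand-in for an inventory slot that may be empty. */
    static class Stack {
        int damage;
        void damageItem(int amount) { damage += amount; }
    }

    static Stack currentItem; // null when the player's hand is empty

    static void onTaken() {
        // Before the fix the equivalent call was unconditional and threw a
        // NullPointerException whenever no item was held.
        Stack held = currentItem;
        if (held != null) {
            held.damageItem(1);
        }
    }

    public static void main(String[] args) {
        onTaken();                 // empty hand: safely does nothing
        currentItem = new Stack();
        onTaken();                 // held item is damaged once
        System.out.println("damage on held item: " + currentItem.damage);
    }
}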
diff --git a/db/src/main/java/org/syphr/mythtv/db/schema/impl/Channel1268.java b/db/src/main/java/org/syphr/mythtv/db/schema/impl/Channel1268.java index cab155c..ddfecac 100644 --- a/db/src/main/java/org/syphr/mythtv/db/schema/impl/Channel1268.java +++ b/db/src/main/java/org/syphr/mythtv/db/schema/impl/Channel1268.java @@ -1,591 +1,591 @@ /* * Copyright 2011 Gregory P. Moyer * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.syphr.mythtv.db.schema.impl; import java.util.Date; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.Id; import javax.persistence.Table; import org.hibernate.annotations.GenericGenerator; import org.syphr.mythtv.db.schema.Channel; @Entity @Table(name = "channel") public class Channel1268 implements Channel { @Id @GeneratedValue(generator = "assigned") @GenericGenerator(name = "assigned", strategy = "assigned") private int chanid; @Column(nullable = false, length = 10) private String channum; @Column(length = 10) private String freqid; private Integer sourceid; @Column(nullable = false, length = 20) private String callsign; @Column(nullable = false, length = 64) private String name; @Column(nullable = false) private String icon; private Integer finetune; @Column(nullable = false) private String videofilters; @Column(nullable = false, length = 255) private String xmltvid; @Column(nullable = false) private int recpriority; private Integer contrast; private Integer brightness; private Integer colour; private Integer hue; @Column(nullable = false, length = 10) private String tvformat; @Column(nullable = false) private boolean visible; @Column(nullable = false) private String outputfilters; private Boolean useonairguide; private Short mplexid; private Integer serviceid; @Column(nullable = false) private int tmoffset; @Column(name = "atsc_major_chan", nullable = false) private int atscMajorChan; @Column(name = "atsc_minor_chan", nullable = false) private int atscMinorChan; @Column(name = "last_record", nullable = false, length = 19) private Date lastRecord; @Column(name = "default_authority", nullable = false, length = 32) private String defaultAuthority; @Column(nullable = false) private int commmethod; public Channel1268() { super(); } public Channel1268(int chanid, String channum, String callsign, String name, String icon, String videofilters, String xmltvid, int recpriority, String tvformat, boolean visible, String outputfilters, int tmoffset, int atscMajorChan, int atscMinorChan, Date lastRecord, String defaultAuthority, int commmethod) { this.chanid = chanid; this.channum = channum; this.callsign = callsign; this.name = name; this.icon = icon; this.videofilters = videofilters; this.xmltvid = xmltvid; this.recpriority = recpriority; this.tvformat = tvformat; this.visible = visible; this.outputfilters = outputfilters; this.tmoffset = tmoffset; this.atscMajorChan = atscMajorChan; this.atscMinorChan = atscMinorChan; this.lastRecord = lastRecord; this.defaultAuthority = defaultAuthority; 
this.commmethod = commmethod; } public Channel1268(int chanid, String channum, String freqid, Integer sourceid, String callsign, String name, String icon, Integer finetune, String videofilters, String xmltvid, int recpriority, Integer contrast, Integer brightness, Integer colour, Integer hue, String tvformat, boolean visible, String outputfilters, Boolean useonairguide, Short mplexid, Integer serviceid, int tmoffset, int atscMajorChan, int atscMinorChan, Date lastRecord, String defaultAuthority, int commmethod) { this.chanid = chanid; this.channum = channum; this.freqid = freqid; this.sourceid = sourceid; this.callsign = callsign; this.name = name; this.icon = icon; this.finetune = finetune; this.videofilters = videofilters; this.xmltvid = xmltvid; this.recpriority = recpriority; this.contrast = contrast; this.brightness = brightness; this.colour = colour; this.hue = hue; this.tvformat = tvformat; this.visible = visible; this.outputfilters = outputfilters; this.useonairguide = useonairguide; this.mplexid = mplexid; this.serviceid = serviceid; this.tmoffset = tmoffset; this.atscMajorChan = atscMajorChan; this.atscMinorChan = atscMinorChan; this.lastRecord = lastRecord; this.defaultAuthority = defaultAuthority; this.commmethod = commmethod; } @Override public int getChanid() { return this.chanid; } @Override public void setChanid(int chanid) { this.chanid = chanid; } @Override public String getChannum() { return this.channum; } @Override public void setChannum(String channum) { this.channum = channum; } @Override public String getFreqid() { return this.freqid; } @Override public void setFreqid(String freqid) { this.freqid = freqid; } @Override public Integer getSourceid() { return this.sourceid; } @Override public void setSourceid(Integer sourceid) { this.sourceid = sourceid; } @Override public String getCallsign() { return this.callsign; } @Override public void setCallsign(String callsign) { this.callsign = callsign; } @Override public String getName() { return this.name; } @Override public void setName(String name) { this.name = name; } @Override public String getIcon() { return this.icon; } @Override public void setIcon(String icon) { this.icon = icon; } @Override public Integer getFinetune() { return this.finetune; } @Override public void setFinetune(Integer finetune) { this.finetune = finetune; } @Override public String getVideofilters() { return this.videofilters; } @Override public void setVideofilters(String videofilters) { this.videofilters = videofilters; } @Override public String getXmltvid() { return this.xmltvid; } @Override public void setXmltvid(String xmltvid) { this.xmltvid = xmltvid; } @Override public int getRecpriority() { return this.recpriority; } @Override public void setRecpriority(int recpriority) { this.recpriority = recpriority; } @Override public Integer getContrast() { return this.contrast; } @Override public void setContrast(Integer contrast) { this.contrast = contrast; } @Override public Integer getBrightness() { return this.brightness; } @Override public void setBrightness(Integer brightness) { this.brightness = brightness; } @Override public Integer getColour() { return this.colour; } @Override public void setColour(Integer colour) { this.colour = colour; } @Override public Integer getHue() { return this.hue; } @Override public void setHue(Integer hue) { this.hue = hue; } @Override public String getTvformat() { return this.tvformat; } @Override public void setTvformat(String tvformat) { this.tvformat = tvformat; } @Override public boolean isVisible() { return 
this.visible; } @Override public void setVisible(boolean visible) { this.visible = visible; } @Override public String getOutputfilters() { return this.outputfilters; } @Override public void setOutputfilters(String outputfilters) { this.outputfilters = outputfilters; } @Override public Boolean getUseonairguide() { return this.useonairguide; } @Override public void setUseonairguide(Boolean useonairguide) { this.useonairguide = useonairguide; } @Override public Short getMplexid() { return this.mplexid; } @Override public void setMplexid(Short mplexid) { this.mplexid = mplexid; } @Override public Integer getServiceid() { return this.serviceid; } @Override public void setServiceid(Integer serviceid) { this.serviceid = serviceid; } @Override public int getTmoffset() { return this.tmoffset; } @Override public void setTmoffset(int tmoffset) { this.tmoffset = tmoffset; } @Override public int getAtscMajorChan() { return this.atscMajorChan; } @Override public void setAtscMajorChan(int atscMajorChan) { this.atscMajorChan = atscMajorChan; } @Override public int getAtscMinorChan() { return this.atscMinorChan; } @Override public void setAtscMinorChan(int atscMinorChan) { this.atscMinorChan = atscMinorChan; } @Override public Date getLastRecord() { return this.lastRecord; } @Override public void setLastRecord(Date lastRecord) { this.lastRecord = lastRecord; } @Override public String getDefaultAuthority() { return this.defaultAuthority; } @Override public void setDefaultAuthority(String defaultAuthority) { this.defaultAuthority = defaultAuthority; } @Override public int getCommmethod() { return this.commmethod; } @Override public void setCommmethod(int commmethod) { this.commmethod = commmethod; } @Override public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("Channel1264 [chanid="); + builder.append("Channel1268 [chanid="); builder.append(chanid); builder.append(", channum="); builder.append(channum); builder.append(", freqid="); builder.append(freqid); builder.append(", sourceid="); builder.append(sourceid); builder.append(", callsign="); builder.append(callsign); builder.append(", name="); builder.append(name); builder.append(", icon="); builder.append(icon); builder.append(", finetune="); builder.append(finetune); builder.append(", videofilters="); builder.append(videofilters); builder.append(", xmltvid="); builder.append(xmltvid); builder.append(", recpriority="); builder.append(recpriority); builder.append(", contrast="); builder.append(contrast); builder.append(", brightness="); builder.append(brightness); builder.append(", colour="); builder.append(colour); builder.append(", hue="); builder.append(hue); builder.append(", tvformat="); builder.append(tvformat); builder.append(", visible="); builder.append(visible); builder.append(", outputfilters="); builder.append(outputfilters); builder.append(", useonairguide="); builder.append(useonairguide); builder.append(", mplexid="); builder.append(mplexid); builder.append(", serviceid="); builder.append(serviceid); builder.append(", tmoffset="); builder.append(tmoffset); builder.append(", atscMajorChan="); builder.append(atscMajorChan); builder.append(", atscMinorChan="); builder.append(atscMinorChan); builder.append(", lastRecord="); builder.append(lastRecord); builder.append(", defaultAuthority="); builder.append(defaultAuthority); builder.append(", commmethod="); builder.append(commmethod); builder.append("]"); return builder.toString(); } }
true
true
public String toString() { StringBuilder builder = new StringBuilder(); builder.append("Channel1264 [chanid="); builder.append(chanid); builder.append(", channum="); builder.append(channum); builder.append(", freqid="); builder.append(freqid); builder.append(", sourceid="); builder.append(sourceid); builder.append(", callsign="); builder.append(callsign); builder.append(", name="); builder.append(name); builder.append(", icon="); builder.append(icon); builder.append(", finetune="); builder.append(finetune); builder.append(", videofilters="); builder.append(videofilters); builder.append(", xmltvid="); builder.append(xmltvid); builder.append(", recpriority="); builder.append(recpriority); builder.append(", contrast="); builder.append(contrast); builder.append(", brightness="); builder.append(brightness); builder.append(", colour="); builder.append(colour); builder.append(", hue="); builder.append(hue); builder.append(", tvformat="); builder.append(tvformat); builder.append(", visible="); builder.append(visible); builder.append(", outputfilters="); builder.append(outputfilters); builder.append(", useonairguide="); builder.append(useonairguide); builder.append(", mplexid="); builder.append(mplexid); builder.append(", serviceid="); builder.append(serviceid); builder.append(", tmoffset="); builder.append(tmoffset); builder.append(", atscMajorChan="); builder.append(atscMajorChan); builder.append(", atscMinorChan="); builder.append(atscMinorChan); builder.append(", lastRecord="); builder.append(lastRecord); builder.append(", defaultAuthority="); builder.append(defaultAuthority); builder.append(", commmethod="); builder.append(commmethod); builder.append("]"); return builder.toString(); }
public String toString() { StringBuilder builder = new StringBuilder(); builder.append("Channel1268 [chanid="); builder.append(chanid); builder.append(", channum="); builder.append(channum); builder.append(", freqid="); builder.append(freqid); builder.append(", sourceid="); builder.append(sourceid); builder.append(", callsign="); builder.append(callsign); builder.append(", name="); builder.append(name); builder.append(", icon="); builder.append(icon); builder.append(", finetune="); builder.append(finetune); builder.append(", videofilters="); builder.append(videofilters); builder.append(", xmltvid="); builder.append(xmltvid); builder.append(", recpriority="); builder.append(recpriority); builder.append(", contrast="); builder.append(contrast); builder.append(", brightness="); builder.append(brightness); builder.append(", colour="); builder.append(colour); builder.append(", hue="); builder.append(hue); builder.append(", tvformat="); builder.append(tvformat); builder.append(", visible="); builder.append(visible); builder.append(", outputfilters="); builder.append(outputfilters); builder.append(", useonairguide="); builder.append(useonairguide); builder.append(", mplexid="); builder.append(mplexid); builder.append(", serviceid="); builder.append(serviceid); builder.append(", tmoffset="); builder.append(tmoffset); builder.append(", atscMajorChan="); builder.append(atscMajorChan); builder.append(", atscMinorChan="); builder.append(atscMinorChan); builder.append(", lastRecord="); builder.append(lastRecord); builder.append(", defaultAuthority="); builder.append(defaultAuthority); builder.append(", commmethod="); builder.append(commmethod); builder.append("]"); return builder.toString(); }
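The only change in Channel1268 is the literal prefix in toString ("Channel1264", left over from copying the previous schema class, becomes "Channel1268"). One way to make that kind of copy-paste drift impossible is to derive the prefix from the runtime class; the sketch below shows the idea on a cut-down class and is not the project's actual code.

public class ToStringLabelDemo {
    private final int chanid;
    private final String callsign;

    ToStringLabelDemo(int chanid, String callsign) {
        this.chanid = chanid;
        this.callsign = callsign;
    }

    @Override
    public String toString() {
        // Deriving the prefix from the runtime class avoids the stale
        // "Channel1264" label that a hand-written literal kept after the
        // class was copied and renamed.
        StringBuilder builder = new StringBuilder(getClass().getSimpleName());
        builder.append(" [chanid=").append(chanid);
        builder.append(", callsign=").append(callsign);
        builder.append("]");
        return builder.toString();
    }

    public static void main(String[] args) {
        System.out.println(new ToStringLabelDemo(1268, "WXYZ"));
    }
}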
diff --git a/photark-security/src/main/java/org/apache/photark/security/authorization/services/SecurityServiceImpl.java b/photark-security/src/main/java/org/apache/photark/security/authorization/services/SecurityServiceImpl.java index 4587655..668722e 100644 --- a/photark-security/src/main/java/org/apache/photark/security/authorization/services/SecurityServiceImpl.java +++ b/photark-security/src/main/java/org/apache/photark/security/authorization/services/SecurityServiceImpl.java @@ -1,150 +1,150 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.photark.security.authorization.services; import org.apache.photark.security.authorization.AccessList; import org.apache.photark.security.authorization.User; import org.apache.photark.security.authorization.UserInfo; import org.oasisopen.sca.annotation.Reference; import org.oasisopen.sca.annotation.Scope; import org.oasisopen.sca.annotation.Service; import javax.servlet.Servlet; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.IOException; import java.io.PrintWriter; @Service(Servlet.class) @Scope("COMPOSITE") public class SecurityServiceImpl extends HttpServlet implements Servlet /*SecurityService*/ { /** * */ private static final long serialVersionUID = -6452934544772432330L; private AccessManager accessManager; @Reference(name = "accessmanager") protected void setAccessService(AccessManager accessManager) { this.accessManager = accessManager; } @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setContentType("text/html"); doPost(request, response); } @Override public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException { response.setContentType("text/html"); StringBuffer sb = new StringBuffer(); if ("getUserInfo".equalsIgnoreCase(request.getParameter("request"))) { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); User user; user = accessManager.getUser(userId); UserInfo userInfo = user.getUserInfo(); if (accessManager.isUserStoredInRole(userId, "registeredUserRole")) { request.getSession().setAttribute("toRigester", "false"); /* sb.append("userId="+ userId); sb.append(",displayName=" + userInfo.getDisplayName()); sb.append(",email=" + userInfo.getEmail()); sb.append(",realName=" + userInfo.getRealName()); sb.append(",webSite=" + userInfo.getWebsite());*/ sb.append("registered,").append(userId).append(",").append(userInfo.getRealName()).append(",").append(userInfo.getDisplayName()).append(",").append(userInfo.getEmail()).append(",").append(userInfo.getWebsite()); } else { 
/*sb.append("userId="+ userId); sb.append(",unRegistered=false");*/ sb.append("unRegistered,").append(userId).append(",").append(userInfo.getRealName()).append(",").append(userInfo.getDisplayName()).append(",").append(userInfo.getEmail()).append(",").append(userInfo.getWebsite()); } } else if ("setUserInfo".equalsIgnoreCase(request.getParameter("request"))) { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); User user; if (request.getParameter("displayName") != null && !request.getParameter("displayName").trim().equals("")) { request.getSession().setAttribute("toRigester", "false"); user = new User(userId); UserInfo userInfo = new UserInfo(request.getParameter("displayName"), request.getParameter("email"), request.getParameter("realName"), request.getParameter("webSite")); user.setUserInfo(userInfo); if (accessManager.isUserStoredInRole(userId, "unRegisteredUserRole")) { accessManager.removeUserFromRole(userId, "unRegisteredUserRole"); } if (!accessManager.isUserStoredInRole(userId, "registeredUserRole")) { accessManager.addUserToRole(user, "registeredUserRole"); } - //sb.append("userId="+ userId); + sb.append("OK"); //sb.append(",unRegistered=false"); } } else if ("getUser".equalsIgnoreCase(request.getParameter("request"))) { if (request.getSession().getAttribute("accessList") != null && request.getSession().getAttribute("accessList") != "") { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); if(userId.equals("SuperAdmin")){ sb.append("{user:{userId:'" + userId + "',userInfo:{realName:'" + "',displayName:'"+userId + "',email:'" + "',website:'" + "'}}}"); } else{ User user; user = accessManager.getUser(userId); UserInfo userInfo = user.getUserInfo(); /* sb.append("userId="+ userId); sb.append(",displayName=" + userInfo.getDisplayName()); sb.append(",email=" + userInfo.getEmail()); sb.append(",realName=" + userInfo.getRealName()); sb.append(",webSite=" + userInfo.getWebsite());*/ sb.append("{user:{userId:'" + userId + "',userInfo:{realName:'" + userInfo.getRealName() + "',displayName:'" + userInfo.getDisplayName() + "',email:'" + userInfo.getEmail() + "',website:'" + userInfo.getWebsite() + "'}}}"); } }else{ sb.append("{user:{userId:'null'}}"); } } PrintWriter out = response.getWriter(); out.write(sb.toString()); out.flush(); out.close(); } }
true
true
public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException { response.setContentType("text/html"); StringBuffer sb = new StringBuffer(); if ("getUserInfo".equalsIgnoreCase(request.getParameter("request"))) { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); User user; user = accessManager.getUser(userId); UserInfo userInfo = user.getUserInfo(); if (accessManager.isUserStoredInRole(userId, "registeredUserRole")) { request.getSession().setAttribute("toRigester", "false"); /* sb.append("userId="+ userId); sb.append(",displayName=" + userInfo.getDisplayName()); sb.append(",email=" + userInfo.getEmail()); sb.append(",realName=" + userInfo.getRealName()); sb.append(",webSite=" + userInfo.getWebsite());*/ sb.append("registered,").append(userId).append(",").append(userInfo.getRealName()).append(",").append(userInfo.getDisplayName()).append(",").append(userInfo.getEmail()).append(",").append(userInfo.getWebsite()); } else { /*sb.append("userId="+ userId); sb.append(",unRegistered=false");*/ sb.append("unRegistered,").append(userId).append(",").append(userInfo.getRealName()).append(",").append(userInfo.getDisplayName()).append(",").append(userInfo.getEmail()).append(",").append(userInfo.getWebsite()); } } else if ("setUserInfo".equalsIgnoreCase(request.getParameter("request"))) { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); User user; if (request.getParameter("displayName") != null && !request.getParameter("displayName").trim().equals("")) { request.getSession().setAttribute("toRigester", "false"); user = new User(userId); UserInfo userInfo = new UserInfo(request.getParameter("displayName"), request.getParameter("email"), request.getParameter("realName"), request.getParameter("webSite")); user.setUserInfo(userInfo); if (accessManager.isUserStoredInRole(userId, "unRegisteredUserRole")) { accessManager.removeUserFromRole(userId, "unRegisteredUserRole"); } if (!accessManager.isUserStoredInRole(userId, "registeredUserRole")) { accessManager.addUserToRole(user, "registeredUserRole"); } //sb.append("userId="+ userId); //sb.append(",unRegistered=false"); } } else if ("getUser".equalsIgnoreCase(request.getParameter("request"))) { if (request.getSession().getAttribute("accessList") != null && request.getSession().getAttribute("accessList") != "") { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); if(userId.equals("SuperAdmin")){ sb.append("{user:{userId:'" + userId + "',userInfo:{realName:'" + "',displayName:'"+userId + "',email:'" + "',website:'" + "'}}}"); } else{ User user; user = accessManager.getUser(userId); UserInfo userInfo = user.getUserInfo(); /* sb.append("userId="+ userId); sb.append(",displayName=" + userInfo.getDisplayName()); sb.append(",email=" + userInfo.getEmail()); sb.append(",realName=" + userInfo.getRealName()); sb.append(",webSite=" + userInfo.getWebsite());*/ sb.append("{user:{userId:'" + userId + "',userInfo:{realName:'" + userInfo.getRealName() + "',displayName:'" + userInfo.getDisplayName() + "',email:'" + userInfo.getEmail() + "',website:'" + userInfo.getWebsite() + "'}}}"); } }else{ sb.append("{user:{userId:'null'}}"); } } PrintWriter out = response.getWriter(); out.write(sb.toString()); out.flush(); out.close(); }
public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException { response.setContentType("text/html"); StringBuffer sb = new StringBuffer(); if ("getUserInfo".equalsIgnoreCase(request.getParameter("request"))) { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); User user; user = accessManager.getUser(userId); UserInfo userInfo = user.getUserInfo(); if (accessManager.isUserStoredInRole(userId, "registeredUserRole")) { request.getSession().setAttribute("toRigester", "false"); /* sb.append("userId="+ userId); sb.append(",displayName=" + userInfo.getDisplayName()); sb.append(",email=" + userInfo.getEmail()); sb.append(",realName=" + userInfo.getRealName()); sb.append(",webSite=" + userInfo.getWebsite());*/ sb.append("registered,").append(userId).append(",").append(userInfo.getRealName()).append(",").append(userInfo.getDisplayName()).append(",").append(userInfo.getEmail()).append(",").append(userInfo.getWebsite()); } else { /*sb.append("userId="+ userId); sb.append(",unRegistered=false");*/ sb.append("unRegistered,").append(userId).append(",").append(userInfo.getRealName()).append(",").append(userInfo.getDisplayName()).append(",").append(userInfo.getEmail()).append(",").append(userInfo.getWebsite()); } } else if ("setUserInfo".equalsIgnoreCase(request.getParameter("request"))) { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); User user; if (request.getParameter("displayName") != null && !request.getParameter("displayName").trim().equals("")) { request.getSession().setAttribute("toRigester", "false"); user = new User(userId); UserInfo userInfo = new UserInfo(request.getParameter("displayName"), request.getParameter("email"), request.getParameter("realName"), request.getParameter("webSite")); user.setUserInfo(userInfo); if (accessManager.isUserStoredInRole(userId, "unRegisteredUserRole")) { accessManager.removeUserFromRole(userId, "unRegisteredUserRole"); } if (!accessManager.isUserStoredInRole(userId, "registeredUserRole")) { accessManager.addUserToRole(user, "registeredUserRole"); } sb.append("OK"); //sb.append(",unRegistered=false"); } } else if ("getUser".equalsIgnoreCase(request.getParameter("request"))) { if (request.getSession().getAttribute("accessList") != null && request.getSession().getAttribute("accessList") != "") { AccessList accessList = (AccessList) request.getSession().getAttribute("accessList"); String userId = accessList.getUserId(); if(userId.equals("SuperAdmin")){ sb.append("{user:{userId:'" + userId + "',userInfo:{realName:'" + "',displayName:'"+userId + "',email:'" + "',website:'" + "'}}}"); } else{ User user; user = accessManager.getUser(userId); UserInfo userInfo = user.getUserInfo(); /* sb.append("userId="+ userId); sb.append(",displayName=" + userInfo.getDisplayName()); sb.append(",email=" + userInfo.getEmail()); sb.append(",realName=" + userInfo.getRealName()); sb.append(",webSite=" + userInfo.getWebsite());*/ sb.append("{user:{userId:'" + userId + "',userInfo:{realName:'" + userInfo.getRealName() + "',displayName:'" + userInfo.getDisplayName() + "',email:'" + userInfo.getEmail() + "',website:'" + userInfo.getWebsite() + "'}}}"); } }else{ sb.append("{user:{userId:'null'}}"); } } PrintWriter out = response.getWriter(); out.write(sb.toString()); out.flush(); out.close(); }
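The SecurityServiceImpl change makes the setUserInfo branch write an explicit "OK" acknowledgement instead of leaving the response body empty, so the caller can tell that the update was applied. A stripped-down, servlet-free sketch of that branch (the method and parameter names here are illustrative, not the project's API):

public class AckResponseDemo {
    /** Simplified stand-in for the setUserInfo branch of doPost. */
    static String handleSetUserInfo(String displayName) {
        StringBuilder sb = new StringBuilder();
        if (displayName != null && !displayName.trim().isEmpty()) {
            // ... persist the user info here ...
            sb.append("OK"); // explicit acknowledgement instead of an empty body
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println("response: '" + handleSetUserInfo("Alice") + "'"); // 'OK'
        System.out.println("response: '" + handleSetUserInfo("   ") + "'");   // ''
    }
}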
diff --git a/src/com/app/getconnected/activities/RateRideActivity.java b/src/com/app/getconnected/activities/RateRideActivity.java index 15222de..1b8b81f 100644 --- a/src/com/app/getconnected/activities/RateRideActivity.java +++ b/src/com/app/getconnected/activities/RateRideActivity.java @@ -1,60 +1,60 @@ package com.app.getconnected.activities; import android.os.Bundle; import android.util.Log; import android.view.Menu; import android.view.View; import android.widget.EditText; import android.widget.RadioButton; import android.widget.RadioGroup; import com.app.getconnected.R; public class RateRideActivity extends BaseActivity { String rideId; RadioGroup rideRatingField; EditText rideCommentField; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_rate_ride); // TODO find out why initLayout crashes - // initLayout(R.string.title_activity_rate_ride, true, false, false, - // false); + initLayout(R.string.title_activity_rate_ride, true, true, true, + false); rideId = getIntent().getStringExtra("rideId"); rideRatingField = (RadioGroup) findViewById(R.id.ride_rating); rideRatingField.check(R.id.good); rideCommentField = (EditText) findViewById(R.id.ride_comment); } @Override public boolean onCreateOptionsMenu(Menu menu) { getMenuInflater().inflate(R.menu.rate_ride, menu); return true; } /** * Finishes this activity without sending any rating * * @param view */ public void cancelRating(View view) { finish(); } /** * Sends the rating to the API and finishes this activity * * @param view */ public void sendRating(View view) { String rating = ((RadioButton) findViewById(rideRatingField .getCheckedRadioButtonId())).getTag().toString(); String comment = rideCommentField.getText().toString(); Log.d("DEBUG", "ride ID:" + rideId + ", Rating: " + rating + ", Comment: " + comment); finish(); } }
true
true
protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_rate_ride); // TODO find out why initLayout crashes // initLayout(R.string.title_activity_rate_ride, true, false, false, // false); rideId = getIntent().getStringExtra("rideId"); rideRatingField = (RadioGroup) findViewById(R.id.ride_rating); rideRatingField.check(R.id.good); rideCommentField = (EditText) findViewById(R.id.ride_comment); }
protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_rate_ride); // TODO find out why initLayout crashes initLayout(R.string.title_activity_rate_ride, true, true, true, false); rideId = getIntent().getStringExtra("rideId"); rideRatingField = (RadioGroup) findViewById(R.id.ride_rating); rideRatingField.check(R.id.good); rideCommentField = (EditText) findViewById(R.id.ride_comment); }
diff --git a/src/main/java/cz/zcu/kiv/eegdatabase/logic/DashedUrlMethodNameResolver.java b/src/main/java/cz/zcu/kiv/eegdatabase/logic/DashedUrlMethodNameResolver.java index edeebc0..28aa924 100644 --- a/src/main/java/cz/zcu/kiv/eegdatabase/logic/DashedUrlMethodNameResolver.java +++ b/src/main/java/cz/zcu/kiv/eegdatabase/logic/DashedUrlMethodNameResolver.java @@ -1,45 +1,45 @@ package cz.zcu.kiv.eegdatabase.logic; import javax.servlet.http.HttpServletRequest; import org.springframework.web.servlet.mvc.multiaction.MethodNameResolver; import org.springframework.web.servlet.mvc.multiaction.NoSuchRequestHandlingMethodException; /** * * @author Jindra */ public class DashedUrlMethodNameResolver implements MethodNameResolver { public String getHandlerMethodName(HttpServletRequest request) throws NoSuchRequestHandlingMethodException { String url = request.getRequestURL().toString(); int lastSlash = url.lastIndexOf("/") + 1; - int extensionPosition = url.lastIndexOf("lib/lucene"); + int extensionPosition = url.lastIndexOf("."); String fileName = null; if (extensionPosition > lastSlash) { // Dot of the extension must be behind last slash fileName = url.substring(lastSlash, extensionPosition); } else { // without extension fileName = url.substring(lastSlash); } // Remove slashes in file name and convert the beginnings of the words to upper case boolean toUpperCase = false; StringBuffer result = new StringBuffer(); for (int i = 0; i < fileName.length(); i++) { char c = fileName.charAt(i); if (c == '-') { toUpperCase = true; } else { if (toUpperCase) { c = Character.toUpperCase(c); toUpperCase = false; } result.append(c); } } return result.toString(); } }
true
true
public String getHandlerMethodName(HttpServletRequest request) throws NoSuchRequestHandlingMethodException { String url = request.getRequestURL().toString(); int lastSlash = url.lastIndexOf("/") + 1; int extensionPosition = url.lastIndexOf("lib/lucene"); String fileName = null; if (extensionPosition > lastSlash) { // Dot of the extension must be behind last slash fileName = url.substring(lastSlash, extensionPosition); } else { // without extension fileName = url.substring(lastSlash); } // Remove slashes in file name and convert the beginnings of the words to upper case boolean toUpperCase = false; StringBuffer result = new StringBuffer(); for (int i = 0; i < fileName.length(); i++) { char c = fileName.charAt(i); if (c == '-') { toUpperCase = true; } else { if (toUpperCase) { c = Character.toUpperCase(c); toUpperCase = false; } result.append(c); } } return result.toString(); }
public String getHandlerMethodName(HttpServletRequest request) throws NoSuchRequestHandlingMethodException { String url = request.getRequestURL().toString(); int lastSlash = url.lastIndexOf("/") + 1; int extensionPosition = url.lastIndexOf("."); String fileName = null; if (extensionPosition > lastSlash) { // Dot of the extension must be behind last slash fileName = url.substring(lastSlash, extensionPosition); } else { // without extension fileName = url.substring(lastSlash); } // Remove slashes in file name and convert the beginnings of the words to upper case boolean toUpperCase = false; StringBuffer result = new StringBuffer(); for (int i = 0; i < fileName.length(); i++) { char c = fileName.charAt(i); if (c == '-') { toUpperCase = true; } else { if (toUpperCase) { c = Character.toUpperCase(c); toUpperCase = false; } result.append(c); } } return result.toString(); }
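The resolver pair above derives a handler method name by cutting the request URL at the last slash and the last dot, then camel-casing on dashes (e.g. /do-save.html -> doSave). A minimal standalone sketch of just that conversion, outside any servlet context and with a hypothetical class name chosen here for illustration only, is:

class DashedNameConverter {
    // Convert a dashed file name such as "do-save" into "doSave",
    // mirroring the loop in the record above (StringBuilder used instead of StringBuffer).
    static String toMethodName(String fileName) {
        StringBuilder result = new StringBuilder();
        boolean toUpperCase = false;
        for (int i = 0; i < fileName.length(); i++) {
            char c = fileName.charAt(i);
            if (c == '-') {
                toUpperCase = true; // drop the dash, upper-case the next character
            } else {
                result.append(toUpperCase ? Character.toUpperCase(c) : c);
                toUpperCase = false;
            }
        }
        return result.toString();
    }

    public static void main(String[] args) {
        System.out.println(toMethodName("do-save"));        // prints doSave
        System.out.println(toMethodName("list-all-users")); // prints listAllUsers
    }
}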
diff --git a/tizzit-core/src/main/java/de/juwimm/cms/model/ViewComponentHbmDaoImpl.java b/tizzit-core/src/main/java/de/juwimm/cms/model/ViewComponentHbmDaoImpl.java index 6ea1c456..eb83e0ce 100644 --- a/tizzit-core/src/main/java/de/juwimm/cms/model/ViewComponentHbmDaoImpl.java +++ b/tizzit-core/src/main/java/de/juwimm/cms/model/ViewComponentHbmDaoImpl.java @@ -1,888 +1,888 @@ /** * Copyright (c) 2009 Juwi MacMillan Group GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.juwimm.cms.model; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.Iterator; import java.util.Map; import org.apache.commons.lang.NotImplementedException; import org.apache.log4j.Logger; import org.hibernate.Query; import org.springframework.beans.factory.annotation.Autowired; import org.tizzit.util.DateConverter; import de.juwimm.cms.beans.foreign.TizzitPropertiesBeanSpring; import de.juwimm.cms.common.Constants; import de.juwimm.cms.exceptions.UserException; import de.juwimm.cms.remote.helper.AuthenticationHelper; import de.juwimm.cms.safeguard.model.Realm2viewComponentHbm; import de.juwimm.cms.safeguard.model.RealmJaasHbm; import de.juwimm.cms.safeguard.model.RealmJdbcHbm; import de.juwimm.cms.safeguard.model.RealmLdapHbm; import de.juwimm.cms.safeguard.model.RealmSimplePwHbm; import de.juwimm.cms.search.beans.SearchengineDeleteService; import de.juwimm.cms.vo.ContentValue; import de.juwimm.cms.vo.ViewDocumentValue; /** * @see de.juwimm.cms.model.ViewComponentHbm * @author <a href="mailto:[email protected]">Carsten Schalm</a> * company Juwi|MacMillan Group Gmbh, Walsrode, Germany * @version $Id$ */ public class ViewComponentHbmDaoImpl extends ViewComponentHbmDaoBase { private static Logger log = Logger.getLogger(ViewComponentHbmDaoImpl.class); private static final SimpleDateFormat sdf = new SimpleDateFormat("MM/dd/yyyy hh:mm a"); private TizzitPropertiesBeanSpring tizzitPropertiesBeanSpring; @Autowired private SearchengineDeleteService searchengineDeleteService; @Autowired private SequenceHbmDao sequenceHbmDao; public TizzitPropertiesBeanSpring getTizzitPropertiesBeanSpring() { return tizzitPropertiesBeanSpring; } @Autowired public void setTizzitPropertiesBeanSpring(TizzitPropertiesBeanSpring tizzitPropertiesBeanSpring) { this.tizzitPropertiesBeanSpring = tizzitPropertiesBeanSpring; } @Override protected boolean handleShouldBeVisible(ViewComponentHbm current, boolean isLiveserver) { // if (isLiveserver && current.getOnline() != 1) return false; boolean retVal = current.isVisible(); if (retVal && (current.getViewType() == Constants.VIEW_TYPE_CONTENT || current.getViewType() == Constants.VIEW_TYPE_UNIT) && isLiveserver && !hasPublishContentVersion(current)) { retVal = false; } if (retVal && (current.getOnlineStart() > 0 && new Date(current.getOnlineStart()).after(new Date(System.currentTimeMillis())))) retVal = false; if (retVal && (current.getOnlineStop() > 0 && new 
Date(current.getOnlineStop()).before(new Date(System.currentTimeMillis())))) retVal = false; return retVal; } /** * @param isDeploy - Constants.Deploy_type * */ @Override protected void handleToXml(ViewComponentHbm current, Integer onlyThisUnitId, boolean withContent, boolean lastContenVersionOnly, boolean withSiteProtection, boolean withUrl, int depth, boolean liveServer, boolean returnOnlyVisibleOne, int deployType, PrintStream out) { if (log.isDebugEnabled()) log.debug("toXml " + withContent + " WITH URL " + withUrl); // if it's a deploy - the status has to be 'approved' and will be set to 'for_deploy' if (deployType != -1) { - if (current.getStatus() == Constants.DEPLOY_STATUS_APPROVED) { + if (current.getStatus() == Constants.DEPLOY_STATUS_APPROVED || current.getStatus() != Constants.DEPLOY_STATUS_DEPLOYED) { current.setStatus(Constants.DEPLOY_STATUS_FOR_DEPLOY); this.update(current); } else { return; } } out.print("<viewcomponent id=\""); out.print(current.getViewComponentId()); out.print("\" unitId=\""); out.print(current.getUnit4ViewComponent()); if (withUrl) { out.print("\" hasChild=\""); out.print(hasVisibleChild(current, liveServer)); } // This is only needed for the FIRST VC in the Edition. // If there is no existent VC like this (initial deploy), we will take // this settings. if (current.getPrevNode() != null) { out.print("\" prev=\""); out.print(current.getPrevNode().getViewComponentId()); } if (current.getNextNode() != null) { out.print("\" next=\""); out.print(current.getNextNode().getViewComponentId()); } if (current.getParent() != null) { out.print("\" parent=\""); out.print(current.getParent().getViewComponentId()); } out.print("\">\n"); ViewDocumentValue viewDocumentValue = null; try { viewDocumentValue = current.getViewDocument().getDao(); } catch (Exception e) { } out.print("<showType>" + current.getShowType() + "</showType>\n"); out.print("<viewType>" + current.getViewType() + "</viewType>\n"); out.print("<visible>" + current.isVisible() + "</visible>\n"); out.print("<searchIndexed>" + current.isSearchIndexed() + "</searchIndexed>\n"); out.print("<statusInfo><![CDATA[" + current.getLinkDescription() + "]]></statusInfo>\n"); if (liveServer) { byte viewType = current.getViewType(); if (viewType == Constants.VIEW_TYPE_EXTERNAL_LINK || viewType == Constants.VIEW_TYPE_INTERNAL_LINK || viewType == Constants.VIEW_TYPE_SYMLINK) { if (current.getStatus() != Constants.DEPLOY_STATUS_APPROVED) { if (current.getApprovedLinkName() != null && !"null".equalsIgnoreCase(current.getApprovedLinkName())) { out.print("<linkName><![CDATA[" + current.getApprovedLinkName() + "]]></linkName>\n"); } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } out.print("<urlLinkName><![CDATA[" + current.getUrlLinkName() + "]]></urlLinkName>\n"); out.print("<viewLevel>" + current.getViewLevel() + "</viewLevel>\n"); out.print("<viewIndex>" + current.getViewIndex() + "</viewIndex>\n"); out.print("<displaySettings>" + current.getDisplaySettings() + "</displaySettings>\n"); if (viewDocumentValue != null) { out.print("<viewDocumentId>" + viewDocumentValue.getViewDocumentId() + "</viewDocumentId>\n"); } out.print("<viewDocumentViewType>" + (viewDocumentValue != null ? 
viewDocumentValue.getViewType() : "browser") + "</viewDocumentViewType>\n"); out.print("<language>" + (viewDocumentValue != null ? viewDocumentValue.getLanguage() : "deutsch") + "</language>\n"); out.print("<userModifiedDate>" + current.getUserLastModifiedDate() + "</userModifiedDate>\n"); // this is for the edition if (withContent) { out.print("<status>" + current.getStatus() + "</status>\n"); out.print("<onlineStart>" + current.getOnlineStart() + "</onlineStart>\n"); out.print("<onlineStop>" + current.getOnlineStop() + "</onlineStop>\n"); out.print("<online>" + current.getOnline() + "</online>\n"); out.print("<reference><![CDATA[" + current.getReference() + "]]></reference>\n"); out.print("<metaKeywords><![CDATA[" + current.getMetaData() + "]]></metaKeywords>\n"); out.print("<metaDescription><![CDATA[" + current.getMetaDescription() + "]]></metaDescription>\n"); try { out.print("<modifiedDate>" + DateConverter.getSql2String(new Date(current.getLastModifiedDate())) + "</modifiedDate>\n"); out.print("<createDate>" + DateConverter.getSql2String(new Date(current.getCreateDate())) + "</createDate>\n"); if (current.getViewType() == Constants.VIEW_TYPE_CONTENT || current.getViewType() == Constants.VIEW_TYPE_UNIT) { if (log.isDebugEnabled()) log.debug("GETTING CONTENT"); ContentHbm cl = getContentHbmDao().load(new Integer(current.getReference())); if (lastContenVersionOnly) { out.print(getContentHbmDao().toXmlWithLastContentVersion(cl)); } else { out.print(getContentHbmDao().toXml(cl)); } } } catch (Exception exe) { log.warn("Error occured ViewComponentHbmImpl to XML " + exe.getMessage()); } /* Safeguard RealmAtVC */ /* * try { RealmAtVC realmatvc = current.getRealmAtVC(); if (realmatvc != * null) { out.print("" + realmatvc.toXml() + "\n"); } } catch * (Exception ine) { log.error("CANNOT APPEND REALM AT VC " + * ine.getMessage()); } */ } // this is for navigation, f.e. 
if (withUrl) { if (current.getViewType() == Constants.VIEW_TYPE_EXTERNAL_LINK) { out.print("<extUrl><![CDATA[" + current.getReference() + "]]></extUrl>\n"); } else if (current.getViewType() == Constants.VIEW_TYPE_SEPARATOR) { out.print("<separator><![CDATA[" + current.getReference() + "]]></separator>\n"); } else if (current.getViewType() == Constants.VIEW_TYPE_INTERNAL_LINK) { try { ViewComponentHbm vclJump = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); out.print("<url"); if (current.getMetaData() != null && !current.getMetaData().equals("")) { out.print(" anchor=\"" + current.getMetaData() + "\"><![CDATA[" + vclJump.getPath() + "]]></url>\n"); } else { out.print("><![CDATA[" + vclJump.getPath() + "]]></url>\n"); } } catch (Exception exe) { out.print("/>\n"); log.warn("Error getting path for referenced viewComponent " + current.getReference() + " by internalLink " + current.getViewComponentId() + ": " + exe.getMessage()); } } else { out.print("<url><![CDATA[" + current.getPath() + "]]></url>\n"); try { if (current.getViewType() == Constants.VIEW_TYPE_SYMLINK) { try { ViewComponentHbm vclSym = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); String reference = vclSym.getReference(); ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); out.print("<template>" + content.getTemplate() + "</template>\n"); } catch (Exception symEx) { log.warn("ViewComponent " + current.getViewComponentId() + " is a SymLink, maybe the LinkTarget " + current.getReference() + " does not exist (anymore)? -> " + symEx.getMessage()); } } else { String reference = current.getReference(); ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); out.print("<template>" + content.getTemplate() + "</template>\n"); // out.print("<template>" + // getContentLocalHome().findByPrimaryKey(new // Integer(getReference())).getTemplate() + // "</template>\n"); } } catch (Exception exe) { log.warn("Error getting url or template for viewComponent " + current.getViewComponentId() + ": " + exe.getMessage()); } } } if (withSiteProtection) { if (current.getRealm2vc() != null) { out.println(current.getRealm2vc().toXml()); } exportVCRealms(out, current); } if (depth != 0) { // 0 is only THIS ViewComponent try { Collection coll = current.getChildrenOrdered(); Iterator it = coll.iterator(); while (it.hasNext()) { ViewComponentHbm vcl = (ViewComponentHbm) it.next(); if (onlyThisUnitId == null || onlyThisUnitId.equals(vcl.getUnit4ViewComponent()) || (deployType == Constants.DEPLOY_TYPE_ROOT && !vcl.getUnit4ViewComponent().equals(vcl.getParent().getUnit4ViewComponent()))) { if (!returnOnlyVisibleOne || this.shouldBeVisible(vcl, liveServer)) { int destDepth = depth - 1; if (depth == -1) destDepth = -1; this.toXml(vcl, onlyThisUnitId, withContent, lastContenVersionOnly, withSiteProtection, withUrl, destDepth, liveServer, returnOnlyVisibleOne, deployType, out); } } else { // This is outside the specified unit. 
Therefor do nothing with it and look for the next fitting // this.toXml(vcl, onlyThisUnitId, false, withUrl, 1, liveServer, returnOnlyVisibleOne, out); } } } catch (Exception exe) { log.error("Error occured calling children.toXml: " + exe.getMessage(), exe); } } out.println("</viewcomponent>"); if (log.isDebugEnabled()) log.debug("toXml end"); } /* (non-Javadoc) * @see de.juwimm.cms.model.ViewComponentHbmDaoBase#handleToXml(de.juwimm.cms.model.ViewComponentHbm, java.lang.Integer, boolean, boolean, boolean, int, boolean, boolean, java.io.PrintStream) */ @Override protected void handleToXml(ViewComponentHbm current, Integer onlyThisUnitId, boolean withContent, boolean lastContenVersionOnly, boolean withSiteProtection, boolean withUrl, int depth, boolean liveServer, boolean returnOnlyVisibleOne, PrintStream out) { int deployType = -1; this.toXml(current, onlyThisUnitId, withContent, lastContenVersionOnly, withSiteProtection, withUrl, depth, liveServer, returnOnlyVisibleOne, deployType, out); } @Override /** * @param current * @param onlyThisUnitId * @param withContent * @param depth max level of recursion, 0 for this node only, -1 for infinity */ protected void handleToXml(ViewComponentHbm current, Integer onlyThisUnitId, boolean withContent, boolean withUrl, int depth, boolean liveServer, boolean returnOnlyVisibleOne, PrintStream out) { boolean lastContenVersionOnly = false; boolean withSiteProtection = false; this.toXml(current, onlyThisUnitId, withContent, lastContenVersionOnly, withSiteProtection, withUrl, depth, liveServer, returnOnlyVisibleOne, out); } /** * * @param liveServer * @return */ private boolean hasVisibleChild(ViewComponentHbm me, boolean liveServer) { if (log.isDebugEnabled()) log.debug("hasVisibleChild start"); boolean result = false; try { Iterator it = me.getChildren().iterator(); while (it.hasNext()) { ViewComponentHbm current = (ViewComponentHbm) it.next(); result = this.shouldBeVisible(current, liveServer); if (result) { break; } } } catch (Exception exe) { log.error("hasVisibleChild for VCID " + me.getViewComponentId() + " an unknown error occured: " + exe.getMessage(), exe); } if (log.isDebugEnabled()) log.debug("hasVisibleChild end"); return result; } @Override protected long handleGetPageModifiedDate(ViewComponentHbm me) { if (log.isDebugEnabled()) log.debug("getPageModifiedDate start"); long result = new Date().getTime(); try { byte viewType = me.getViewType(); switch (viewType) { case Constants.VIEW_TYPE_CONTENT: case Constants.VIEW_TYPE_UNIT: { String reference = me.getReference(); if (reference != null) { Integer refId = null; refId = new Integer(reference); if (refId != null) { ContentVersionHbm contentVersion = null; ContentHbm vc = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, refId); if (getTizzitPropertiesBeanSpring().isLiveserver()) { contentVersion = vc.getContentVersionForPublish(); } else { contentVersion = vc.getLastContentVersion(); } if (contentVersion != null) { result = contentVersion.getCreateDate(); } } } break; } case Constants.VIEW_TYPE_SYMLINK: { String reference = me.getReference(); if (reference != null) { Integer refId = null; refId = new Integer(reference); if (refId != null) { ViewComponentHbm vc = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, refId); result = this.getPageModifiedDate(vc); } } break; } case Constants.VIEW_TYPE_INTERNAL_LINK: case Constants.VIEW_TYPE_SEPARATOR: case Constants.VIEW_TYPE_EXTERNAL_LINK: { result = me.getCreateDate(); break; } 
default: break; } } catch (Exception e) { log.warn("Error getting pageModifiedDate, setting to \"now\"", e); } if (log.isDebugEnabled()) log.debug("LEAVING GET PAGE MODIFIED DATE"); return result; } @Override protected boolean handleHasPublishContentVersion(ViewComponentHbm me) { boolean retVal = false; ContentHbm cl = null; ContentVersionHbm cvl = null; try { if (me.getViewType() == Constants.VIEW_TYPE_CONTENT || me.getViewType() == Constants.VIEW_TYPE_UNIT) { String ref = me.getReference(); cl = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(ref)); cvl = cl.getContentVersionForPublish(); if (cvl != null) { retVal = true; } } } catch (Exception exe) { log.error("hasPublishContentVersion: This should not occure, basically no ContentVersion found or wrong Reference: " + me.getReference() + " Content:" + cl + " cvl:" + cvl + " vcid:" + me.getViewComponentId()); } return retVal; } @Override protected Date handleGetNavigationAge(Integer refVcId, String since, int depth, boolean getPUBLSVersion) throws Exception { try { if (log.isDebugEnabled()) log.debug("begin getNavigationAge"); Date retVal = null; try { Integer viewDocumentId = this.load(refVcId).getViewDocument().getViewDocumentId(); Query q = getSessionFactory().getCurrentSession().createQuery("SELECT MAX(vc.lastModifiedDate) FROM de.juwimm.cms.model.ViewComponentHbm vc WHERE vc.viewDocument.viewDocumentId = ?"); q.setInteger(0, viewDocumentId.intValue()); Long date = (Long) q.uniqueResult(); if (log.isDebugEnabled()) log.debug("getNavigationAge got md from SQL Command"); retVal = new Date(date.longValue()); } catch (Exception e) { log.error(e.getMessage(), e); } if (retVal == null) { retVal = new Date(); } if (log.isDebugEnabled()) log.debug("end getNavigationAge " + retVal); return retVal; } catch (Exception e) { log.error("Could not get navigation age", e); throw new UserException("Could not get navigation age: " + e.getMessage()); } } @Override protected String handleGetLastModifiedPages(Integer viewDocumentId, Integer unitId, int numberOfPages, boolean getPublsVersion) throws Exception { if (log.isDebugEnabled()) log.debug("begin getLastModifiedPages"); StringBuffer sb = new StringBuffer("<lastModifiedPages>"); try { try { Query q = null; // TODO implement search in unit. right now I can't find all // pages belonging to one unit // if (unitId == null) { // get newest pages for complete site q = getSessionFactory().getCurrentSession().createQuery("SELECT vc.viewComponentId FROM de.juwimm.cms.model.ViewComponentHbm vc WHERE vc.viewDocument.viewDocumentId = ? ORDER BY vc.userLastModifiedDate DESC"); q.setInteger(0, viewDocumentId.intValue()); // } else { // get newest pages for one unit // q = // getSessionFactory().getCurrentSession().createQuery("SELECT // vc.viewComponentId FROM de.juwimm.cms.model.ViewComponentHbm // vc WHERE vc.viewDocument.site.siteId = ? 
ORDER BY // vc.userLastModifiedDate DESC"); // q.setInteger(0, unitId.intValue()); // } q.setMaxResults(numberOfPages); ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); PrintStream out = new PrintStream(byteOut, true, "UTF-8"); Iterator it = q.list().iterator(); while (it.hasNext()) { Integer vcId = (Integer) it.next(); ViewComponentHbm viewComponent = this.load(vcId); this.toXml(viewComponent, null, false, true, 1, getPublsVersion, true, out); sb.append(byteOut.toString("UTF-8")); byteOut.reset(); } } catch (Exception e) { log.error(e.getMessage(), e); } } catch (Exception e) { log.error("Error getting " + numberOfPages + " last-modified pages for viewDocument " + viewDocumentId + " and unit " + unitId + ": " + e.getMessage(), e); throw new UserException("Error getting " + numberOfPages + " last-modified pages for viewDocument " + viewDocumentId + " and unit " + unitId + ": " + e.getMessage()); } sb.append("</lastModifiedPages>"); return sb.toString(); } @Override protected ViewComponentHbm handleCreate(ViewDocumentHbm viewDocument, String reference, String displayLinkName, String linkDescription, Integer viewComponentId) throws Exception { return handleCreate(viewDocument, reference, displayLinkName, linkDescription, null, viewComponentId); } @Override protected ViewComponentHbm handleCreate(ViewDocumentHbm vd, String reference, String displayLinkName, String linkDescription, String urlLinkName, Integer viewComponentId) throws Exception { ViewComponentHbm vc = ViewComponentHbm.Factory.newInstance(); try { if (viewComponentId == null) { Integer id = sequenceHbmDao.getNextSequenceNumber("viewcomponent.view_component_id"); vc.setViewComponentId(id); } else { vc.setViewComponentId(viewComponentId); } } catch (Exception e) { log.error("Error creating/setting primary key", e); } vc.setDisplayLinkName(displayLinkName); vc.setLinkDescription(linkDescription); if (urlLinkName != null && !"".equalsIgnoreCase(urlLinkName)) { vc.setUrlLinkName(urlLinkName); } else { vc.setUrlLinkName(vc.getViewComponentId().toString()); } if (reference.equals("root")) { vc.setViewType(new Byte("1").byteValue()); } else if (reference.startsWith("content:")) { vc.setReference(reference.substring(8)); vc.setViewType(new Byte("1").byteValue()); } else if (reference.startsWith("jump:")) { vc.setReference(reference.substring(5)); vc.setViewType(new Byte("2").byteValue()); } else if (reference.equalsIgnoreCase("SEPARATOR")) { vc.setViewType(new Byte("7").byteValue()); } else if (reference.startsWith("link:")) { vc.setReference(reference.substring(5)); vc.setViewType(new Byte("3").byteValue()); } else if (reference.startsWith("symlink:")) { vc.setReference(reference.substring(8)); vc.setViewType(new Byte("6").byteValue()); } else { vc.setReference(reference); } vc.setStatus(0); vc.setOnline((byte) 0); vc.setVisible(true); vc.setSearchIndexed(true); vc.setXmlSearchIndexed(true); vc.setViewDocument(vd); long ts = System.currentTimeMillis(); vc.setLastModifiedDate(ts); vc.setUserLastModifiedDate(ts); vc.setCreateDate(ts); if (reference.equals("root")) { ContentValue cdao = new ContentValue(); cdao.setTemplate("standard"); cdao.setHeading("Initial Page"); cdao.setVersion("1"); cdao.setContentText("<source></source>"); try { ContentHbm content = getContentHbmDao().createWithContentVersion(cdao, AuthenticationHelper.getUserName()); vc.setReference(content.getContentId().toString()); } catch (Throwable exe) { log.warn("RootContent could not be created because of duplicate key. 
This should only occure on liveserver."); } vc.setViewType(new Byte("1").byteValue()); } else if (vc.getReference() != null && vc.getReference().startsWith("symlink:")) { try { ViewComponentHbm vclRef = super.load(new Integer(vc.getReference())); ContentHbm c = getContentHbmDao().load(new Integer(vclRef.getReference())); c.setUpdateSearchIndex(true); } catch (Exception exe) { log.warn("Error occured during creation of symlink", exe); } } getHibernateTemplate().save(vc); return vc; } @Override public void remove(ViewComponentHbm viewComponentHbm) { // delete all children if (!viewComponentHbm.isLeaf()) { ViewComponentHbm currentNode, nextNode; currentNode = viewComponentHbm.getFirstChild(); nextNode = currentNode.getNextNode(); remove(currentNode); while (nextNode != null) { currentNode = nextNode; nextNode = nextNode.getNextNode(); remove(currentNode); } } // delete referenced content byte viewType = viewComponentHbm.getViewType(); if (viewType == Constants.VIEW_TYPE_CONTENT || viewType == Constants.VIEW_TYPE_UNIT) { try { String reference = viewComponentHbm.getReference(); if (reference != null && !reference.equals("") && !reference.equals("DUMMY") && !reference.equals("root")) { ContentHbm content = getContentHbmDao().load(new Integer(reference)); getContentHbmDao().remove(content); } } catch (Exception exe) { log.warn("Error deleting referenced content for viewComponent " + viewComponentHbm.getViewComponentId() + "\n" + exe.getMessage()); } } // sending deleted-message to searchEngine if (viewComponentHbm.getViewDocument() != null) { searchengineDeleteService.deletePage(viewComponentHbm); } // if this page was protected remove protection Realm2viewComponentHbm realm = viewComponentHbm.getRealm2vc(); if (realm != null) { getRealm2viewComponentHbmDao().remove(realm); } // if this page is a login-page, update realms that use this page Iterator it = viewComponentHbm.getRealm4login().iterator(); ArrayList<Realm2viewComponentHbm> al = new ArrayList<Realm2viewComponentHbm>(); while (it.hasNext()) { Realm2viewComponentHbm r = (Realm2viewComponentHbm) it.next(); al.add(r); } it = al.iterator(); while (it.hasNext()) { Realm2viewComponentHbm r = (Realm2viewComponentHbm) it.next(); r.setLoginPage(null); } super.remove(viewComponentHbm); } @Override @SuppressWarnings("unchecked") public java.util.Collection findByStatus(final int transform, final java.lang.Integer viewDocumentId, final int status) { return this.findByStatus(transform, "from de.juwimm.cms.model.ViewComponentHbm as v where v.viewDocument.viewDocumentId = ? and v.status = ?", viewDocumentId, status); } @Override public java.lang.Object find4Unit(final int transform, final java.lang.Integer unitId, final java.lang.Integer viewDocumentId) { return this.find4Unit(transform, "from de.juwimm.cms.model.ViewComponentHbm v where v.assignedUnit.unitId = ? and v.viewDocument.viewDocumentId = ?", unitId, viewDocumentId); } @Override @SuppressWarnings("unchecked") public java.util.Collection findByReferencedContent(final int transform, final java.lang.String reference) { return this.findByReferencedContent(transform, "from de.juwimm.cms.model.ViewComponentHbm as v where v.reference = ? and v.viewType in (0, 1, 4, 5)", reference); } @Override @SuppressWarnings("unchecked") public java.util.Collection findByReferencedViewComponent(final int transform, final java.lang.String reference) { return this.findByReferencedViewComponent(transform, "from de.juwimm.cms.model.ViewComponentHbm as v where v.reference = ? 
and v.viewType in (6, 2)", reference); } @Override @SuppressWarnings("unchecked") public java.util.Collection findAllWithUnit(final int transform, final java.lang.Integer viewDocumentId) { return this.findAllWithUnit(transform, "from de.juwimm.cms.model.ViewComponentHbm as v where v.assignedUnit is not null and v.viewDocument.viewDocumentId = ?", viewDocumentId); } @Override @SuppressWarnings("unchecked") public java.util.Collection findByParent(final int transform, final java.lang.Integer vcId) { return this.findByParent(transform, "from de.juwimm.cms.model.ViewComponentHbm v WHERE v.parent.viewComponentId = ?", vcId); } @Override protected java.util.Collection handleFindRootViewComponents4Unit(Integer unitId) throws Exception { Query query = getSession().createQuery("from de.juwimm.cms.model.ViewComponentHbm v where v.assignedUnit.unitId = :unitId"); query.setParameter("unitId", unitId); return query.list(); } @Override @Deprecated protected void handleToXmlComplete(Integer viewComponentId, Boolean onlyLastContentVersion, Integer onlyThisUnitId, Boolean withURL, int depth, Boolean liveServer, Boolean returnOnlyVisibleOne, PrintStream out) throws Exception { // ViewComponentHbm current = load(viewComponentId); // out.print("<viewcomponent id=\""); // out.print(current.getViewComponentId()); // out.print("\" unitId=\""); // out.print(current.getUnit4ViewComponent()); // // if (withURL) { // out.print("\" hasChild=\""); // out.print(hasVisibleChild(current, liveServer)); // } // if (current.getPrevNode() != null) { // out.print("\" prev=\""); // out.print(current.getPrevNode().getViewComponentId()); // } // if (current.getNextNode() != null) { // out.print("\" next=\""); // out.print(current.getNextNode().getViewComponentId()); // } // if (current.getParent() != null) { // out.print("\" parent=\""); // out.print(current.getParent().getViewComponentId()); // } // // out.print("\">\n"); // ViewDocumentValue viewDocumentValue = null; // try { // viewDocumentValue = current.getViewDocument().getDao(); // } catch (Exception e) { // } // out.print("<showType>" + current.getShowType() + "</showType>\n"); // out.print("<viewType>" + current.getViewType() + "</viewType>\n"); // out.print("<visible>" + current.isVisible() + "</visible>\n"); // out.print("<searchIndexed>" + current.isSearchIndexed() + "</searchIndexed>\n"); // out.print("<statusInfo><![CDATA[" + current.getLinkDescription() + "]]></statusInfo>\n"); // if (liveServer) { // byte viewType = current.getViewType(); // if (viewType == Constants.VIEW_TYPE_EXTERNAL_LINK || viewType == Constants.VIEW_TYPE_INTERNAL_LINK || viewType == Constants.VIEW_TYPE_SYMLINK) { // if (current.getStatus() != Constants.DEPLOY_STATUS_APPROVED) { // if (current.getApprovedLinkName() != null && !"null".equalsIgnoreCase(current.getApprovedLinkName())) { // out.print("<linkName><![CDATA[" + current.getApprovedLinkName() + "]]></linkName>\n"); // } else { // out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); // } // } else { // out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); // } // } else { // out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); // } // } else { // out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); // } // out.print("<urlLinkName><![CDATA[" + current.getUrlLinkName() + "]]></urlLinkName>\n"); // out.print("<viewLevel>" + current.getViewLevel() + "</viewLevel>\n"); // out.print("<viewIndex>" + 
current.getViewIndex() + "</viewIndex>\n"); // out.print("<displaySettings>" + current.getDisplaySettings() + "</displaySettings>\n"); // out.print("<viewDocumentViewType>" + (viewDocumentValue != null ? viewDocumentValue.getViewType() : "browser") + "</viewDocumentViewType>\n"); // out.print("<language>" + (viewDocumentValue != null ? viewDocumentValue.getLanguage() : "deutsch") + "</language>\n"); // out.print("<userModifiedDate>" + current.getUserLastModifiedDate() + "</userModifiedDate>\n"); // // // this is for the edition // // out.print("<status>" + current.getStatus() + "</status>\n"); // out.print("<onlineStart>" + current.getOnlineStart() + "</onlineStart>\n"); // out.print("<onlineStop>" + current.getOnlineStop() + "</onlineStop>\n"); // out.print("<online>" + current.getOnline() + "</online>\n"); // out.print("<reference><![CDATA[" + current.getReference() + "]]></reference>\n"); // out.print("<metaKeywords><![CDATA[" + current.getMetaData() + "]]></metaKeywords>\n"); // out.print("<metaDescription><![CDATA[" + current.getMetaDescription() + "]]></metaDescription>\n"); // try { // out.print("<modifiedDate>" + DateConverter.getSql2String(new Date(current.getLastModifiedDate())) + "</modifiedDate>\n"); // out.print("<createDate>" + DateConverter.getSql2String(new Date(current.getCreateDate())) + "</createDate>\n"); // if (current.getViewType() == Constants.VIEW_TYPE_CONTENT || current.getViewType() == Constants.VIEW_TYPE_UNIT) { // if (log.isDebugEnabled()) log.debug("GETTING CONTENT"); // ContentHbm cl = getContentHbmDao().load(new Integer(current.getReference())); // if (onlyLastContentVersion) { // out.print(getContentHbmDao().toXmlWithLastContentVersion(cl)); // } else { // out.print(getContentHbmDao().toXml(cl)); // } // } // } catch (Exception exe) { // log.warn("Error occured ViewComponentHbmImpl to XML " + exe.getMessage()); // } // /* Safeguard RealmAtVC */ // /* // * try { RealmAtVC realmatvc = current.getRealmAtVC(); if (realmatvc != // * null) { out.print("" + realmatvc.toXml() + "\n"); } } catch // * (Exception ine) { log.error("CANNOT APPEND REALM AT VC " + // * ine.getMessage()); } // */ // // // this is for navigation, f.e. 
// if (withURL) { // if (current.getViewType() == Constants.VIEW_TYPE_EXTERNAL_LINK) { // out.print("<extUrl><![CDATA[" + current.getReference() + "]]></extUrl>\n"); // } else if (current.getViewType() == Constants.VIEW_TYPE_SEPARATOR) { // out.print("<separator><![CDATA[" + current.getReference() + "]]></separator>\n"); // } else if (current.getViewType() == Constants.VIEW_TYPE_INTERNAL_LINK) { // try { // ViewComponentHbm vclJump = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); // out.print("<url"); // if (current.getMetaData() != null && !current.getMetaData().equals("")) { // out.print(" anchor=\"" + current.getMetaData() + "\"><![CDATA[" + vclJump.getPath() + "]]></url>\n"); // } else { // out.print("><![CDATA[" + vclJump.getPath() + "]]></url>\n"); // } // } catch (Exception exe) { // out.print("/>\n"); // log.warn("Error getting path for referenced viewComponent " + current.getReference() + " by internalLink " + current.getViewComponentId() + ": " + exe.getMessage()); // } // } else { // out.print("<url><![CDATA[" + current.getPath() + "]]></url>\n"); // try { // if (current.getViewType() == Constants.VIEW_TYPE_SYMLINK) { // try { // ViewComponentHbm vclSym = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); // String reference = vclSym.getReference(); // ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); // out.print("<template>" + content.getTemplate() + "</template>\n"); // } catch (Exception symEx) { // log.warn("ViewComponent " + current.getViewComponentId() + " is a SymLink, maybe the LinkTarget " + current.getReference() + " does not exist (anymore)? 
-> " + symEx.getMessage()); // } // } else { // String reference = current.getReference(); // ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); // out.print("<template>" + content.getTemplate() + "</template>\n"); // // out.print("<template>" + // // getContentLocalHome().findByPrimaryKey(new // // Integer(getReference())).getTemplate() + // // "</template>\n"); // } // } catch (Exception exe) { // log.warn("Error getting url or template for viewComponent " + current.getViewComponentId() + ": " + exe.getMessage()); // } // } // } // if (current.getRealm2vc() != null) { // out.println(current.getRealm2vc().toXml()); // } // exportVCRealms(out, current); // // if (depth != 0) { // 0 is only THIS ViewComponent // try { // Collection coll = current.getChildrenOrdered(); // Iterator it = coll.iterator(); // while (it.hasNext()) { // ViewComponentHbm vcl = (ViewComponentHbm) it.next(); // if (onlyThisUnitId == null || onlyThisUnitId.equals(vcl.getUnit4ViewComponent())) { // if (!returnOnlyVisibleOne || this.shouldBeVisible(vcl, liveServer)) { // int destDepth = depth - 1; // if (depth == -1) destDepth = -1; // this.toXmlComplete(vcl.getViewComponentId(), onlyLastContentVersion, onlyThisUnitId, withURL, destDepth, liveServer, returnOnlyVisibleOne, out); // } // } else { // //this.toXml(vcl, onlyThisUnitId, false, withUrl, 1, liveServer, returnOnlyVisibleOne, out); // this.toXmlComplete(vcl.getViewComponentId(), onlyLastContentVersion, onlyThisUnitId, withURL, 1, liveServer, returnOnlyVisibleOne, out); // } // } // } catch (Exception exe) { // log.error("Error occured calling children.toXmlComplete: " + exe.getMessage(), exe); // } // } // out.println("</viewcomponent>"); // if (log.isDebugEnabled()) log.debug("toXml end"); throw new NotImplementedException("should never be used"); } private void exportVCRealms(PrintStream out, ViewComponentHbm viewComponent) { if (viewComponent.getRealm2vc() != null) { Realm2viewComponentHbm realm = viewComponent.getRealm2vc(); RealmJaasHbm realmjass = realm.getJaasRealm(); RealmJdbcHbm realmjdbc = realm.getJdbcRealm(); RealmLdapHbm realmLdapHbm = realm.getLdapRealm(); RealmSimplePwHbm realmPw = realm.getSimplePwRealm(); out.println("<realms>"); if (realmjass != null) { out.println("\t" + realmjass.toXml()); } if (realmjdbc != null) { out.println("\t" + realmjdbc.toXml()); } if (realmLdapHbm != null) { out.println("\t" + realmLdapHbm.toXml()); } if (realmPw != null) { out.println("\t" + realmPw.toXml()); } out.println("</realms>"); } } @Override protected ViewComponentHbm handleCloneViewComponent(ViewComponentHbm oldViewComponent, Map picturesIds, Map documentsIds, Map personsIds, Integer unitId) throws Exception { ViewComponentHbm viewComponentHbm = ViewComponentHbm.Factory.newInstance(); try { Integer id = sequenceHbmDao.getNextSequenceNumber("viewcomponent.view_component_id"); viewComponentHbm.setViewComponentId(id); } catch (Exception e) { log.error("Error creating/setting primary key", e); } viewComponentHbm.setApprovedLinkName(oldViewComponent.getApprovedLinkName()); viewComponentHbm.setCreateDate(System.currentTimeMillis()); viewComponentHbm.setDeployCommand(oldViewComponent.getDeployCommand()); viewComponentHbm.setDisplayLinkName("copy_" + oldViewComponent.getDisplayLinkName()); viewComponentHbm.setDisplaySettings(oldViewComponent.getDisplaySettings()); viewComponentHbm.setLinkDescription(oldViewComponent.getLinkDescription()); 
viewComponentHbm.setMetaData(oldViewComponent.getMetaData()); viewComponentHbm.setMetaDescription(oldViewComponent.getMetaDescription()); viewComponentHbm.setOnline(oldViewComponent.getOnline()); viewComponentHbm.setOnlineStart(oldViewComponent.getOnlineStart()); viewComponentHbm.setOnlineStop(oldViewComponent.getOnlineStop()); viewComponentHbm.setRealm2vc(oldViewComponent.getRealm2vc()); //viewComponentHbm.setRealm4login(oldViewComponent.getRealm4login()); viewComponentHbm.setSearchIndexed(oldViewComponent.isSearchIndexed()); viewComponentHbm.setShowType(oldViewComponent.getShowType()); viewComponentHbm.setStatus(oldViewComponent.getStatus()); viewComponentHbm.setUrlLinkName("copy_" + oldViewComponent.getUrlLinkName()); viewComponentHbm.setViewDocument(oldViewComponent.getViewDocument()); viewComponentHbm.setViewIndex(oldViewComponent.getViewIndex()); viewComponentHbm.setViewLevel(oldViewComponent.getViewLevel()); viewComponentHbm.setViewType(oldViewComponent.getViewType()); viewComponentHbm.setVisible(oldViewComponent.isVisible()); viewComponentHbm.setXmlSearchIndexed(oldViewComponent.isXmlSearchIndexed()); ContentHbm oldContent = getContentHbmDao().load(Integer.valueOf(oldViewComponent.getReference())); ContentHbm newContent = getContentHbmDao().cloneContent(oldContent, picturesIds, documentsIds, personsIds, unitId); viewComponentHbm.setReference(String.valueOf(newContent.getContentId())); return create(viewComponentHbm); } }
true
true
protected void handleToXml(ViewComponentHbm current, Integer onlyThisUnitId, boolean withContent, boolean lastContenVersionOnly, boolean withSiteProtection, boolean withUrl, int depth, boolean liveServer, boolean returnOnlyVisibleOne, int deployType, PrintStream out) { if (log.isDebugEnabled()) log.debug("toXml " + withContent + " WITH URL " + withUrl); // if it's a deploy - the status has to be 'approved' and will be set to 'for_deploy' if (deployType != -1) { if (current.getStatus() == Constants.DEPLOY_STATUS_APPROVED) { current.setStatus(Constants.DEPLOY_STATUS_FOR_DEPLOY); this.update(current); } else { return; } } out.print("<viewcomponent id=\""); out.print(current.getViewComponentId()); out.print("\" unitId=\""); out.print(current.getUnit4ViewComponent()); if (withUrl) { out.print("\" hasChild=\""); out.print(hasVisibleChild(current, liveServer)); } // This is only needed for the FIRST VC in the Edition. // If there is no existent VC like this (initial deploy), we will take // this settings. if (current.getPrevNode() != null) { out.print("\" prev=\""); out.print(current.getPrevNode().getViewComponentId()); } if (current.getNextNode() != null) { out.print("\" next=\""); out.print(current.getNextNode().getViewComponentId()); } if (current.getParent() != null) { out.print("\" parent=\""); out.print(current.getParent().getViewComponentId()); } out.print("\">\n"); ViewDocumentValue viewDocumentValue = null; try { viewDocumentValue = current.getViewDocument().getDao(); } catch (Exception e) { } out.print("<showType>" + current.getShowType() + "</showType>\n"); out.print("<viewType>" + current.getViewType() + "</viewType>\n"); out.print("<visible>" + current.isVisible() + "</visible>\n"); out.print("<searchIndexed>" + current.isSearchIndexed() + "</searchIndexed>\n"); out.print("<statusInfo><![CDATA[" + current.getLinkDescription() + "]]></statusInfo>\n"); if (liveServer) { byte viewType = current.getViewType(); if (viewType == Constants.VIEW_TYPE_EXTERNAL_LINK || viewType == Constants.VIEW_TYPE_INTERNAL_LINK || viewType == Constants.VIEW_TYPE_SYMLINK) { if (current.getStatus() != Constants.DEPLOY_STATUS_APPROVED) { if (current.getApprovedLinkName() != null && !"null".equalsIgnoreCase(current.getApprovedLinkName())) { out.print("<linkName><![CDATA[" + current.getApprovedLinkName() + "]]></linkName>\n"); } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } out.print("<urlLinkName><![CDATA[" + current.getUrlLinkName() + "]]></urlLinkName>\n"); out.print("<viewLevel>" + current.getViewLevel() + "</viewLevel>\n"); out.print("<viewIndex>" + current.getViewIndex() + "</viewIndex>\n"); out.print("<displaySettings>" + current.getDisplaySettings() + "</displaySettings>\n"); if (viewDocumentValue != null) { out.print("<viewDocumentId>" + viewDocumentValue.getViewDocumentId() + "</viewDocumentId>\n"); } out.print("<viewDocumentViewType>" + (viewDocumentValue != null ? viewDocumentValue.getViewType() : "browser") + "</viewDocumentViewType>\n"); out.print("<language>" + (viewDocumentValue != null ? 
viewDocumentValue.getLanguage() : "deutsch") + "</language>\n"); out.print("<userModifiedDate>" + current.getUserLastModifiedDate() + "</userModifiedDate>\n"); // this is for the edition if (withContent) { out.print("<status>" + current.getStatus() + "</status>\n"); out.print("<onlineStart>" + current.getOnlineStart() + "</onlineStart>\n"); out.print("<onlineStop>" + current.getOnlineStop() + "</onlineStop>\n"); out.print("<online>" + current.getOnline() + "</online>\n"); out.print("<reference><![CDATA[" + current.getReference() + "]]></reference>\n"); out.print("<metaKeywords><![CDATA[" + current.getMetaData() + "]]></metaKeywords>\n"); out.print("<metaDescription><![CDATA[" + current.getMetaDescription() + "]]></metaDescription>\n"); try { out.print("<modifiedDate>" + DateConverter.getSql2String(new Date(current.getLastModifiedDate())) + "</modifiedDate>\n"); out.print("<createDate>" + DateConverter.getSql2String(new Date(current.getCreateDate())) + "</createDate>\n"); if (current.getViewType() == Constants.VIEW_TYPE_CONTENT || current.getViewType() == Constants.VIEW_TYPE_UNIT) { if (log.isDebugEnabled()) log.debug("GETTING CONTENT"); ContentHbm cl = getContentHbmDao().load(new Integer(current.getReference())); if (lastContenVersionOnly) { out.print(getContentHbmDao().toXmlWithLastContentVersion(cl)); } else { out.print(getContentHbmDao().toXml(cl)); } } } catch (Exception exe) { log.warn("Error occured ViewComponentHbmImpl to XML " + exe.getMessage()); } /* Safeguard RealmAtVC */ /* * try { RealmAtVC realmatvc = current.getRealmAtVC(); if (realmatvc != * null) { out.print("" + realmatvc.toXml() + "\n"); } } catch * (Exception ine) { log.error("CANNOT APPEND REALM AT VC " + * ine.getMessage()); } */ } // this is for navigation, f.e. if (withUrl) { if (current.getViewType() == Constants.VIEW_TYPE_EXTERNAL_LINK) { out.print("<extUrl><![CDATA[" + current.getReference() + "]]></extUrl>\n"); } else if (current.getViewType() == Constants.VIEW_TYPE_SEPARATOR) { out.print("<separator><![CDATA[" + current.getReference() + "]]></separator>\n"); } else if (current.getViewType() == Constants.VIEW_TYPE_INTERNAL_LINK) { try { ViewComponentHbm vclJump = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); out.print("<url"); if (current.getMetaData() != null && !current.getMetaData().equals("")) { out.print(" anchor=\"" + current.getMetaData() + "\"><![CDATA[" + vclJump.getPath() + "]]></url>\n"); } else { out.print("><![CDATA[" + vclJump.getPath() + "]]></url>\n"); } } catch (Exception exe) { out.print("/>\n"); log.warn("Error getting path for referenced viewComponent " + current.getReference() + " by internalLink " + current.getViewComponentId() + ": " + exe.getMessage()); } } else { out.print("<url><![CDATA[" + current.getPath() + "]]></url>\n"); try { if (current.getViewType() == Constants.VIEW_TYPE_SYMLINK) { try { ViewComponentHbm vclSym = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); String reference = vclSym.getReference(); ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); out.print("<template>" + content.getTemplate() + "</template>\n"); } catch (Exception symEx) { log.warn("ViewComponent " + current.getViewComponentId() + " is a SymLink, maybe the LinkTarget " + current.getReference() + " does not exist (anymore)? 
-> " + symEx.getMessage()); } } else { String reference = current.getReference(); ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); out.print("<template>" + content.getTemplate() + "</template>\n"); // out.print("<template>" + // getContentLocalHome().findByPrimaryKey(new // Integer(getReference())).getTemplate() + // "</template>\n"); } } catch (Exception exe) { log.warn("Error getting url or template for viewComponent " + current.getViewComponentId() + ": " + exe.getMessage()); } } } if (withSiteProtection) { if (current.getRealm2vc() != null) { out.println(current.getRealm2vc().toXml()); } exportVCRealms(out, current); } if (depth != 0) { // 0 is only THIS ViewComponent try { Collection coll = current.getChildrenOrdered(); Iterator it = coll.iterator(); while (it.hasNext()) { ViewComponentHbm vcl = (ViewComponentHbm) it.next(); if (onlyThisUnitId == null || onlyThisUnitId.equals(vcl.getUnit4ViewComponent()) || (deployType == Constants.DEPLOY_TYPE_ROOT && !vcl.getUnit4ViewComponent().equals(vcl.getParent().getUnit4ViewComponent()))) { if (!returnOnlyVisibleOne || this.shouldBeVisible(vcl, liveServer)) { int destDepth = depth - 1; if (depth == -1) destDepth = -1; this.toXml(vcl, onlyThisUnitId, withContent, lastContenVersionOnly, withSiteProtection, withUrl, destDepth, liveServer, returnOnlyVisibleOne, deployType, out); } } else { // This is outside the specified unit. Therefor do nothing with it and look for the next fitting // this.toXml(vcl, onlyThisUnitId, false, withUrl, 1, liveServer, returnOnlyVisibleOne, out); } } } catch (Exception exe) { log.error("Error occured calling children.toXml: " + exe.getMessage(), exe); } } out.println("</viewcomponent>"); if (log.isDebugEnabled()) log.debug("toXml end"); }
protected void handleToXml(ViewComponentHbm current, Integer onlyThisUnitId, boolean withContent, boolean lastContenVersionOnly, boolean withSiteProtection, boolean withUrl, int depth, boolean liveServer, boolean returnOnlyVisibleOne, int deployType, PrintStream out) { if (log.isDebugEnabled()) log.debug("toXml " + withContent + " WITH URL " + withUrl); // if it's a deploy - the status has to be 'approved' and will be set to 'for_deploy' if (deployType != -1) { if (current.getStatus() == Constants.DEPLOY_STATUS_APPROVED || current.getStatus() != Constants.DEPLOY_STATUS_DEPLOYED) { current.setStatus(Constants.DEPLOY_STATUS_FOR_DEPLOY); this.update(current); } else { return; } } out.print("<viewcomponent id=\""); out.print(current.getViewComponentId()); out.print("\" unitId=\""); out.print(current.getUnit4ViewComponent()); if (withUrl) { out.print("\" hasChild=\""); out.print(hasVisibleChild(current, liveServer)); } // This is only needed for the FIRST VC in the Edition. // If there is no existent VC like this (initial deploy), we will take // this settings. if (current.getPrevNode() != null) { out.print("\" prev=\""); out.print(current.getPrevNode().getViewComponentId()); } if (current.getNextNode() != null) { out.print("\" next=\""); out.print(current.getNextNode().getViewComponentId()); } if (current.getParent() != null) { out.print("\" parent=\""); out.print(current.getParent().getViewComponentId()); } out.print("\">\n"); ViewDocumentValue viewDocumentValue = null; try { viewDocumentValue = current.getViewDocument().getDao(); } catch (Exception e) { } out.print("<showType>" + current.getShowType() + "</showType>\n"); out.print("<viewType>" + current.getViewType() + "</viewType>\n"); out.print("<visible>" + current.isVisible() + "</visible>\n"); out.print("<searchIndexed>" + current.isSearchIndexed() + "</searchIndexed>\n"); out.print("<statusInfo><![CDATA[" + current.getLinkDescription() + "]]></statusInfo>\n"); if (liveServer) { byte viewType = current.getViewType(); if (viewType == Constants.VIEW_TYPE_EXTERNAL_LINK || viewType == Constants.VIEW_TYPE_INTERNAL_LINK || viewType == Constants.VIEW_TYPE_SYMLINK) { if (current.getStatus() != Constants.DEPLOY_STATUS_APPROVED) { if (current.getApprovedLinkName() != null && !"null".equalsIgnoreCase(current.getApprovedLinkName())) { out.print("<linkName><![CDATA[" + current.getApprovedLinkName() + "]]></linkName>\n"); } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } } else { out.print("<linkName><![CDATA[" + current.getDisplayLinkName() + "]]></linkName>\n"); } out.print("<urlLinkName><![CDATA[" + current.getUrlLinkName() + "]]></urlLinkName>\n"); out.print("<viewLevel>" + current.getViewLevel() + "</viewLevel>\n"); out.print("<viewIndex>" + current.getViewIndex() + "</viewIndex>\n"); out.print("<displaySettings>" + current.getDisplaySettings() + "</displaySettings>\n"); if (viewDocumentValue != null) { out.print("<viewDocumentId>" + viewDocumentValue.getViewDocumentId() + "</viewDocumentId>\n"); } out.print("<viewDocumentViewType>" + (viewDocumentValue != null ? viewDocumentValue.getViewType() : "browser") + "</viewDocumentViewType>\n"); out.print("<language>" + (viewDocumentValue != null ? 
viewDocumentValue.getLanguage() : "deutsch") + "</language>\n"); out.print("<userModifiedDate>" + current.getUserLastModifiedDate() + "</userModifiedDate>\n"); // this is for the edition if (withContent) { out.print("<status>" + current.getStatus() + "</status>\n"); out.print("<onlineStart>" + current.getOnlineStart() + "</onlineStart>\n"); out.print("<onlineStop>" + current.getOnlineStop() + "</onlineStop>\n"); out.print("<online>" + current.getOnline() + "</online>\n"); out.print("<reference><![CDATA[" + current.getReference() + "]]></reference>\n"); out.print("<metaKeywords><![CDATA[" + current.getMetaData() + "]]></metaKeywords>\n"); out.print("<metaDescription><![CDATA[" + current.getMetaDescription() + "]]></metaDescription>\n"); try { out.print("<modifiedDate>" + DateConverter.getSql2String(new Date(current.getLastModifiedDate())) + "</modifiedDate>\n"); out.print("<createDate>" + DateConverter.getSql2String(new Date(current.getCreateDate())) + "</createDate>\n"); if (current.getViewType() == Constants.VIEW_TYPE_CONTENT || current.getViewType() == Constants.VIEW_TYPE_UNIT) { if (log.isDebugEnabled()) log.debug("GETTING CONTENT"); ContentHbm cl = getContentHbmDao().load(new Integer(current.getReference())); if (lastContenVersionOnly) { out.print(getContentHbmDao().toXmlWithLastContentVersion(cl)); } else { out.print(getContentHbmDao().toXml(cl)); } } } catch (Exception exe) { log.warn("Error occured ViewComponentHbmImpl to XML " + exe.getMessage()); } /* Safeguard RealmAtVC */ /* * try { RealmAtVC realmatvc = current.getRealmAtVC(); if (realmatvc != * null) { out.print("" + realmatvc.toXml() + "\n"); } } catch * (Exception ine) { log.error("CANNOT APPEND REALM AT VC " + * ine.getMessage()); } */ } // this is for navigation, f.e. if (withUrl) { if (current.getViewType() == Constants.VIEW_TYPE_EXTERNAL_LINK) { out.print("<extUrl><![CDATA[" + current.getReference() + "]]></extUrl>\n"); } else if (current.getViewType() == Constants.VIEW_TYPE_SEPARATOR) { out.print("<separator><![CDATA[" + current.getReference() + "]]></separator>\n"); } else if (current.getViewType() == Constants.VIEW_TYPE_INTERNAL_LINK) { try { ViewComponentHbm vclJump = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); out.print("<url"); if (current.getMetaData() != null && !current.getMetaData().equals("")) { out.print(" anchor=\"" + current.getMetaData() + "\"><![CDATA[" + vclJump.getPath() + "]]></url>\n"); } else { out.print("><![CDATA[" + vclJump.getPath() + "]]></url>\n"); } } catch (Exception exe) { out.print("/>\n"); log.warn("Error getting path for referenced viewComponent " + current.getReference() + " by internalLink " + current.getViewComponentId() + ": " + exe.getMessage()); } } else { out.print("<url><![CDATA[" + current.getPath() + "]]></url>\n"); try { if (current.getViewType() == Constants.VIEW_TYPE_SYMLINK) { try { ViewComponentHbm vclSym = (ViewComponentHbm) getSessionFactory().getCurrentSession().load(ViewComponentHbmImpl.class, new Integer(current.getReference())); String reference = vclSym.getReference(); ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); out.print("<template>" + content.getTemplate() + "</template>\n"); } catch (Exception symEx) { log.warn("ViewComponent " + current.getViewComponentId() + " is a SymLink, maybe the LinkTarget " + current.getReference() + " does not exist (anymore)? 
-> " + symEx.getMessage()); } } else { String reference = current.getReference(); ContentHbm content = (ContentHbm) getSessionFactory().getCurrentSession().load(ContentHbmImpl.class, new Integer(reference)); out.print("<template>" + content.getTemplate() + "</template>\n"); // out.print("<template>" + // getContentLocalHome().findByPrimaryKey(new // Integer(getReference())).getTemplate() + // "</template>\n"); } } catch (Exception exe) { log.warn("Error getting url or template for viewComponent " + current.getViewComponentId() + ": " + exe.getMessage()); } } } if (withSiteProtection) { if (current.getRealm2vc() != null) { out.println(current.getRealm2vc().toXml()); } exportVCRealms(out, current); } if (depth != 0) { // 0 is only THIS ViewComponent try { Collection coll = current.getChildrenOrdered(); Iterator it = coll.iterator(); while (it.hasNext()) { ViewComponentHbm vcl = (ViewComponentHbm) it.next(); if (onlyThisUnitId == null || onlyThisUnitId.equals(vcl.getUnit4ViewComponent()) || (deployType == Constants.DEPLOY_TYPE_ROOT && !vcl.getUnit4ViewComponent().equals(vcl.getParent().getUnit4ViewComponent()))) { if (!returnOnlyVisibleOne || this.shouldBeVisible(vcl, liveServer)) { int destDepth = depth - 1; if (depth == -1) destDepth = -1; this.toXml(vcl, onlyThisUnitId, withContent, lastContenVersionOnly, withSiteProtection, withUrl, destDepth, liveServer, returnOnlyVisibleOne, deployType, out); } } else { // This is outside the specified unit. Therefor do nothing with it and look for the next fitting // this.toXml(vcl, onlyThisUnitId, false, withUrl, 1, liveServer, returnOnlyVisibleOne, out); } } } catch (Exception exe) { log.error("Error occured calling children.toXml: " + exe.getMessage(), exe); } } out.println("</viewcomponent>"); if (log.isDebugEnabled()) log.debug("toXml end"); }
diff --git a/spring-batch-core/src/main/java/org/springframework/batch/core/listener/MulticasterBatchListener.java b/spring-batch-core/src/main/java/org/springframework/batch/core/listener/MulticasterBatchListener.java index 4aae44061..0f57854aa 100644 --- a/spring-batch-core/src/main/java/org/springframework/batch/core/listener/MulticasterBatchListener.java +++ b/spring-batch-core/src/main/java/org/springframework/batch/core/listener/MulticasterBatchListener.java @@ -1,310 +1,317 @@ /* * Copyright 2006-2007 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.batch.core.listener; import java.util.List; import org.springframework.batch.core.ChunkListener; import org.springframework.batch.core.ItemProcessListener; import org.springframework.batch.core.ItemReadListener; import org.springframework.batch.core.ItemWriteListener; import org.springframework.batch.core.SkipListener; import org.springframework.batch.core.StepExecution; import org.springframework.batch.core.StepExecutionListener; import org.springframework.batch.core.StepListener; import org.springframework.batch.item.ItemStream; import org.springframework.batch.repeat.ExitStatus; /** * @author Dave Syer * */ public class MulticasterBatchListener<T, S> implements StepExecutionListener, ChunkListener, ItemReadListener<T>, ItemProcessListener<T, S>, ItemWriteListener<S>, SkipListener<T,S> { private CompositeStepExecutionListener stepListener = new CompositeStepExecutionListener(); private CompositeChunkListener chunkListener = new CompositeChunkListener(); private CompositeItemReadListener<T> itemReadListener = new CompositeItemReadListener<T>(); private CompositeItemProcessListener<T, S> itemProcessListener = new CompositeItemProcessListener<T, S>(); private CompositeItemWriteListener<S> itemWriteListener = new CompositeItemWriteListener<S>(); private CompositeSkipListener<T,S> skipListener = new CompositeSkipListener<T,S>(); /** * Initialise the listener instance. */ public MulticasterBatchListener() { super(); } /** * Register each of the objects as listeners. Once registered, calls to the * {@link MulticasterBatchListener} broadcast to the individual listeners. * * @param listeners listener objects of types known to the multicaster. */ public void setListeners(List<? extends StepListener> listeners) { for (StepListener stepListener : listeners) { register(stepListener); } } /** * Register the listener for callbacks on the appropriate interfaces * implemented. Any {@link StepListener} can be provided, or an * {@link ItemStream}. Other types will be ignored. */ public void register(StepListener listener) { if (listener instanceof StepExecutionListener) { this.stepListener.register((StepExecutionListener) listener); } if (listener instanceof ChunkListener) { this.chunkListener.register((ChunkListener) listener); } if (listener instanceof ItemReadListener) { - // TODO: make this type safe somehow? 
- this.itemReadListener.register((ItemReadListener) listener); + @SuppressWarnings("unchecked") + ItemReadListener<T> itemReadListener = (ItemReadListener) listener; + this.itemReadListener.register(itemReadListener); } if (listener instanceof ItemProcessListener) { - this.itemProcessListener.register((ItemProcessListener) listener); + @SuppressWarnings("unchecked") + ItemProcessListener<T,S> itemProcessListener = (ItemProcessListener) listener; + this.itemProcessListener.register(itemProcessListener); } if (listener instanceof ItemWriteListener) { - this.itemWriteListener.register((ItemWriteListener) listener); + @SuppressWarnings("unchecked") + ItemWriteListener<S> itemWriteListener = (ItemWriteListener) listener; + this.itemWriteListener.register(itemWriteListener); } if (listener instanceof SkipListener) { - this.skipListener.register((SkipListener) listener); + @SuppressWarnings("unchecked") + SkipListener<T,S> skipListener = (SkipListener) listener; + this.skipListener.register(skipListener); } } /** * @param item * @param result * @see org.springframework.batch.core.listener.CompositeItemProcessListener#afterProcess(java.lang.Object, * java.lang.Object) */ public void afterProcess(T item, S result) { try { itemProcessListener.afterProcess(item, result); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in afterProcess.", e); } } /** * @param item * @see org.springframework.batch.core.listener.CompositeItemProcessListener#beforeProcess(java.lang.Object) */ public void beforeProcess(T item) { try { itemProcessListener.beforeProcess(item); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in beforeProcess.", e); } } /** * @param item * @param ex * @see org.springframework.batch.core.listener.CompositeItemProcessListener#onProcessError(java.lang.Object, * java.lang.Exception) */ public void onProcessError(T item, Exception ex) { try { itemProcessListener.onProcessError(item, ex); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in onProcessError.", e); } } /** * @see org.springframework.batch.core.listener.CompositeStepExecutionListener#afterStep(StepExecution) */ public ExitStatus afterStep(StepExecution stepExecution) { try { return stepListener.afterStep(stepExecution); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in afterStep.", e); } } /** * @param stepExecution * @see org.springframework.batch.core.listener.CompositeStepExecutionListener#beforeStep(org.springframework.batch.core.StepExecution) */ public void beforeStep(StepExecution stepExecution) { try { stepListener.beforeStep(stepExecution); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in beforeStep.", e); } } /** * @param t * @see org.springframework.batch.core.listener.CompositeStepExecutionListener#onErrorInStep(StepExecution, * Throwable) */ public ExitStatus onErrorInStep(StepExecution stepExecution, Throwable t) { try { return stepListener.onErrorInStep(stepExecution, t); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in onErrorInStep.", t, e); } } /** * * @see org.springframework.batch.core.listener.CompositeChunkListener#afterChunk() */ public void afterChunk() { try { chunkListener.afterChunk(); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in afterChunk.", e); } } /** * * @see org.springframework.batch.core.listener.CompositeChunkListener#beforeChunk() */ public void beforeChunk() { try { chunkListener.beforeChunk(); } 
catch (RuntimeException e) { throw new StepListenerFailedException("Error in beforeChunk.", e); } } /** * @param item * @see org.springframework.batch.core.listener.CompositeItemReadListener#afterRead(java.lang.Object) */ public void afterRead(T item) { try { itemReadListener.afterRead(item); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in afterRead.", e); } } /** * * @see org.springframework.batch.core.listener.CompositeItemReadListener#beforeRead() */ public void beforeRead() { try { itemReadListener.beforeRead(); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in beforeRead.", e); } } /** * @param ex * @see org.springframework.batch.core.listener.CompositeItemReadListener#onReadError(java.lang.Exception) */ public void onReadError(Exception ex) { try { itemReadListener.onReadError(ex); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in onReadError.", ex, e); } } /** * * @see ItemWriteListener#afterWrite(List) */ public void afterWrite(List<? extends S> items) { try { itemWriteListener.afterWrite(items); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in afterWrite.", e); } } /** * @param items * @see ItemWriteListener#beforeWrite(List) */ public void beforeWrite(List<? extends S> items) { try { itemWriteListener.beforeWrite(items); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in beforeWrite.", e); } } /** * @param ex * @param items * @see ItemWriteListener#onWriteError(Exception, List) */ public void onWriteError(Exception ex, List<? extends S> items) { try { itemWriteListener.onWriteError(ex, items); } catch (RuntimeException e) { throw new StepListenerFailedException("Error in onWriteError.", ex, e); } } /** * @param t * @see org.springframework.batch.core.listener.CompositeSkipListener#onSkipInRead(java.lang.Throwable) */ public void onSkipInRead(Throwable t) { skipListener.onSkipInRead(t); } /** * @param item * @param t * @see org.springframework.batch.core.listener.CompositeSkipListener#onSkipInWrite(java.lang.Object, * java.lang.Throwable) */ public void onSkipInWrite(S item, Throwable t) { skipListener.onSkipInWrite(item, t); } /** * @param item * @param t * @see org.springframework.batch.core.listener.CompositeSkipListener#onSkipInProcess(Object, Throwable) */ public void onSkipInProcess(T item, Throwable t) { skipListener.onSkipInProcess(item, t); } }
false
true
public void register(StepListener listener) {
    if (listener instanceof StepExecutionListener) {
        this.stepListener.register((StepExecutionListener) listener);
    }
    if (listener instanceof ChunkListener) {
        this.chunkListener.register((ChunkListener) listener);
    }
    if (listener instanceof ItemReadListener) {
        // TODO: make this type safe somehow?
        this.itemReadListener.register((ItemReadListener) listener);
    }
    if (listener instanceof ItemProcessListener) {
        this.itemProcessListener.register((ItemProcessListener) listener);
    }
    if (listener instanceof ItemWriteListener) {
        this.itemWriteListener.register((ItemWriteListener) listener);
    }
    if (listener instanceof SkipListener) {
        this.skipListener.register((SkipListener) listener);
    }
}
public void register(StepListener listener) {
    if (listener instanceof StepExecutionListener) {
        this.stepListener.register((StepExecutionListener) listener);
    }
    if (listener instanceof ChunkListener) {
        this.chunkListener.register((ChunkListener) listener);
    }
    if (listener instanceof ItemReadListener) {
        @SuppressWarnings("unchecked")
        ItemReadListener<T> itemReadListener = (ItemReadListener) listener;
        this.itemReadListener.register(itemReadListener);
    }
    if (listener instanceof ItemProcessListener) {
        @SuppressWarnings("unchecked")
        ItemProcessListener<T,S> itemProcessListener = (ItemProcessListener) listener;
        this.itemProcessListener.register(itemProcessListener);
    }
    if (listener instanceof ItemWriteListener) {
        @SuppressWarnings("unchecked")
        ItemWriteListener<S> itemWriteListener = (ItemWriteListener) listener;
        this.itemWriteListener.register(itemWriteListener);
    }
    if (listener instanceof SkipListener) {
        @SuppressWarnings("unchecked")
        SkipListener<T,S> skipListener = (SkipListener) listener;
        this.skipListener.register(skipListener);
    }
}
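The fix in this record narrows each raw listener cast into a local variable so that @SuppressWarnings("unchecked") covers a single assignment instead of the whole method. A small self-contained sketch of that idiom; the Dispatcher and Handler types here are invented for illustration, not part of Spring Batch:

import java.util.ArrayList;
import java.util.List;

final class Dispatcher<T> {

    /** Minimal listener interface; stands in for the typed listener hierarchy. */
    interface Handler<E> {
        void handle(E event);
    }

    private final List<Handler<T>> handlers = new ArrayList<>();

    /**
     * Accepts an untyped candidate (e.g. from a heterogeneous registry) and narrows it
     * with a cast whose "unchecked" warning is suppressed on one assignment only.
     */
    void register(Object candidate) {
        if (candidate instanceof Handler) {
            @SuppressWarnings("unchecked")
            Handler<T> handler = (Handler<T>) candidate;
            handlers.add(handler);
        }
    }

    void dispatch(T event) {
        for (Handler<T> handler : handlers) {
            handler.handle(event);
        }
    }

    public static void main(String[] args) {
        Dispatcher<String> dispatcher = new Dispatcher<>();
        Handler<String> printer = event -> System.out.println("got: " + event);
        dispatcher.register(printer);
        dispatcher.dispatch("hello");
    }
}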
diff --git a/src/main/java/com/laytonsmith/core/functions/ByteArrays.java b/src/main/java/com/laytonsmith/core/functions/ByteArrays.java index 93e9f05c..3450e346 100644 --- a/src/main/java/com/laytonsmith/core/functions/ByteArrays.java +++ b/src/main/java/com/laytonsmith/core/functions/ByteArrays.java @@ -1,609 +1,611 @@ package com.laytonsmith.core.functions; import com.laytonsmith.PureUtilities.Version; import com.laytonsmith.annotations.api; import com.laytonsmith.core.CHVersion; import com.laytonsmith.core.Static; import com.laytonsmith.core.constructs.CByteArray; import com.laytonsmith.core.constructs.CDouble; import com.laytonsmith.core.constructs.CInt; import com.laytonsmith.core.constructs.CString; import com.laytonsmith.core.constructs.CVoid; import com.laytonsmith.core.constructs.Construct; import com.laytonsmith.core.constructs.Target; import com.laytonsmith.core.environments.Environment; import com.laytonsmith.core.exceptions.ConfigRuntimeException; import com.laytonsmith.core.functions.Exceptions.ExceptionType; import java.io.UnsupportedEncodingException; import java.nio.BufferUnderflowException; /** * * @author lsmith */ public class ByteArrays { public static String docs(){ return "This class contains all the methods needed to manipulate a byte array primitive. Since" + " byte arrays would be very inefficient to implement using a normal array, this data type" + " allows for more efficient operations, while still allowing for low level data access." + " Most data transferred within scripts is higher level, and does not require access" + " to a byte array, however, code that interacts with external processes may require" + " use of these functions to properly manipulate the data. Note that all the methods" + " deal with low level types, so the following definitions apply: a byte is 8 bits," + " a short is 16 bits, an int is 32 bits, a long is 64 bits. UTF-8 strings are supported" + " directly. The byte array is automatically resized as needed."; } @api public static class byte_array extends ba { @Override public ExceptionType[] thrown() { return new ExceptionType[]{}; } @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { return new CByteArray(t); } @Override public Integer[] numArgs() { return new Integer[]{0}; } @Override public String docs() { return "byte_array {} Returns a new byte array primitive, which can be operated on with the ba_ series of functions."; } } @api public static class ba_as_array extends ba { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = Static.getByteArray(args[0], t); return ba.asArray(t); } @Override public Integer[] numArgs() { return new Integer[]{1}; } @Override public String docs() { return "array {byte_array} Returns a new read only copy of the underlying byte array. This array is much more efficient" + " than if the array were made manually, however, it is read only. If you need to manipulate the array's" + " contents, then you can clone the array, however, the returned array (and any clones) cannot be automatically" + " interfaced with the byte array primitives. This operation is discouraged, because normal arrays are very" + " inefficient for dealing with low level bit data."; } } @api public static class ba_rewind extends ba { @Override public Construct exec(Target t, Environment environment, Construct... 
args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); ba.rewind(); return new CVoid(t); } @Override public Integer[] numArgs() { return new Integer[]{1}; } @Override public String docs() { return "void {byte_array} Rewinds the byte array marker to 0."; } } @api public static class ba_get_byte extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try { return new CInt(ba.getByte(pos), t); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "int {byte_array, [pos]} Returns an int, read in as an 8 bit byte, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_char extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try{ return new CString(ba.getChar(pos), t); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "string {byte_array, [pos]} Returns a one character string, read in as an 32 bit char, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_short extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try{ return new CInt(ba.getShort(pos), t); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "int {byte_array, [pos]} Returns an int, read in as a 16 bit short, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_int extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try{ return new CInt(ba.getInt(pos), t); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "int {byte_array, [pos]} Returns an int, read in as a 32 bit int, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_long extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... 
args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try { return new CInt(ba.getLong(pos), t); } catch (IndexOutOfBoundsException e) { throw new Exceptions.RangeException(e.getMessage(), t); } catch (BufferUnderflowException e) { throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "int {byte_array, [pos]} Returns an int, read in as a 64 bit long, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_float extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try { return new CDouble(ba.getFloat(pos), t); } catch (IndexOutOfBoundsException e) { throw new Exceptions.RangeException(e.getMessage(), t); } catch (BufferUnderflowException e) { throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "double {byte_array, [pos]} Returns a double, read in as a 32 bit float, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_double extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); try { return new CDouble(ba.getDouble(pos), t); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "double {byte_array, [pos]} Returns a double, read in as a 64 bit double, from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_get_bytes extends ba_get { @Override public Integer[] numArgs() { return new Integer[]{2, 3}; } @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); int size = Static.getInt32(args[1], t); Integer pos = null; if(args.length == 3){ pos = Static.getInt32(args[2], t); } try { return ba.getBytes(size, pos); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); } } @Override public String docs() { return "byte_array {byte_array, length, [pos]} Returns a new byte_array primitive, starting from pos (or wherever the marker is" + " by default) to length."; } } @api public static class ba_get_string extends ba_get { @Override public Construct exec(Target t, Environment environment, Construct... 
args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); Integer pos = get_getPos(args, t); String encoding = null; if(args.length == 3){ encoding = args[2].nval(); } try{ return new CString(ba.readUTF8String(pos, encoding), t); } catch(UnsupportedEncodingException e){ throw new Exceptions.FormatException(e.getMessage(), t); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(BufferUnderflowException e){ throw new Exceptions.RangeException(e.getMessage(), t); + } catch(NegativeArraySizeException e){ + throw new Exceptions.FormatException("Invalid data", t); } } @Override public String docs() { return "string {byte_array, [pos], [encoding]} Returns a UTF-8 encoded string, from the given position, or wherever the" + " marker is currently at by default. The string is assumed to have encoded the length of the string" + " with a 32 bit integer, then the string bytes. (This will be the case is the byte_array was encoded" + " with ba_set_string.) The encoding of the string may be set, but defaults to UTF-8."; } } @api public static class ba_put_byte extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); byte b = Static.getInt8(args[1], t); Integer pos = set_getPos(args, t); ba.putByte(b, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, int, [pos]} Writes an int, interpreted as an 8 bit byte, starting from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_put_char extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); String b = args[1].val(); char c = '\0'; if(b.length() > 0){ c = b.charAt(0); } Integer pos = set_getPos(args, t); ba.putChar(c, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, string, [pos]} Writes the first character of the string, interpreted as an 32 bit char, starting from the given position, or wherever the" + " marker is currently at by default. If the string is empty, a \\0 is written instead."; } } @api public static class ba_put_short extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); short b = Static.getInt16(args[1], t); Integer pos = set_getPos(args, t); ba.putShort(b, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, int, [pos]} Writes an int, interpreted as an 16 bit short, starting from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_put_int extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); int b = Static.getInt32(args[1], t); Integer pos = set_getPos(args, t); ba.putInt(b, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, int, [pos]} Writes an int, interpreted as a 32 bit int, starting from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_put_long extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... 
args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); long b = Static.getInt(args[1], t); Integer pos = set_getPos(args, t); ba.putLong(b, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, int, [pos]} Writes an int, interpreted as a 64 bit, starting from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_put_float extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); float b = Static.getDouble32(args[1], t); Integer pos = set_getPos(args, t); ba.putFloat(b, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, double, [pos]} Writes a double, interpreted as a 32 bit float, starting from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_put_double extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); double b = Static.getDouble(args[1], t); Integer pos = set_getPos(args, t); ba.putDouble(b, pos); return new CVoid(t); } @Override public String docs() { return "void {byte_array, double, [pos]} Writes a double, interpreted as a 64 bit double, starting from the given position, or wherever the" + " marker is currently at by default."; } } @api public static class ba_put_bytes extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray dest = getBA(args, t); CByteArray src = Static.getByteArray(args[1], t); Integer pos = set_getPos(args, t); dest.putBytes(src, pos); return new CVoid(t); } @Override public String docs() { return "void {destination_byte_array, source_byte_array, [pos]} Writes the contents of the source_byte_array into this byte array," + " starting at pos, or wherever the marker is currently at by default."; } } @api public static class ba_put_string extends ba_put { @Override public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException { CByteArray ba = getBA(args, t); String s = args[1].val(); Integer pos = set_getPos(args, t); String encoding = null; if(args.length == 3){ encoding = args[2].nval(); } try{ ba.writeUTF8String(s, pos, encoding); } catch(IndexOutOfBoundsException e){ throw new Exceptions.RangeException(e.getMessage(), t); } catch(UnsupportedEncodingException e){ throw new Exceptions.FormatException(e.getMessage(), t); } return new CVoid(t); } @Override public String docs() { return "void {byte_array, string, [pos], [encoding]} Writes the length of the string to the byte array, as a short, (interpreted as UTF-8)," + " then writes the UTF-8 string itself. If an external application requires the string to be serialized" + " in a different manner, then use the string-byte_array conversion methods in StringHandling, however" + " strings written in this manner are compatible with ba_get_string. 
The encoding may be set, but defaults to UTF-8."; } } private static CByteArray getBA(Construct [] args, Target t){ return Static.getByteArray(args[0], t); } private static Integer get_getPos(Construct [] args, Target t){ if(args.length == 2){ return Static.getInt32(args[1], t); } else { return null; } } private static Integer set_getPos(Construct [] args, Target t){ if(args.length == 3){ return Static.getInt32(args[2], t); } else { return null; } } private static abstract class ba extends AbstractFunction { @Override public ExceptionType[] thrown() { return new ExceptionType[]{ExceptionType.CastException}; } @Override public Version since() { return CHVersion.V3_3_1; } @Override public String getName() { return getClass().getSimpleName(); } @Override public Boolean runAsync() { return null; } @Override public boolean isRestricted() { return false; } } public static abstract class ba_put extends ba { @Override public Integer[] numArgs() { return new Integer[]{2, 3}; } } public static abstract class ba_get extends ba { @Override public ExceptionType[] thrown() { return new ExceptionType[]{ExceptionType.CastException, ExceptionType.RangeException}; } @Override public Integer[] numArgs() { return new Integer[]{1, 2}; } } }
true
true
public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException {
    CByteArray ba = getBA(args, t);
    Integer pos = get_getPos(args, t);
    String encoding = null;
    if(args.length == 3){
        encoding = args[2].nval();
    }
    try{
        return new CString(ba.readUTF8String(pos, encoding), t);
    } catch(UnsupportedEncodingException e){
        throw new Exceptions.FormatException(e.getMessage(), t);
    } catch(IndexOutOfBoundsException e){
        throw new Exceptions.RangeException(e.getMessage(), t);
    } catch(BufferUnderflowException e){
        throw new Exceptions.RangeException(e.getMessage(), t);
    }
}
public Construct exec(Target t, Environment environment, Construct... args) throws ConfigRuntimeException {
    CByteArray ba = getBA(args, t);
    Integer pos = get_getPos(args, t);
    String encoding = null;
    if(args.length == 3){
        encoding = args[2].nval();
    }
    try{
        return new CString(ba.readUTF8String(pos, encoding), t);
    } catch(UnsupportedEncodingException e){
        throw new Exceptions.FormatException(e.getMessage(), t);
    } catch(IndexOutOfBoundsException e){
        throw new Exceptions.RangeException(e.getMessage(), t);
    } catch(BufferUnderflowException e){
        throw new Exceptions.RangeException(e.getMessage(), t);
    } catch(NegativeArraySizeException e){
        throw new Exceptions.FormatException("Invalid data", t);
    }
}
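The added catch block maps NegativeArraySizeException to a script-level format error, presumably because a corrupted length prefix makes the reader allocate a negative-sized buffer. A minimal sketch of the underlying pattern, validating the length before allocating; ByteBuffer stands in for the CByteArray internals, which are not shown in this record:

import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class LengthPrefixedString {

    /** Writes a 32-bit length followed by the UTF-8 bytes of the string. */
    static void write(ByteBuffer buffer, String value) {
        byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
        buffer.putInt(bytes.length);
        buffer.put(bytes);
    }

    /** Reads a length-prefixed UTF-8 string, rejecting negative or oversized lengths. */
    static String read(ByteBuffer buffer) {
        int length = buffer.getInt();
        if (length < 0 || length > buffer.remaining()) {
            throw new IllegalArgumentException("Invalid string length: " + length);
        }
        byte[] bytes = new byte[length];
        buffer.get(bytes);
        return new String(bytes, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(64);
        write(buffer, "hello");
        buffer.flip();
        System.out.println(read(buffer));

        // A corrupted prefix (e.g. a negative length) is rejected up front instead of
        // surfacing as NegativeArraySizeException or BufferUnderflowException.
        ByteBuffer corrupted = ByteBuffer.allocate(8).putInt(-5);
        corrupted.flip();
        try {
            read(corrupted);
        } catch (IllegalArgumentException | BufferUnderflowException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}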
diff --git a/MPDroid/src/com/namelessdev/mpdroid/fragments/NowPlayingSmallFragment.java b/MPDroid/src/com/namelessdev/mpdroid/fragments/NowPlayingSmallFragment.java index fc4b91c1..fede985f 100644 --- a/MPDroid/src/com/namelessdev/mpdroid/fragments/NowPlayingSmallFragment.java +++ b/MPDroid/src/com/namelessdev/mpdroid/fragments/NowPlayingSmallFragment.java @@ -1,334 +1,334 @@ package com.namelessdev.mpdroid.fragments; import android.app.Activity; import android.content.SharedPreferences; import android.graphics.Bitmap; import android.graphics.drawable.Drawable; import android.os.AsyncTask; import android.os.Bundle; import android.preference.PreferenceManager; import android.support.v4.app.Fragment; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.View.OnClickListener; import android.view.ViewGroup; import android.view.ViewTreeObserver; import android.widget.ImageButton; import android.widget.ImageView; import android.widget.ProgressBar; import android.widget.TextView; import com.namelessdev.mpdroid.MPDApplication; import com.namelessdev.mpdroid.R; import com.namelessdev.mpdroid.cover.CoverBitmapDrawable; import com.namelessdev.mpdroid.helpers.AlbumCoverDownloadListener; import com.namelessdev.mpdroid.helpers.CoverAsyncHelper; import com.namelessdev.mpdroid.helpers.CoverInfo; import org.a0z.mpd.AlbumInfo; import org.a0z.mpd.MPD; import org.a0z.mpd.MPDStatus; import org.a0z.mpd.Music; import org.a0z.mpd.event.StatusChangeListener; import org.a0z.mpd.exception.MPDServerException; public class NowPlayingSmallFragment extends Fragment implements StatusChangeListener { private MPDApplication app; private CoverAsyncHelper coverHelper; private TextView songTitle; private TextView songArtist; private AlbumCoverDownloadListener coverArtListener; private ImageView coverArt; private ProgressBar coverArtProgress; private ImageButton buttonPrev; private ImageButton buttonPlayPause; private ImageButton buttonNext; private String lastArtist = ""; private String lastAlbum = ""; private boolean showAlbumArtist; @Override public void onAttach(Activity activity) { super.onAttach(activity); app = (MPDApplication) activity.getApplication(); } @Override public void onStart() { super.onStart(); app.oMPDAsyncHelper.addStatusChangeListener(this); new updateTrackInfoAsync().execute((MPDStatus[]) null); } @Override public void onStop() { app.oMPDAsyncHelper.removeStatusChangeListener(this); super.onStop(); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { final View view = inflater.inflate(R.layout.now_playing_small_fragment, container, false); songTitle = (TextView) view.findViewById(R.id.song_title); songTitle.setSelected(true); songArtist = (TextView) view.findViewById(R.id.song_artist); songArtist.setSelected(true); buttonPrev = (ImageButton) view.findViewById(R.id.prev); buttonPlayPause = (ImageButton) view.findViewById(R.id.playpause); buttonNext = (ImageButton) view.findViewById(R.id.next); buttonPrev.setOnClickListener(buttonClickListener); buttonPlayPause.setOnClickListener(buttonClickListener); buttonNext.setOnClickListener(buttonClickListener); coverArt = (ImageView) view.findViewById(R.id.albumCover); coverArtProgress = (ProgressBar) view.findViewById(R.id.albumCoverProgress); coverArtListener = new AlbumCoverDownloadListener(getActivity(), coverArt, coverArtProgress, app.isLightThemeSelected(), false); SharedPreferences settings = 
PreferenceManager.getDefaultSharedPreferences(getActivity()); showAlbumArtist = settings.getBoolean("showAlbumArtist", true); coverHelper = new CoverAsyncHelper(app, settings); coverHelper.setCoverMaxSizeFromScreen(getActivity()); final ViewTreeObserver vto = coverArt.getViewTreeObserver(); vto.addOnPreDrawListener(new ViewTreeObserver.OnPreDrawListener() { public boolean onPreDraw() { if (coverHelper != null) coverHelper.setCachedCoverMaxSize(coverArt.getMeasuredHeight()); return true; } }); coverHelper.addCoverDownloadListener(coverArtListener); return view; } @Override public void onDestroyView() { if (coverArt != null) { final Drawable oldDrawable = coverArt.getDrawable(); coverArt.setImageResource(R.drawable.no_cover_art); if (oldDrawable != null && oldDrawable instanceof CoverBitmapDrawable) { final Bitmap oldBitmap = ((CoverBitmapDrawable) oldDrawable).getBitmap(); if (oldBitmap != null) oldBitmap.recycle(); } } super.onDestroyView(); } final OnClickListener buttonClickListener = new OnClickListener() { @Override public void onClick(View v) { switch (v.getId()) { case R.id.prev: new Thread(new Runnable() { @Override public void run() { try { app.oMPDAsyncHelper.oMPD.previous(); } catch (MPDServerException e) { Log.w(MPDApplication.TAG, e.getMessage()); } } }).start(); break; case R.id.playpause: new Thread(new Runnable() { @Override public void run() { try { final MPD mpd = app.oMPDAsyncHelper.oMPD; final String state = mpd.getStatus().getState(); if (state.equals(MPDStatus.MPD_STATE_PLAYING) || state.equals(MPDStatus.MPD_STATE_PAUSED)) { mpd.pause(); } else { mpd.play(); } } catch (MPDServerException e) { Log.w(MPDApplication.TAG, e.getMessage()); } } }).start(); break; case R.id.next: new Thread(new Runnable() { @Override public void run() { try { app.oMPDAsyncHelper.oMPD.next(); } catch (MPDServerException e) { Log.w(MPDApplication.TAG, e.getMessage()); } } }).start(); break; } } }; @Override public void trackChanged(MPDStatus mpdStatus, int oldTrack) { new updateTrackInfoAsync().execute(mpdStatus); } @Override public void playlistChanged(MPDStatus mpdStatus, int oldPlaylistVersion) { if (isDetached()) return; // If the playlist changed but not the song position in the playlist // We end up being desynced. Update the current song. new updateTrackInfoAsync().execute((MPDStatus[]) null); } @Override public void stateChanged(MPDStatus status, String oldState) { if (isDetached()) return; app.getApplicationState().currentMpdStatus = status; if (status.getState() != null && buttonPlayPause != null) { if (status.getState().equals(MPDStatus.MPD_STATE_PLAYING)) { buttonPlayPause.setImageDrawable(getResources().getDrawable(R.drawable.ic_media_pause)); } else { buttonPlayPause.setImageDrawable(getResources().getDrawable(R.drawable.ic_media_play)); } } } @Override public void connectionStateChanged(boolean connected, boolean connectionLost) { if (isDetached() || songTitle == null || songArtist == null) return; connected = ((MPDApplication) getActivity().getApplication()).oMPDAsyncHelper.oMPD.isConnected(); if (connected) { songTitle.setText(getResources().getString(R.string.noSongInfo)); songArtist.setText(""); } else { songTitle.setText(getResources().getString(R.string.notConnected)); songArtist.setText(""); } return; } public class updateTrackInfoAsync extends AsyncTask<MPDStatus, Void, Boolean> { Music actSong = null; MPDStatus status = null; @Override protected Boolean doInBackground(MPDStatus... params) { if (params == null) { try { // A recursive call doesn't seem that bad here. 
return doInBackground(app.oMPDAsyncHelper.oMPD.getStatus()); } catch (MPDServerException e) { e.printStackTrace(); } return false; } if (params[0] != null) { String state = params[0].getState(); if (state != null) { int songPos = params[0].getSongPos(); if (songPos >= 0) { actSong = app.oMPDAsyncHelper.oMPD.getPlaylist().getByIndex(songPos); status = params[0]; return true; } } } return false; } @Override protected void onPostExecute(Boolean result) { if (result != null && result) { String albumartist = null; String artist = null; String artistlabel = null; String title = null; String album = null; boolean noSong = actSong == null || status.getPlaylistLength() == 0; if (noSong) { title = getResources().getString(R.string.noSongInfo); } else { if (actSong.isStream()) { if (actSong.haveTitle()) { title = actSong.getTitle(); artist = actSong.getName(); } else { title = actSong.getName(); artist = ""; } } else { albumartist = actSong.getAlbumArtist(); artist = actSong.getArtist(); if (!showAlbumArtist || albumartist == null || "".equals(albumartist) || artist.toLowerCase().contains(albumartist.toLowerCase())) artistlabel = ""+artist; else if ("".equals(artist)) artistlabel = "" + albumartist; else { artistlabel = albumartist + " / " + artist; artist = albumartist; } title = actSong.getTitle(); album = actSong.getAlbum(); } } artist = artist == null ? "" : artist; title = title == null ? "" : title; album = album == null ? "" : album; - artistlabel = artistlabel.equals("null") ? "" : artistlabel; + artistlabel = artistlabel == null || artistlabel.equals("null") ? "" : artistlabel; songArtist.setText(artistlabel); songTitle.setText(title); if (noSong || actSong.isStream()) { lastArtist = artist; lastAlbum = album; coverArtListener.onCoverNotFound(new CoverInfo(artist, album)); } else if (!lastAlbum.equals(album) || !lastArtist.equals(artist)) { coverHelper.downloadCover(actSong.getAlbumInfo()); lastArtist = artist; lastAlbum = album; } stateChanged(status, null); } else { songArtist.setText(""); songTitle.setText(R.string.noSongInfo); } } } /** * ************************** * Stuff we don't care about * * ************************** */ @Override public void volumeChanged(MPDStatus mpdStatus, int oldVolume) { } @Override public void repeatChanged(boolean repeating) { } @Override public void randomChanged(boolean random) { } @Override public void libraryStateChanged(boolean updating) { } public void updateCover(AlbumInfo albumInfo) { if (coverArt != null && null != coverArt.getTag() && coverArt.getTag().equals(albumInfo.getKey())) { coverHelper.downloadCover(albumInfo); } } }
true
true
protected void onPostExecute(Boolean result) { if (result != null && result) { String albumartist = null; String artist = null; String artistlabel = null; String title = null; String album = null; boolean noSong = actSong == null || status.getPlaylistLength() == 0; if (noSong) { title = getResources().getString(R.string.noSongInfo); } else { if (actSong.isStream()) { if (actSong.haveTitle()) { title = actSong.getTitle(); artist = actSong.getName(); } else { title = actSong.getName(); artist = ""; } } else { albumartist = actSong.getAlbumArtist(); artist = actSong.getArtist(); if (!showAlbumArtist || albumartist == null || "".equals(albumartist) || artist.toLowerCase().contains(albumartist.toLowerCase())) artistlabel = ""+artist; else if ("".equals(artist)) artistlabel = "" + albumartist; else { artistlabel = albumartist + " / " + artist; artist = albumartist; } title = actSong.getTitle(); album = actSong.getAlbum(); } } artist = artist == null ? "" : artist; title = title == null ? "" : title; album = album == null ? "" : album; artistlabel = artistlabel.equals("null") ? "" : artistlabel; songArtist.setText(artistlabel); songTitle.setText(title); if (noSong || actSong.isStream()) { lastArtist = artist; lastAlbum = album; coverArtListener.onCoverNotFound(new CoverInfo(artist, album)); } else if (!lastAlbum.equals(album) || !lastArtist.equals(artist)) { coverHelper.downloadCover(actSong.getAlbumInfo()); lastArtist = artist; lastAlbum = album; } stateChanged(status, null); } else { songArtist.setText(""); songTitle.setText(R.string.noSongInfo); } }
protected void onPostExecute(Boolean result) { if (result != null && result) { String albumartist = null; String artist = null; String artistlabel = null; String title = null; String album = null; boolean noSong = actSong == null || status.getPlaylistLength() == 0; if (noSong) { title = getResources().getString(R.string.noSongInfo); } else { if (actSong.isStream()) { if (actSong.haveTitle()) { title = actSong.getTitle(); artist = actSong.getName(); } else { title = actSong.getName(); artist = ""; } } else { albumartist = actSong.getAlbumArtist(); artist = actSong.getArtist(); if (!showAlbumArtist || albumartist == null || "".equals(albumartist) || artist.toLowerCase().contains(albumartist.toLowerCase())) artistlabel = ""+artist; else if ("".equals(artist)) artistlabel = "" + albumartist; else { artistlabel = albumartist + " / " + artist; artist = albumartist; } title = actSong.getTitle(); album = actSong.getAlbum(); } } artist = artist == null ? "" : artist; title = title == null ? "" : title; album = album == null ? "" : album; artistlabel = artistlabel == null || artistlabel.equals("null") ? "" : artistlabel; songArtist.setText(artistlabel); songTitle.setText(title); if (noSong || actSong.isStream()) { lastArtist = artist; lastAlbum = album; coverArtListener.onCoverNotFound(new CoverInfo(artist, album)); } else if (!lastAlbum.equals(album) || !lastArtist.equals(artist)) { coverHelper.downloadCover(actSong.getAlbumInfo()); lastArtist = artist; lastAlbum = album; } stateChanged(status, null); } else { songArtist.setText(""); songTitle.setText(R.string.noSongInfo); } }
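The one-line fix in this record adds a null check before artistlabel.equals("null"), since the label is only assigned on some branches. A tiny sketch of the same normalization written defensively; the helper name is illustrative, not from the original source:

final class LabelUtils {

    /** Maps null and the literal string "null" to an empty label. */
    static String normalizeLabel(String label) {
        // Comparing against the constant first ("null".equals(label)) is already null-safe,
        // so no separate null check is needed when written this way.
        return (label == null || "null".equals(label)) ? "" : label;
    }

    public static void main(String[] args) {
        System.out.println("[" + normalizeLabel(null) + "]");          // []
        System.out.println("[" + normalizeLabel("null") + "]");        // []
        System.out.println("[" + normalizeLabel("Some Artist") + "]"); // [Some Artist]
    }
}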
diff --git a/tools/layoutlib/bridge/src/android/view/LayoutInflater_Delegate.java b/tools/layoutlib/bridge/src/android/view/LayoutInflater_Delegate.java index 2dbca92a..b7cfe41d 100644 --- a/tools/layoutlib/bridge/src/android/view/LayoutInflater_Delegate.java +++ b/tools/layoutlib/bridge/src/android/view/LayoutInflater_Delegate.java @@ -1,179 +1,179 @@ /* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.view; import com.android.layoutlib.bridge.android.BridgeInflater; import com.android.tools.layoutlib.annotations.LayoutlibDelegate; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import android.content.res.TypedArray; import android.content.res.XmlResourceParser; import android.util.AttributeSet; import android.util.Xml; import java.io.IOException; /** * Delegate used to provide new implementation of a select few methods of {@link LayoutInflater} * * Through the layoutlib_create tool, the original methods of LayoutInflater have been replaced * by calls to methods of the same name in this delegate class. * * Generally we don't want to copy-paste a huge method like that just for one small features, * but because this is done after this platform is final and the next version (Honeycomb) has a * better mechanism (or slightly less copy-paste), maintenance of this duplicated code is not * a problem. * */ public class LayoutInflater_Delegate { public static boolean sIsInInclude = false; @LayoutlibDelegate /*package*/ static void parseInclude(LayoutInflater thisInflater, XmlPullParser parser, View parent, AttributeSet attrs) throws XmlPullParserException, IOException { int type; if (parent instanceof ViewGroup) { final int layout = attrs.getAttributeResourceValue(null, "layout", 0); if (layout == 0) { final String value = attrs.getAttributeValue(null, "layout"); if (value == null) { throw new InflateException("You must specifiy a layout in the" + " include tag: <include layout=\"@layout/layoutID\" />"); } else { throw new InflateException("You must specifiy a valid layout " + "reference. The layout ID " + value + " is not valid."); } } else { final XmlResourceParser childParser = thisInflater.getContext().getResources().getLayout(layout); try { final AttributeSet childAttrs = Xml.asAttributeSet(childParser); while ((type = childParser.next()) != XmlPullParser.START_TAG && type != XmlPullParser.END_DOCUMENT) { // Empty. } if (type != XmlPullParser.START_TAG) { throw new InflateException(childParser.getPositionDescription() + ": No start tag found!"); } final String childName = childParser.getName(); if (LayoutInflater.TAG_MERGE.equals(childName)) { // ---- START MODIFICATIONS ---- if (thisInflater instanceof BridgeInflater) { ((BridgeInflater) thisInflater).setIsInMerge(true); } // ---- END MODIFICATIONS ---- // Inflate all children. 
- thisInflater.rInflate(childParser, parent, childAttrs, true); + thisInflater.rInflate(childParser, parent, childAttrs, false); // ---- START MODIFICATIONS ---- if (thisInflater instanceof BridgeInflater) { ((BridgeInflater) thisInflater).setIsInMerge(false); } // ---- END MODIFICATIONS ---- } else { final View view = thisInflater.createViewFromTag(childName, childAttrs); final ViewGroup group = (ViewGroup) parent; // We try to load the layout params set in the <include /> tag. If // they don't exist, we will rely on the layout params set in the // included XML file. // During a layoutparams generation, a runtime exception is thrown // if either layout_width or layout_height is missing. We catch // this exception and set localParams accordingly: true means we // successfully loaded layout params from the <include /> tag, // false means we need to rely on the included layout params. ViewGroup.LayoutParams params = null; try { // ---- START CHANGES sIsInInclude = true; // ---- END CHANGES params = group.generateLayoutParams(attrs); } catch (RuntimeException e) { // ---- START CHANGES sIsInInclude = false; // ---- END CHANGES params = group.generateLayoutParams(childAttrs); } finally { // ---- START CHANGES sIsInInclude = false; // ---- END CHANGES if (params != null) { view.setLayoutParams(params); } } // Inflate all children. thisInflater.rInflate(childParser, view, childAttrs, true); // Attempt to override the included layout's android:id with the // one set on the <include /> tag itself. TypedArray a = thisInflater.mContext.obtainStyledAttributes(attrs, com.android.internal.R.styleable.View, 0, 0); int id = a.getResourceId(com.android.internal.R.styleable.View_id, View.NO_ID); // While we're at it, let's try to override android:visibility. int visibility = a.getInt(com.android.internal.R.styleable.View_visibility, -1); a.recycle(); if (id != View.NO_ID) { view.setId(id); } switch (visibility) { case 0: view.setVisibility(View.VISIBLE); break; case 1: view.setVisibility(View.INVISIBLE); break; case 2: view.setVisibility(View.GONE); break; } group.addView(view); } } finally { childParser.close(); } } } else { throw new InflateException("<include /> can only be used inside of a ViewGroup"); } final int currentDepth = parser.getDepth(); while (((type = parser.next()) != XmlPullParser.END_TAG || parser.getDepth() > currentDepth) && type != XmlPullParser.END_DOCUMENT) { // Empty } } }
true
true
/*package*/ static void parseInclude(LayoutInflater thisInflater, XmlPullParser parser, View parent, AttributeSet attrs) throws XmlPullParserException, IOException { int type; if (parent instanceof ViewGroup) { final int layout = attrs.getAttributeResourceValue(null, "layout", 0); if (layout == 0) { final String value = attrs.getAttributeValue(null, "layout"); if (value == null) { throw new InflateException("You must specifiy a layout in the" + " include tag: <include layout=\"@layout/layoutID\" />"); } else { throw new InflateException("You must specifiy a valid layout " + "reference. The layout ID " + value + " is not valid."); } } else { final XmlResourceParser childParser = thisInflater.getContext().getResources().getLayout(layout); try { final AttributeSet childAttrs = Xml.asAttributeSet(childParser); while ((type = childParser.next()) != XmlPullParser.START_TAG && type != XmlPullParser.END_DOCUMENT) { // Empty. } if (type != XmlPullParser.START_TAG) { throw new InflateException(childParser.getPositionDescription() + ": No start tag found!"); } final String childName = childParser.getName(); if (LayoutInflater.TAG_MERGE.equals(childName)) { // ---- START MODIFICATIONS ---- if (thisInflater instanceof BridgeInflater) { ((BridgeInflater) thisInflater).setIsInMerge(true); } // ---- END MODIFICATIONS ---- // Inflate all children. thisInflater.rInflate(childParser, parent, childAttrs, true); // ---- START MODIFICATIONS ---- if (thisInflater instanceof BridgeInflater) { ((BridgeInflater) thisInflater).setIsInMerge(false); } // ---- END MODIFICATIONS ---- } else { final View view = thisInflater.createViewFromTag(childName, childAttrs); final ViewGroup group = (ViewGroup) parent; // We try to load the layout params set in the <include /> tag. If // they don't exist, we will rely on the layout params set in the // included XML file. // During a layoutparams generation, a runtime exception is thrown // if either layout_width or layout_height is missing. We catch // this exception and set localParams accordingly: true means we // successfully loaded layout params from the <include /> tag, // false means we need to rely on the included layout params. ViewGroup.LayoutParams params = null; try { // ---- START CHANGES sIsInInclude = true; // ---- END CHANGES params = group.generateLayoutParams(attrs); } catch (RuntimeException e) { // ---- START CHANGES sIsInInclude = false; // ---- END CHANGES params = group.generateLayoutParams(childAttrs); } finally { // ---- START CHANGES sIsInInclude = false; // ---- END CHANGES if (params != null) { view.setLayoutParams(params); } } // Inflate all children. thisInflater.rInflate(childParser, view, childAttrs, true); // Attempt to override the included layout's android:id with the // one set on the <include /> tag itself. TypedArray a = thisInflater.mContext.obtainStyledAttributes(attrs, com.android.internal.R.styleable.View, 0, 0); int id = a.getResourceId(com.android.internal.R.styleable.View_id, View.NO_ID); // While we're at it, let's try to override android:visibility. 
int visibility = a.getInt(com.android.internal.R.styleable.View_visibility, -1); a.recycle(); if (id != View.NO_ID) { view.setId(id); } switch (visibility) { case 0: view.setVisibility(View.VISIBLE); break; case 1: view.setVisibility(View.INVISIBLE); break; case 2: view.setVisibility(View.GONE); break; } group.addView(view); } } finally { childParser.close(); } } } else { throw new InflateException("<include /> can only be used inside of a ViewGroup"); } final int currentDepth = parser.getDepth(); while (((type = parser.next()) != XmlPullParser.END_TAG || parser.getDepth() > currentDepth) && type != XmlPullParser.END_DOCUMENT) { // Empty } }
/*package*/ static void parseInclude(LayoutInflater thisInflater, XmlPullParser parser, View parent, AttributeSet attrs) throws XmlPullParserException, IOException { int type; if (parent instanceof ViewGroup) { final int layout = attrs.getAttributeResourceValue(null, "layout", 0); if (layout == 0) { final String value = attrs.getAttributeValue(null, "layout"); if (value == null) { throw new InflateException("You must specifiy a layout in the" + " include tag: <include layout=\"@layout/layoutID\" />"); } else { throw new InflateException("You must specifiy a valid layout " + "reference. The layout ID " + value + " is not valid."); } } else { final XmlResourceParser childParser = thisInflater.getContext().getResources().getLayout(layout); try { final AttributeSet childAttrs = Xml.asAttributeSet(childParser); while ((type = childParser.next()) != XmlPullParser.START_TAG && type != XmlPullParser.END_DOCUMENT) { // Empty. } if (type != XmlPullParser.START_TAG) { throw new InflateException(childParser.getPositionDescription() + ": No start tag found!"); } final String childName = childParser.getName(); if (LayoutInflater.TAG_MERGE.equals(childName)) { // ---- START MODIFICATIONS ---- if (thisInflater instanceof BridgeInflater) { ((BridgeInflater) thisInflater).setIsInMerge(true); } // ---- END MODIFICATIONS ---- // Inflate all children. thisInflater.rInflate(childParser, parent, childAttrs, false); // ---- START MODIFICATIONS ---- if (thisInflater instanceof BridgeInflater) { ((BridgeInflater) thisInflater).setIsInMerge(false); } // ---- END MODIFICATIONS ---- } else { final View view = thisInflater.createViewFromTag(childName, childAttrs); final ViewGroup group = (ViewGroup) parent; // We try to load the layout params set in the <include /> tag. If // they don't exist, we will rely on the layout params set in the // included XML file. // During a layoutparams generation, a runtime exception is thrown // if either layout_width or layout_height is missing. We catch // this exception and set localParams accordingly: true means we // successfully loaded layout params from the <include /> tag, // false means we need to rely on the included layout params. ViewGroup.LayoutParams params = null; try { // ---- START CHANGES sIsInInclude = true; // ---- END CHANGES params = group.generateLayoutParams(attrs); } catch (RuntimeException e) { // ---- START CHANGES sIsInInclude = false; // ---- END CHANGES params = group.generateLayoutParams(childAttrs); } finally { // ---- START CHANGES sIsInInclude = false; // ---- END CHANGES if (params != null) { view.setLayoutParams(params); } } // Inflate all children. thisInflater.rInflate(childParser, view, childAttrs, true); // Attempt to override the included layout's android:id with the // one set on the <include /> tag itself. TypedArray a = thisInflater.mContext.obtainStyledAttributes(attrs, com.android.internal.R.styleable.View, 0, 0); int id = a.getResourceId(com.android.internal.R.styleable.View_id, View.NO_ID); // While we're at it, let's try to override android:visibility. 
int visibility = a.getInt(com.android.internal.R.styleable.View_visibility, -1); a.recycle(); if (id != View.NO_ID) { view.setId(id); } switch (visibility) { case 0: view.setVisibility(View.VISIBLE); break; case 1: view.setVisibility(View.INVISIBLE); break; case 2: view.setVisibility(View.GONE); break; } group.addView(view); } } finally { childParser.close(); } } } else { throw new InflateException("<include /> can only be used inside of a ViewGroup"); } final int currentDepth = parser.getDepth(); while (((type = parser.next()) != XmlPullParser.END_TAG || parser.getDepth() > currentDepth) && type != XmlPullParser.END_DOCUMENT) { // Empty } }
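Note on the fixed parseInclude above: the BridgeInflater-specific setIsInMerge(true)/setIsInMerge(false) calls bracket the recursive inflate of a <merge/> root, but they are not exception-safe — if rInflate throws, the flag stays set. A minimal sketch of a more defensive variant (same calls as in the snippet, signatures assumed from context; this is not part of the patch):

// Hedged sketch: same flag toggling as in the fixed method, but cleared on every exit path.
if (thisInflater instanceof BridgeInflater) {
    ((BridgeInflater) thisInflater).setIsInMerge(true);
}
try {
    // Inflate all children of the <merge/> root.
    thisInflater.rInflate(childParser, parent, childAttrs, false);
} finally {
    if (thisInflater instanceof BridgeInflater) {
        ((BridgeInflater) thisInflater).setIsInMerge(false);
    }
}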
diff --git a/src/main/java/org/testng/reporters/jq/ChronologicalPanel.java b/src/main/java/org/testng/reporters/jq/ChronologicalPanel.java index 51ba9aa0..af5d4918 100644 --- a/src/main/java/org/testng/reporters/jq/ChronologicalPanel.java +++ b/src/main/java/org/testng/reporters/jq/ChronologicalPanel.java @@ -1,101 +1,101 @@ package org.testng.reporters.jq; import org.testng.IInvokedMethod; import org.testng.ISuite; import org.testng.ITestNGMethod; import org.testng.ITestResult; import org.testng.reporters.XMLStringBuffer; import java.util.Collections; import java.util.Comparator; import java.util.List; public class ChronologicalPanel extends BaseMultiSuitePanel { public ChronologicalPanel(Model model) { super(model); } @Override public String getPrefix() { return "chronological-"; } @Override public String getHeader(ISuite suite) { return "Methods in chronological order"; } @Override public String getContent(ISuite suite, XMLStringBuffer main) { - List<ITestResult> results = getModel().getAllTestResults(suite, false /* configurations too */); - Collections.sort(results, new Comparator<ITestResult>() { + XMLStringBuffer xsb = new XMLStringBuffer(main.getCurrentIndent()); + List<IInvokedMethod> invokedMethods = suite.getAllInvokedMethods(); + Collections.sort(invokedMethods, new Comparator<IInvokedMethod>() { @Override - public int compare(ITestResult arg0, ITestResult arg1) { - return (int) (arg0.getStartMillis() - arg1.getStartMillis()); + public int compare(IInvokedMethod arg0, IInvokedMethod arg1) { + return (int) + (arg0.getTestResult().getStartMillis() - arg1.getTestResult().getStartMillis()); } }); - XMLStringBuffer xsb = new XMLStringBuffer(main.getCurrentIndent()); - List<IInvokedMethod> invokedMethods = suite.getAllInvokedMethods(); String currentClass = ""; long start = 0; for (IInvokedMethod im : invokedMethods) { ITestNGMethod m = im.getTestMethod(); // for (ITestResult tr : results) { // ITestNGMethod m = tr.getMethod(); String cls = "test-method"; if (m.isBeforeSuiteConfiguration()) { cls = "configuration-suite before"; } else if (m.isAfterSuiteConfiguration()) { cls = "configuration-suite after"; } else if (m.isBeforeTestConfiguration()) { cls = "configuration-test before"; } else if (m.isAfterTestConfiguration()) { cls = "configuration-test after"; } else if (m.isBeforeClassConfiguration()) { cls = "configuration-class before"; } else if (m.isAfterClassConfiguration()) { cls = "configuration-class after"; } else if (m.isBeforeMethodConfiguration()) { cls = "configuration-method before"; } else if (m.isAfterMethodConfiguration()) { cls = "configuration-method after"; } ITestResult tr = im.getTestResult(); String methodName = Model.getTestResultName(tr); if (!m.getTestClass().getName().equals(currentClass)) { if (!"".equals(currentClass)) { xsb.pop(D); } xsb.push(D, C, "chronological-class"); xsb.addRequired(D, m.getTestClass().getName(), C, "chronological-class-name"); currentClass = m.getTestClass().getName(); } xsb.push(D, C, cls); if (tr.getStatus() == ITestResult.FAILURE) { xsb.push("img", "src", Model.getImage("failed")); xsb.pop("img"); } // No need to check for skipped methods since by definition, they were never // invoked. xsb.addRequired(S, methodName, C, "method-name"); if (start == 0) { start = tr.getStartMillis(); } xsb.addRequired(S, Long.toString(tr.getStartMillis() - start) + " ms", C, "method-start"); xsb.pop(D); } return xsb.toXML(); } @Override public String getNavigatorLink(ISuite suite) { return "Chronological view"; } }
false
true
public String getContent(ISuite suite, XMLStringBuffer main) { List<ITestResult> results = getModel().getAllTestResults(suite, false /* configurations too */); Collections.sort(results, new Comparator<ITestResult>() { @Override public int compare(ITestResult arg0, ITestResult arg1) { return (int) (arg0.getStartMillis() - arg1.getStartMillis()); } }); XMLStringBuffer xsb = new XMLStringBuffer(main.getCurrentIndent()); List<IInvokedMethod> invokedMethods = suite.getAllInvokedMethods(); String currentClass = ""; long start = 0; for (IInvokedMethod im : invokedMethods) { ITestNGMethod m = im.getTestMethod(); // for (ITestResult tr : results) { // ITestNGMethod m = tr.getMethod(); String cls = "test-method"; if (m.isBeforeSuiteConfiguration()) { cls = "configuration-suite before"; } else if (m.isAfterSuiteConfiguration()) { cls = "configuration-suite after"; } else if (m.isBeforeTestConfiguration()) { cls = "configuration-test before"; } else if (m.isAfterTestConfiguration()) { cls = "configuration-test after"; } else if (m.isBeforeClassConfiguration()) { cls = "configuration-class before"; } else if (m.isAfterClassConfiguration()) { cls = "configuration-class after"; } else if (m.isBeforeMethodConfiguration()) { cls = "configuration-method before"; } else if (m.isAfterMethodConfiguration()) { cls = "configuration-method after"; } ITestResult tr = im.getTestResult(); String methodName = Model.getTestResultName(tr); if (!m.getTestClass().getName().equals(currentClass)) { if (!"".equals(currentClass)) { xsb.pop(D); } xsb.push(D, C, "chronological-class"); xsb.addRequired(D, m.getTestClass().getName(), C, "chronological-class-name"); currentClass = m.getTestClass().getName(); } xsb.push(D, C, cls); if (tr.getStatus() == ITestResult.FAILURE) { xsb.push("img", "src", Model.getImage("failed")); xsb.pop("img"); } // No need to check for skipped methods since by definition, they were never // invoked. xsb.addRequired(S, methodName, C, "method-name"); if (start == 0) { start = tr.getStartMillis(); } xsb.addRequired(S, Long.toString(tr.getStartMillis() - start) + " ms", C, "method-start"); xsb.pop(D); } return xsb.toXML(); }
public String getContent(ISuite suite, XMLStringBuffer main) { XMLStringBuffer xsb = new XMLStringBuffer(main.getCurrentIndent()); List<IInvokedMethod> invokedMethods = suite.getAllInvokedMethods(); Collections.sort(invokedMethods, new Comparator<IInvokedMethod>() { @Override public int compare(IInvokedMethod arg0, IInvokedMethod arg1) { return (int) (arg0.getTestResult().getStartMillis() - arg1.getTestResult().getStartMillis()); } }); String currentClass = ""; long start = 0; for (IInvokedMethod im : invokedMethods) { ITestNGMethod m = im.getTestMethod(); // for (ITestResult tr : results) { // ITestNGMethod m = tr.getMethod(); String cls = "test-method"; if (m.isBeforeSuiteConfiguration()) { cls = "configuration-suite before"; } else if (m.isAfterSuiteConfiguration()) { cls = "configuration-suite after"; } else if (m.isBeforeTestConfiguration()) { cls = "configuration-test before"; } else if (m.isAfterTestConfiguration()) { cls = "configuration-test after"; } else if (m.isBeforeClassConfiguration()) { cls = "configuration-class before"; } else if (m.isAfterClassConfiguration()) { cls = "configuration-class after"; } else if (m.isBeforeMethodConfiguration()) { cls = "configuration-method before"; } else if (m.isAfterMethodConfiguration()) { cls = "configuration-method after"; } ITestResult tr = im.getTestResult(); String methodName = Model.getTestResultName(tr); if (!m.getTestClass().getName().equals(currentClass)) { if (!"".equals(currentClass)) { xsb.pop(D); } xsb.push(D, C, "chronological-class"); xsb.addRequired(D, m.getTestClass().getName(), C, "chronological-class-name"); currentClass = m.getTestClass().getName(); } xsb.push(D, C, cls); if (tr.getStatus() == ITestResult.FAILURE) { xsb.push("img", "src", Model.getImage("failed")); xsb.pop("img"); } // No need to check for skipped methods since by definition, they were never // invoked. xsb.addRequired(S, methodName, C, "method-name"); if (start == 0) { start = tr.getStartMillis(); } xsb.addRequired(S, Long.toString(tr.getStartMillis() - start) + " ms", C, "method-start"); xsb.pop(D); } return xsb.toXML(); }
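The fix above switches the chronological panel to sort suite.getAllInvokedMethods() by each invocation's test-result start time, instead of sorting a result list that was never used afterwards. One caveat carried over from the original comparator: casting a difference of millisecond timestamps to int can overflow. A hedged sketch of an overflow-safe variant, assuming a Java 7+ runtime for Long.compare:

// Sketch: same ordering as the fixed comparator, without the (int)(a - b) overflow risk.
Collections.sort(invokedMethods, new Comparator<IInvokedMethod>() {
    @Override
    public int compare(IInvokedMethod a, IInvokedMethod b) {
        return Long.compare(a.getTestResult().getStartMillis(),
                            b.getTestResult().getStartMillis());
    }
});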
diff --git a/src/main/java/org/basex/query/func/FNSeq.java b/src/main/java/org/basex/query/func/FNSeq.java index f1b5e321a..05c1578a3 100644 --- a/src/main/java/org/basex/query/func/FNSeq.java +++ b/src/main/java/org/basex/query/func/FNSeq.java @@ -1,387 +1,387 @@ package org.basex.query.func; import org.basex.data.Data; import org.basex.query.QueryContext; import org.basex.query.QueryException; import org.basex.query.expr.CmpV; import org.basex.query.expr.Expr; import org.basex.query.item.ANode; import static org.basex.query.item.ANode.kind; import org.basex.query.item.DBNode; import org.basex.query.item.Empty; import org.basex.query.item.Item; import org.basex.query.item.Itr; import org.basex.query.item.SeqType; import org.basex.query.item.Type; import org.basex.query.iter.AxisIter; import org.basex.query.iter.Iter; import org.basex.query.iter.ItemCache; import org.basex.query.iter.NodeCache; import org.basex.query.util.ItemSet; import org.basex.util.Array; import org.basex.util.InputInfo; /** * Sequence functions. * * @author BaseX Team 2005-11, BSD License * @author Christian Gruen */ final class FNSeq extends Fun { /** * Constructor. * @param ii input info * @param f function definition * @param e arguments */ protected FNSeq(final InputInfo ii, final FunDef f, final Expr... e) { super(ii, f, e); } @Override public Item item(final QueryContext ctx, final InputInfo ii) throws QueryException { switch(def) { case HEAD: return head(ctx); default: return super.item(ctx, ii); } } @Override public Iter iter(final QueryContext ctx) throws QueryException { switch(def) { case INDEXOF: return indexOf(ctx); case DISTINCT: return distinctValues(ctx); case INSBEF: return insertBefore(ctx); case REVERSE: return reverse(ctx); case REMOVE: return remove(ctx); case SUBSEQ: return subsequence(ctx); case TAIL: return tail(ctx); case OUTERMOST: return most(ctx, true); case INNERMOST: return most(ctx, false); default: return super.iter(ctx); } } /** * Returns the outermost/innermost nodes of a node sequence, i.e. a node is * only contained, if none of its ancestors/descendants are. * @param ctx query context * @param outer outermost flag * @return outermost/innermost nodes * @throws QueryException exception */ private Iter most(final QueryContext ctx, final boolean outer) throws QueryException { final Iter iter = expr[0].iter(ctx); final NodeCache nc = new NodeCache().random(); for(Item it; (it = iter.next()) != null;) nc.add(checkNode(it)); final int len = (int) nc.size(); // only go further if there are at least two nodes if(len < 2) return nc; // after this, the iterator is sorted and duplicate free if(nc.dbnodes()) { // nodes are sorted, so ancestors always come before their descendants // the first/last node is thus always included in the output final DBNode fst = (DBNode) nc.get(outer ? 0 : len - 1); final Data data = fst.data; final ANode[] nodes = nc.item.clone(); - nc.item[0] = fst; - nc.size(1); - // [LW] improve with NodeCache.binarySearch() if(outer) { // skip the subtree of the last added node - int next = fst.pre + data.size(fst.pre, kind(fst.type)); - for(int i = 1; i < len; i++) { - final DBNode nd = (DBNode) nodes[i]; - if(nd.pre >= next) { - nc.add(nd); - next = nd.pre + data.size(nd.pre, kind(nd.type)); - } + nc.size(0); + final NodeCache src = new NodeCache(nodes, len); + for(int next = 0, p; next < len; next = p < 0 ? 
-p - 1 : p) { + final DBNode nd = (DBNode) nodes[next]; + final int n = nd.pre + data.size(nd.pre, kind(nd.type)); + p = src.binarySearch(n, next + 1, len - next - 1); + nc.add(nd); } } else { + // [LW] improve with NodeCache.binarySearch() // skip ancestors of the last added node + nc.item[0] = fst; + nc.size(1); int before = fst.pre; for(int i = len - 1; i-- != 0;) { final DBNode nd = (DBNode) nodes[i]; if(nd.pre + data.size(nd.pre, kind(nd.type)) <= before) { nc.add(nd); before = nd.pre; } } // nodes were added in reverse order, correct that Array.reverse(nc.item, 0, (int) nc.size()); } return nc; } // multiple documents and/or constructed fragments final NodeCache out = new NodeCache(new ANode[len], 0); outer: for(int i = 0; i < len; i++) { final ANode nd = nc.item[i]; final AxisIter ax = outer ? nd.anc() : nd.descendant(); for(ANode a; (a = ax.next()) != null;) if(nc.indexOf(a, false) != -1) continue outer; out.add(nc.item[i]); } return out; } @Override public Expr cmp(final QueryContext ctx) { // static typing: // index-of will create integers, insert-before might add new types if(def == FunDef.INDEXOF || def == FunDef.INSBEF) return this; // all other types will return existing types final Type t = expr[0].type().type; SeqType.Occ o = SeqType.Occ.ZM; // head will return at most one item if(def == FunDef.HEAD) o = SeqType.Occ.ZO; // at most one returned item if(def == FunDef.SUBSEQ && expr[0].type().one()) o = SeqType.Occ.ZO; type = SeqType.get(t, o); return this; } /** * Returns the first item in a sequence. * @param ctx query context * @return first item * @throws QueryException query exception */ private Item head(final QueryContext ctx) throws QueryException { final Expr e = expr[0]; return e.type().zeroOrOne() ? e.item(ctx, input) : e.iter(ctx).next(); } /** * Returns all but the first item in a sequence. * @param ctx query context * @return iterator * @throws QueryException query exception */ private Iter tail(final QueryContext ctx) throws QueryException { final Expr e = expr[0]; if(e.type().zeroOrOne()) return Empty.ITER; final Iter ir = e.iter(ctx); if(ir.next() == null) return Empty.ITER; return new Iter() { @Override public Item next() throws QueryException { return ir.next(); } }; } /** * Returns the indexes of an item in a sequence. * @param ctx query context * @return position(s) of item * @throws QueryException query exception */ private Iter indexOf(final QueryContext ctx) throws QueryException { final Item it = checkItem(expr[1], ctx); if(expr.length == 3) checkColl(expr[2], ctx); return new Iter() { final Iter ir = expr[0].iter(ctx); int c; @Override public Item next() throws QueryException { while(true) { final Item i = ir.next(); if(i == null) return null; ++c; if(i.comparable(it) && CmpV.Op.EQ.e(input, i, it)) return Itr.get(c); } } }; } /** * Returns all distinct values of a sequence. * @param ctx query context * @return distinct iterator * @throws QueryException query exception */ private Iter distinctValues(final QueryContext ctx) throws QueryException { if(expr.length == 2) checkColl(expr[1], ctx); return new Iter() { final ItemSet map = new ItemSet(); final Iter ir = expr[0].iter(ctx); @Override public Item next() throws QueryException { while(true) { Item i = ir.next(); if(i == null) return null; ctx.checkStop(); i = atom(i); if(map.index(input, i)) return i; } } }; } /** * Inserts items before the specified position. 
* @param ctx query context * @return iterator * @throws QueryException query exception */ private Iter insertBefore(final QueryContext ctx) throws QueryException { return new Iter() { final long pos = Math.max(1, checkItr(expr[1], ctx)); final Iter iter = expr[0].iter(ctx); final Iter ins = expr[2].iter(ctx); long p = pos; boolean last; @Override public Item next() throws QueryException { if(last) return p > 0 ? ins.next() : null; final boolean sub = p == 0 || --p == 0; final Item i = (sub ? ins : iter).next(); if(i != null) return i; if(sub) --p; else last = true; return next(); } }; } /** * Removes an item at a specified position in a sequence. * @param ctx query context * @return iterator without Item * @throws QueryException query exception */ private Iter remove(final QueryContext ctx) throws QueryException { return new Iter() { final long pos = checkItr(expr[1], ctx); final Iter iter = expr[0].iter(ctx); long c; @Override public Item next() throws QueryException { return ++c != pos || iter.next() != null ? iter.next() : null; } }; } /** * Creates a subsequence out of a sequence, starting with start and * ending with end. * @param ctx query context * @return subsequence * @throws QueryException query exception */ private Iter subsequence(final QueryContext ctx) throws QueryException { final double ds = checkDbl(expr[1], ctx); if(Double.isNaN(ds)) return Empty.ITER; final long s = StrictMath.round(ds); long l = Long.MAX_VALUE; if(expr.length > 2) { final double dl = checkDbl(expr[2], ctx); if(Double.isNaN(dl)) return Empty.ITER; l = s + StrictMath.round(dl); } final long e = l; final Iter iter = ctx.iter(expr[0]); final long max = iter.size(); return max != -1 ? new Iter() { // directly access specified items long c = Math.max(1, s); long m = Math.min(e, max + 1); @Override public Item next() throws QueryException { return c < m ? iter.get(c++ - 1) : null; } @Override public Item get(final long i) throws QueryException { return iter.get(c + i - 1); } @Override public long size() { return Math.max(0, m - c); } @Override public boolean reset() { c = Math.max(1, s); return true; } } : new Iter() { // run through all items long c; @Override public Item next() throws QueryException { while(true) { final Item i = iter.next(); if(i == null || ++c >= e) return null; if(c >= s) return i; } } }; } /** * Reverses a sequence. * @param ctx query context * @return iterator * @throws QueryException query exception */ private Iter reverse(final QueryContext ctx) throws QueryException { final Iter iter = ctx.iter(expr[0]); // only one item found; no reversion necessary if(iter.size() == 1) return iter; // process any other iterator... return new Iter() { final Iter ir = iter.size() != -1 ? iter : ItemCache.get(iter); final long s = ir.size(); long c = s; @Override public Item next() throws QueryException { return --c >= 0 ? ir.get(c) : null; } @Override public Item get(final long i) throws QueryException { return ir.get(s - i - 1); } @Override public long size() { return s; } @Override public boolean reset() { c = s; return true; } }; } @Override public boolean uses(final Use u) { return u == Use.X30 && (def == FunDef.HEAD || def == FunDef.TAIL) || super.uses(u); } }
false
true
private Iter most(final QueryContext ctx, final boolean outer) throws QueryException { final Iter iter = expr[0].iter(ctx); final NodeCache nc = new NodeCache().random(); for(Item it; (it = iter.next()) != null;) nc.add(checkNode(it)); final int len = (int) nc.size(); // only go further if there are at least two nodes if(len < 2) return nc; // after this, the iterator is sorted and duplicate free if(nc.dbnodes()) { // nodes are sorted, so ancestors always come before their descendants // the first/last node is thus always included in the output final DBNode fst = (DBNode) nc.get(outer ? 0 : len - 1); final Data data = fst.data; final ANode[] nodes = nc.item.clone(); nc.item[0] = fst; nc.size(1); // [LW] improve with NodeCache.binarySearch() if(outer) { // skip the subtree of the last added node int next = fst.pre + data.size(fst.pre, kind(fst.type)); for(int i = 1; i < len; i++) { final DBNode nd = (DBNode) nodes[i]; if(nd.pre >= next) { nc.add(nd); next = nd.pre + data.size(nd.pre, kind(nd.type)); } } } else { // skip ancestors of the last added node int before = fst.pre; for(int i = len - 1; i-- != 0;) { final DBNode nd = (DBNode) nodes[i]; if(nd.pre + data.size(nd.pre, kind(nd.type)) <= before) { nc.add(nd); before = nd.pre; } } // nodes were added in reverse order, correct that Array.reverse(nc.item, 0, (int) nc.size()); } return nc; } // multiple documents and/or constructed fragments final NodeCache out = new NodeCache(new ANode[len], 0); outer: for(int i = 0; i < len; i++) { final ANode nd = nc.item[i]; final AxisIter ax = outer ? nd.anc() : nd.descendant(); for(ANode a; (a = ax.next()) != null;) if(nc.indexOf(a, false) != -1) continue outer; out.add(nc.item[i]); } return out; }
private Iter most(final QueryContext ctx, final boolean outer) throws QueryException { final Iter iter = expr[0].iter(ctx); final NodeCache nc = new NodeCache().random(); for(Item it; (it = iter.next()) != null;) nc.add(checkNode(it)); final int len = (int) nc.size(); // only go further if there are at least two nodes if(len < 2) return nc; // after this, the iterator is sorted and duplicate free if(nc.dbnodes()) { // nodes are sorted, so ancestors always come before their descendants // the first/last node is thus always included in the output final DBNode fst = (DBNode) nc.get(outer ? 0 : len - 1); final Data data = fst.data; final ANode[] nodes = nc.item.clone(); if(outer) { // skip the subtree of the last added node nc.size(0); final NodeCache src = new NodeCache(nodes, len); for(int next = 0, p; next < len; next = p < 0 ? -p - 1 : p) { final DBNode nd = (DBNode) nodes[next]; final int n = nd.pre + data.size(nd.pre, kind(nd.type)); p = src.binarySearch(n, next + 1, len - next - 1); nc.add(nd); } } else { // [LW] improve with NodeCache.binarySearch() // skip ancestors of the last added node nc.item[0] = fst; nc.size(1); int before = fst.pre; for(int i = len - 1; i-- != 0;) { final DBNode nd = (DBNode) nodes[i]; if(nd.pre + data.size(nd.pre, kind(nd.type)) <= before) { nc.add(nd); before = nd.pre; } } // nodes were added in reverse order, correct that Array.reverse(nc.item, 0, (int) nc.size()); } return nc; } // multiple documents and/or constructed fragments final NodeCache out = new NodeCache(new ANode[len], 0); outer: for(int i = 0; i < len; i++) { final ANode nd = nc.item[i]; final AxisIter ax = outer ? nd.anc() : nd.descendant(); for(ANode a; (a = ax.next()) != null;) if(nc.indexOf(a, false) != -1) continue outer; out.add(nc.item[i]); } return out; }
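In the fixed most() above, the outermost case no longer scans every following node; it jumps straight past each accepted node's subtree using NodeCache.binarySearch, whose negative return appears to encode the insertion point as -(pos + 1), mirroring java.util.Arrays.binarySearch. A self-contained sketch of the same idea on plain arrays of pre values and subtree sizes (sorted in document order and duplicate-free, as the method's precondition states; names are illustrative, not BaseX API):

import java.util.Arrays;

final class Outermost {
    /** Keeps only nodes none of whose ancestors are in the (sorted, duplicate-free) list. */
    static int[] outermost(int[] pres, int[] sizes) {
        final int[] keep = new int[pres.length];
        int n = 0;
        for (int i = 0; i < pres.length; ) {
            keep[n++] = pres[i];
            // First pre value outside node i's subtree.
            final int afterSubtree = pres[i] + sizes[i];
            final int p = Arrays.binarySearch(pres, i + 1, pres.length, afterSubtree);
            // Jump to the first non-descendant: found index, or decoded insertion point.
            i = p < 0 ? -p - 1 : p;
        }
        return Arrays.copyOf(keep, n);
    }
}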
diff --git a/java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java b/java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java index bd919fe4b..ddc7a0feb 100644 --- a/java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java +++ b/java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java @@ -1,1229 +1,1227 @@ /* Derby - Class org.apache.derby.iapi.types.DataValueFactoryImpl Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.iapi.types; import org.apache.derby.iapi.types.NumberDataValue; import org.apache.derby.iapi.types.BooleanDataValue; import org.apache.derby.iapi.types.BitDataValue; import org.apache.derby.iapi.types.DateTimeDataValue; import org.apache.derby.iapi.types.StringDataValue; import org.apache.derby.iapi.types.UserDataValue; import org.apache.derby.iapi.types.RefDataValue; import org.apache.derby.iapi.types.DataValueFactory; import org.apache.derby.iapi.types.DataValueDescriptor; import org.apache.derby.iapi.types.RowLocation; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.services.sanity.SanityManager; import org.apache.derby.iapi.services.i18n.LocaleFinder; import org.apache.derby.iapi.services.io.FormatableInstanceGetter; import org.apache.derby.iapi.services.io.FormatIdUtil; import org.apache.derby.iapi.services.io.RegisteredFormatIds; import org.apache.derby.iapi.services.io.StoredFormatIds; import org.apache.derby.iapi.services.monitor.ModuleControl; import org.apache.derby.iapi.services.loader.ClassInfo; import org.apache.derby.iapi.services.loader.InstanceGetter; import org.apache.derby.iapi.reference.SQLState; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; import java.text.Collator; import java.text.RuleBasedCollator; import java.util.Properties; import java.util.Locale; import org.apache.derby.iapi.db.DatabaseContext; import org.apache.derby.iapi.services.context.ContextService; /** * Core implementation of DataValueFactory. Does not implement * methods required to generate DataValueDescriptor implementations * for the DECIMAL datatype. J2ME and J2SE require different implementations. * * @see DataValueFactory */ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl { LocaleFinder localeFinder; //BasicDatabase first boots DVF in it's boot method and then sets //this databaseLocale in DVF. private Locale databaseLocale; //Following Collator object will be initialized using databaseLocale. private RuleBasedCollator collatorForCharacterTypes; /** * For performance purposes, cache InstanceGetters for various formatid * as we get them in getInstanceUsingFormatIdAndCollationType method. */ private InstanceGetter[] instanceGettersForFormatIds; DataValueFactoryImpl() { } /* ** ModuleControl methods. 
*/ /* (non-Javadoc) * @see org.apache.derby.iapi.services.monitor.ModuleControl#boot(boolean, java.util.Properties) */ public void boot(boolean create, Properties properties) throws StandardException { DataValueDescriptor decimalImplementation = getNullDecimal(null); TypeId.decimalImplementation = decimalImplementation; RegisteredFormatIds.TwoByte[StoredFormatIds.SQL_DECIMAL_ID] = decimalImplementation.getClass().getName(); // Generate a DECIMAL value represetentation of 0 decimalImplementation = decimalImplementation.getNewNull(); decimalImplementation.setValue(0L); NumberDataType.ZERO_DECIMAL = decimalImplementation; } /* (non-Javadoc) * @see org.apache.derby.iapi.services.monitor.ModuleControl#stop() */ public void stop() { } /** * @see DataValueFactory#getDataValue * */ public NumberDataValue getDataValue(int value) { return new SQLInteger(value); } public NumberDataValue getDataValue(int value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLInteger(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(Integer value) { if (value != null) return new SQLInteger(value.intValue()); else return new SQLInteger(); } public NumberDataValue getDataValue(Integer value, NumberDataValue previous) throws StandardException { if (previous == null) { return getDataValue(value); } previous.setValue(value); return previous; } public NumberDataValue getDataValue(char value) { return new SQLInteger(value); } public NumberDataValue getDataValue(char value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLInteger(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(short value) { return new SQLSmallint(value); } public NumberDataValue getDataValue(short value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLSmallint(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(Short value) { if (value != null) return new SQLSmallint(value.shortValue()); else return new SQLSmallint(); } public NumberDataValue getDataValue(Short value, NumberDataValue previous) throws StandardException { if (previous == null) return getDataValue(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(byte value) { return new SQLTinyint(value); } public NumberDataValue getDataValue(byte value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLTinyint(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(Byte value) { if (value != null) return new SQLTinyint(value.byteValue()); else return new SQLTinyint(); } public NumberDataValue getDataValue(Byte value, NumberDataValue previous) throws StandardException { if (previous == null) return getDataValue(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(long value) { return new SQLLongint(value); } public NumberDataValue getDataValue(long value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLLongint(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(Long value) { if (value != null) return new SQLLongint(value.longValue()); else return new SQLLongint(); } public NumberDataValue getDataValue(Long value, NumberDataValue previous) throws StandardException { if (previous == null) return getDataValue(value); previous.setValue(value); return previous; 
} public NumberDataValue getDataValue(float value) throws StandardException { return new SQLReal(value); } public NumberDataValue getDataValue(float value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLReal(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(Float value) throws StandardException { if (value != null) return new SQLReal(value.floatValue()); else return new SQLReal(); } public NumberDataValue getDataValue(Float value, NumberDataValue previous) throws StandardException { if (previous == null) return getDataValue(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(double value) throws StandardException { return new SQLDouble(value); } public NumberDataValue getDataValue(double value, NumberDataValue previous) throws StandardException { if (previous == null) return new SQLDouble(value); previous.setValue(value); return previous; } public NumberDataValue getDataValue(Double value) throws StandardException { if (value != null) return new SQLDouble(value.doubleValue()); else return new SQLDouble(); } public NumberDataValue getDataValue(Double value, NumberDataValue previous) throws StandardException { if (previous == null) return getDataValue(value); previous.setValue(value); return previous; } public final NumberDataValue getDecimalDataValue(Number value) throws StandardException { NumberDataValue ndv = getNullDecimal((NumberDataValue) null); ndv.setValue(value); return ndv; } public final NumberDataValue getDecimalDataValue(Number value, NumberDataValue previous) throws StandardException { if (previous == null) return getDecimalDataValue(value); previous.setValue(value); return previous; } public final NumberDataValue getDecimalDataValue(String value, NumberDataValue previous) throws StandardException { if (previous == null) return getDecimalDataValue(value); previous.setValue(value); return previous; } public BooleanDataValue getDataValue(boolean value) { return new SQLBoolean(value); } public BooleanDataValue getDataValue(boolean value, BooleanDataValue previous) throws StandardException { if (previous == null) return new SQLBoolean(value); previous.setValue(value); return previous; } public BooleanDataValue getDataValue(Boolean value) { if (value != null) return new SQLBoolean(value.booleanValue()); else return new SQLBoolean(); } public BooleanDataValue getDataValue(Boolean value, BooleanDataValue previous) throws StandardException { if (previous == null) return getDataValue(value); previous.setValue(value); return previous; } public BooleanDataValue getDataValue(BooleanDataValue value) { if (value != null) return value; else return new SQLBoolean(); } public BitDataValue getBitDataValue(byte[] value) throws StandardException { return new SQLBit(value); } public BitDataValue getBitDataValue(byte[] value, BitDataValue previous) throws StandardException { if (previous == null) return new SQLBit(value); previous.setValue(value); return previous; } public BitDataValue getVarbitDataValue(byte[] value) { return new SQLVarbit(value); } public BitDataValue getVarbitDataValue(byte[] value, BitDataValue previous) throws StandardException { if (previous == null) return new SQLVarbit(value); previous.setValue(value); return previous; } // LONGVARBIT public BitDataValue getLongVarbitDataValue(byte[] value) throws StandardException { return new SQLLongVarbit(value); } public BitDataValue getLongVarbitDataValue(byte[] value, BitDataValue previous) throws StandardException { 
if (previous == null) return new SQLLongVarbit(value); previous.setValue(value); return previous; } // BLOB public BitDataValue getBlobDataValue(byte[] value) throws StandardException { return new SQLBlob(value); } public BitDataValue getBlobDataValue(byte[] value, BitDataValue previous) throws StandardException { if (previous == null) return new SQLBlob(value); previous.setValue(value); return previous; } // CHAR public StringDataValue getCharDataValue(String value) { return new SQLChar(value); } public StringDataValue getCharDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLChar(value); previous.setValue(value); return previous; } public StringDataValue getVarcharDataValue(String value) { return new SQLVarchar(value); } public StringDataValue getVarcharDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLVarchar(value); previous.setValue(value); return previous; } public StringDataValue getLongvarcharDataValue(String value) { return new SQLLongvarchar(value); } public StringDataValue getClobDataValue(String value) { return new SQLClob(value); } public StringDataValue getLongvarcharDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLLongvarchar(value); previous.setValue(value); return previous; } public StringDataValue getClobDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLClob(value); previous.setValue(value); return previous; } // public StringDataValue getNationalCharDataValue(String value) { return new SQLNationalChar(value, getLocaleFinder()); } public StringDataValue getNationalCharDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLNationalChar(value, getLocaleFinder()); previous.setValue(value); return previous; } public StringDataValue getNationalVarcharDataValue(String value) { return new SQLNationalVarchar(value, getLocaleFinder()); } public StringDataValue getNationalVarcharDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLNationalVarchar(value, getLocaleFinder()); previous.setValue(value); return previous; } public StringDataValue getNationalLongvarcharDataValue(String value) { return new SQLNationalLongvarchar(value, getLocaleFinder()); } public StringDataValue getNationalLongvarcharDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLNationalLongvarchar(value, getLocaleFinder()); previous.setValue(value); return previous; } public StringDataValue getNClobDataValue(String value) { return new SQLNClob(value, getLocaleFinder()); } public StringDataValue getNClobDataValue(String value, StringDataValue previous) throws StandardException { if (previous == null) return new SQLNClob(value, getLocaleFinder()); previous.setValue(value); return previous; } public DateTimeDataValue getDataValue(Date value) throws StandardException { return new SQLDate(value); } public DateTimeDataValue getDataValue(Date value, DateTimeDataValue previous) throws StandardException { if (previous == null) return new SQLDate(value); previous.setValue(value); return previous; } public DateTimeDataValue getDataValue(Time value) throws StandardException { return new SQLTime(value); } public DateTimeDataValue getDataValue(Time value, DateTimeDataValue previous) throws StandardException 
{ if (previous == null) return new SQLTime(value); previous.setValue(value); return previous; } public DateTimeDataValue getDataValue(Timestamp value) throws StandardException { return new SQLTimestamp(value); } public DateTimeDataValue getDataValue(Timestamp value, DateTimeDataValue previous) throws StandardException { if (previous == null) return new SQLTimestamp(value); previous.setValue(value); return previous; } /** * Implement the date SQL function: construct a SQL date from a string, number, or timestamp. * * @param operand Must be a date, a number, or a string convertible to a date. * * @exception StandardException standard error policy */ public DateTimeDataValue getDate( DataValueDescriptor operand) throws StandardException { return SQLDate.computeDateFunction( operand, this); } /** * Implement the timestamp SQL function: construct a SQL timestamp from a string, or timestamp. * * @param operand Must be a timestamp or a string convertible to a timestamp. * * @exception StandardException standard error policy */ public DateTimeDataValue getTimestamp( DataValueDescriptor operand) throws StandardException { return SQLTimestamp.computeTimestampFunction( operand, this); } public DateTimeDataValue getTimestamp( DataValueDescriptor date, DataValueDescriptor time) throws StandardException { return new SQLTimestamp( date, time); } public UserDataValue getDataValue(Object value) { return new UserType(value); } public UserDataValue getDataValue(Object value, UserDataValue previous) { if (previous == null) return new UserType(value); ((UserType) previous).setValue(value); return previous; } public RefDataValue getDataValue(RowLocation value) { return new SQLRef(value); } public RefDataValue getDataValue(RowLocation value, RefDataValue previous) { if (previous == null) return new SQLRef(value); previous.setValue(value); return previous; } public NumberDataValue getNullInteger(NumberDataValue dataValue) { if (dataValue == null) { return new SQLInteger(); } else { dataValue.setToNull(); return dataValue; } } public NumberDataValue getNullShort(NumberDataValue dataValue) { if (dataValue == null) { return new SQLSmallint(); } else { dataValue.setToNull(); return dataValue; } } public NumberDataValue getNullLong(NumberDataValue dataValue) { if (dataValue == null) { return new SQLLongint(); } else { dataValue.setToNull(); return dataValue; } } public NumberDataValue getNullByte(NumberDataValue dataValue) { if (dataValue == null) { return new SQLTinyint(); } else { dataValue.setToNull(); return dataValue; } } public NumberDataValue getNullFloat(NumberDataValue dataValue) { if (dataValue == null) { return new SQLReal(); } else { dataValue.setToNull(); return dataValue; } } public NumberDataValue getNullDouble(NumberDataValue dataValue) { if (dataValue == null) { return new SQLDouble(); } else { dataValue.setToNull(); return dataValue; } } public BooleanDataValue getNullBoolean(BooleanDataValue dataValue) { if (dataValue == null) { return new SQLBoolean(); } else { dataValue.setToNull(); return dataValue; } } public BitDataValue getNullBit(BitDataValue dataValue) throws StandardException { if (dataValue == null) { return getBitDataValue((byte[]) null); } else { dataValue.setToNull(); return dataValue; } } public BitDataValue getNullVarbit(BitDataValue dataValue) throws StandardException { if (dataValue == null) { return getVarbitDataValue((byte[]) null); } else { dataValue.setToNull(); return dataValue; } } // LONGVARBIT public BitDataValue getNullLongVarbit(BitDataValue dataValue) throws 
StandardException { if (dataValue == null) { return getLongVarbitDataValue((byte[]) null); } else { dataValue.setToNull(); return dataValue; } } /// BLOB public BitDataValue getNullBlob(BitDataValue dataValue) throws StandardException { if (dataValue == null) { return getBlobDataValue((byte[]) null); } else { dataValue.setToNull(); return dataValue; } } // CHAR public StringDataValue getNullChar(StringDataValue dataValue) { if (dataValue == null) { return getCharDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullVarchar(StringDataValue dataValue) { if (dataValue == null) { return getVarcharDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullLongvarchar(StringDataValue dataValue) { if (dataValue == null) { return getLongvarcharDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullClob(StringDataValue dataValue) { if (dataValue == null) { return getClobDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullNationalChar(StringDataValue dataValue) { if (dataValue == null) { return getNationalCharDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullNationalVarchar(StringDataValue dataValue) { if (dataValue == null) { return getNationalVarcharDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullNationalLongvarchar(StringDataValue dataValue) { if (dataValue == null) { return getNationalLongvarcharDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public StringDataValue getNullNClob(StringDataValue dataValue) { if (dataValue == null) { return getNClobDataValue((String) null); } else { dataValue.setToNull(); return dataValue; } } public UserDataValue getNullObject(UserDataValue dataValue) { if (dataValue == null) { return getDataValue((Object) null); } else { dataValue.setToNull(); return dataValue; } } public RefDataValue getNullRef(RefDataValue dataValue) { if (dataValue == null) { return getDataValue((RowLocation) null); } else { dataValue.setToNull(); return dataValue; } } public DateTimeDataValue getNullDate(DateTimeDataValue dataValue) { if (dataValue == null) { try { return getDataValue((Date) null); } catch( StandardException se) { if( SanityManager.DEBUG) { SanityManager.THROWASSERT( "Could not get a null date.", se); } return null; } } else { dataValue.setToNull(); return dataValue; } } public DateTimeDataValue getNullTime(DateTimeDataValue dataValue) { if (dataValue == null) { try { return getDataValue((Time) null); } catch( StandardException se) { if( SanityManager.DEBUG) { SanityManager.THROWASSERT( "Could not get a null time.", se); } return null; } } else { dataValue.setToNull(); return dataValue; } } public DateTimeDataValue getNullTimestamp(DateTimeDataValue dataValue) { if (dataValue == null) { try { return getDataValue((Timestamp) null); } catch( StandardException se) { if( SanityManager.DEBUG) { SanityManager.THROWASSERT( "Could not get a null timestamp.", se); } return null; } } else { dataValue.setToNull(); return dataValue; } } public DateTimeDataValue getDateValue( String dateStr, boolean isJdbcEscape) throws StandardException { return new SQLDate( dateStr, isJdbcEscape, getLocaleFinder()); } // end of getDateValue( String dateStr) public DateTimeDataValue getTimeValue( String timeStr, boolean isJdbcEscape) 
throws StandardException { return new SQLTime( timeStr, isJdbcEscape, getLocaleFinder()); } // end of getTimeValue( String timeStr) public DateTimeDataValue getTimestampValue( String timestampStr, boolean isJdbcEscape) throws StandardException { return new SQLTimestamp( timestampStr, isJdbcEscape, getLocaleFinder()); } // end of getTimestampValue( String timestampStr) /** * getXMLDataValue: * Get a SQL null value with XML type. * @return An XMLDataValue instance corresponding to a * a NULL value. */ public XMLDataValue getXMLDataValue() { return new XML(); } /** * getXMLDataValue: * Get a null XML value. If a non-null XMLDataValue is * received then re-use that instance, otherwise create * a new one. * @param previous An XMLDataValue instance to re-use. * @return An XMLDataValue instance corresponding to a * NULL value. If an XMLDataValue was received, the * returned XMLDataValue is the same instance as the one * received, but the actual data has been set to a * SQL null value. * @exception StandardException Thrown on error */ public XMLDataValue getXMLDataValue(XMLDataValue previous) throws StandardException { return getNullXML(previous); } /** * getNullXML: * Get an XML with a SQL null value. If the supplied value is * null then get a new value, otherwise set it to null and return * that value. * @param dataValue An XMLDataValue instance to re-use. * @return An XMLDataValue instance corresponding to a * NULL value. If an XMLDataValue was received, the * returned XMLDataValue is the same instance as the one * received, but the actual data has been set to null. */ public XMLDataValue getNullXML(XMLDataValue dataValue) { if (dataValue == null) return getXMLDataValue(); else { dataValue.setToNull(); return dataValue; } } /** @see DataValueFactory#setLocale(Locale) */ public void setLocale(Locale localeOfTheDatabase){ databaseLocale = localeOfTheDatabase; collatorForCharacterTypes = (RuleBasedCollator) Collator.getInstance(databaseLocale); } /** @see DataValueFactory#getCharacterCollator(int) */ public RuleBasedCollator getCharacterCollator(int collationType){ if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return (RuleBasedCollator)null; else return collatorForCharacterTypes; } /** * @see DataValueFactory#getInstanceUsingFormatIdAndCollationType(int, int) */ public Object getInstanceUsingFormatIdAndCollationType( int formatId, int collationType) throws StandardException { String className; int fmtIdPositionInInstanceGetterArray; InstanceGetter instanceGetter; try { fmtIdPositionInInstanceGetterArray = formatId - StoredFormatIds.MIN_TWO_BYTE_FORMAT_ID; //If this is the first time this method is getting called, then //instanceGettersForFormatIds will be null. If so, allocate it. if (instanceGettersForFormatIds == null) { instanceGettersForFormatIds = new InstanceGetter[RegisteredFormatIds.TwoByte.length]; } //Check if we have already called this method for the passed format //id. instanceGetter = instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray]; //If following if is true, then this method has already been called //for the passed format id. We can just use the cached InstanceGetter //from instanceGettersForFormatIds if (instanceGetter != null) { //Get the object from the InstanceGetter Object returnObject = instanceGetter.getNewInstance(); //If we are dealing with default collation, then we have //got the right DVD already. Just return it. 
if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return returnObject; //If we are dealing with territory based collation and //the object is of type StringDataValue, then we need to - //create a StringDataValue with territory based collation. + //return a StringDataValue with territory based collation. if (returnObject instanceof StringDataValue) - ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); - return returnObject; + return ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); } //This is the first time this method has been called for the passed //format id and hence it's InstanceGetter is not in //instanceGettersForFormatIds. Get the InstanceGetter's name for //this format id from RegisteredFormatIds className = RegisteredFormatIds.TwoByte[fmtIdPositionInInstanceGetterArray]; } catch (ArrayIndexOutOfBoundsException aioobe) { className = null; fmtIdPositionInInstanceGetterArray = 0; } catch (Exception ite) { throw StandardException.newException(SQLState.REGISTERED_CLASS_INSTANCE_ERROR, ite, new Integer(formatId), "XX" /*ci.getClassName()*/); } if (className != null) { Throwable t; try { Class clazz = Class.forName(className); // See if the InstanceGetter class for this format id is a //FormatableInstanceGetter if (FormatableInstanceGetter.class.isAssignableFrom(clazz)) { FormatableInstanceGetter tfig = (FormatableInstanceGetter) clazz.newInstance(); tfig.setFormatId(formatId); //Cache this InstanceGetter in instanceGettersForFormatIds instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = tfig; //Get the object from the InstanceGetter Object returnObject = tfig.getNewInstance(); //If we are dealing with default collation, then we have //got the right DVD already. Just return it. if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return returnObject; //If we are dealing with territory based collation and //the object is of type StringDataValue, then we need to - //create a StringDataValue with territory based collation. + //return a StringDataValue with territory based collation. if (returnObject instanceof StringDataValue) - ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); - return returnObject; + return ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); } //InstanceGetter is not of the type FormatableInstanceGetter instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = new ClassInfo(clazz); return instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray].getNewInstance(); } catch (ClassNotFoundException cnfe) { t = cnfe; } catch (IllegalAccessException iae) { t = iae; } catch (InstantiationException ie) { t = ie; } catch (LinkageError le) { t = le; } catch (java.lang.reflect.InvocationTargetException ite) { t = ite; } throw StandardException.newException(SQLState.REGISTERED_CLASS_LINAKGE_ERROR, t, FormatIdUtil.formatIdToString(formatId), className); } throw StandardException.newException(SQLState.REGISTERED_CLASS_NONE, FormatIdUtil.formatIdToString(formatId)); } // RESOLVE: This is here to find the LocaleFinder (i.e. the Database) // on first access. This is necessary because the Monitor can't find // the Database at boot time, because the Database is not done booting. // See LanguageConnectionFactory. 
private LocaleFinder getLocaleFinder() { if (localeFinder == null) { DatabaseContext dc = (DatabaseContext) ContextService.getContext(DatabaseContext.CONTEXT_ID); if( dc != null) localeFinder = dc.getDatabase(); } return localeFinder; } }
false
true
public Object getInstanceUsingFormatIdAndCollationType( int formatId, int collationType) throws StandardException { String className; int fmtIdPositionInInstanceGetterArray; InstanceGetter instanceGetter; try { fmtIdPositionInInstanceGetterArray = formatId - StoredFormatIds.MIN_TWO_BYTE_FORMAT_ID; //If this is the first time this method is getting called, then //instanceGettersForFormatIds will be null. If so, allocate it. if (instanceGettersForFormatIds == null) { instanceGettersForFormatIds = new InstanceGetter[RegisteredFormatIds.TwoByte.length]; } //Check if we have already called this method for the passed format //id. instanceGetter = instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray]; //If following if is true, then this method has already been called //for the passed format id. We can just use the cached InstanceGetter //from instanceGettersForFormatIds if (instanceGetter != null) { //Get the object from the InstanceGetter Object returnObject = instanceGetter.getNewInstance(); //If we are dealing with default collation, then we have //got the right DVD already. Just return it. if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return returnObject; //If we are dealing with territory based collation and //the object is of type StringDataValue, then we need to //create a StringDataValue with territory based collation. if (returnObject instanceof StringDataValue) ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); return returnObject; } //This is the first time this method has been called for the passed //format id and hence it's InstanceGetter is not in //instanceGettersForFormatIds. Get the InstanceGetter's name for //this format id from RegisteredFormatIds className = RegisteredFormatIds.TwoByte[fmtIdPositionInInstanceGetterArray]; } catch (ArrayIndexOutOfBoundsException aioobe) { className = null; fmtIdPositionInInstanceGetterArray = 0; } catch (Exception ite) { throw StandardException.newException(SQLState.REGISTERED_CLASS_INSTANCE_ERROR, ite, new Integer(formatId), "XX" /*ci.getClassName()*/); } if (className != null) { Throwable t; try { Class clazz = Class.forName(className); // See if the InstanceGetter class for this format id is a //FormatableInstanceGetter if (FormatableInstanceGetter.class.isAssignableFrom(clazz)) { FormatableInstanceGetter tfig = (FormatableInstanceGetter) clazz.newInstance(); tfig.setFormatId(formatId); //Cache this InstanceGetter in instanceGettersForFormatIds instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = tfig; //Get the object from the InstanceGetter Object returnObject = tfig.getNewInstance(); //If we are dealing with default collation, then we have //got the right DVD already. Just return it. if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return returnObject; //If we are dealing with territory based collation and //the object is of type StringDataValue, then we need to //create a StringDataValue with territory based collation. 
if (returnObject instanceof StringDataValue) ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); return returnObject; } //InstanceGetter is not of the type FormatableInstanceGetter instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = new ClassInfo(clazz); return instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray].getNewInstance(); } catch (ClassNotFoundException cnfe) { t = cnfe; } catch (IllegalAccessException iae) { t = iae; } catch (InstantiationException ie) { t = ie; } catch (LinkageError le) { t = le; } catch (java.lang.reflect.InvocationTargetException ite) { t = ite; } throw StandardException.newException(SQLState.REGISTERED_CLASS_LINAKGE_ERROR, t, FormatIdUtil.formatIdToString(formatId), className); } throw StandardException.newException(SQLState.REGISTERED_CLASS_NONE, FormatIdUtil.formatIdToString(formatId)); }
public Object getInstanceUsingFormatIdAndCollationType( int formatId, int collationType) throws StandardException { String className; int fmtIdPositionInInstanceGetterArray; InstanceGetter instanceGetter; try { fmtIdPositionInInstanceGetterArray = formatId - StoredFormatIds.MIN_TWO_BYTE_FORMAT_ID; //If this is the first time this method is getting called, then //instanceGettersForFormatIds will be null. If so, allocate it. if (instanceGettersForFormatIds == null) { instanceGettersForFormatIds = new InstanceGetter[RegisteredFormatIds.TwoByte.length]; } //Check if we have already called this method for the passed format //id. instanceGetter = instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray]; //If following if is true, then this method has already been called //for the passed format id. We can just use the cached InstanceGetter //from instanceGettersForFormatIds if (instanceGetter != null) { //Get the object from the InstanceGetter Object returnObject = instanceGetter.getNewInstance(); //If we are dealing with default collation, then we have //got the right DVD already. Just return it. if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return returnObject; //If we are dealing with territory based collation and //the object is of type StringDataValue, then we need to //return a StringDataValue with territory based collation. if (returnObject instanceof StringDataValue) return ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); } //This is the first time this method has been called for the passed //format id and hence it's InstanceGetter is not in //instanceGettersForFormatIds. Get the InstanceGetter's name for //this format id from RegisteredFormatIds className = RegisteredFormatIds.TwoByte[fmtIdPositionInInstanceGetterArray]; } catch (ArrayIndexOutOfBoundsException aioobe) { className = null; fmtIdPositionInInstanceGetterArray = 0; } catch (Exception ite) { throw StandardException.newException(SQLState.REGISTERED_CLASS_INSTANCE_ERROR, ite, new Integer(formatId), "XX" /*ci.getClassName()*/); } if (className != null) { Throwable t; try { Class clazz = Class.forName(className); // See if the InstanceGetter class for this format id is a //FormatableInstanceGetter if (FormatableInstanceGetter.class.isAssignableFrom(clazz)) { FormatableInstanceGetter tfig = (FormatableInstanceGetter) clazz.newInstance(); tfig.setFormatId(formatId); //Cache this InstanceGetter in instanceGettersForFormatIds instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = tfig; //Get the object from the InstanceGetter Object returnObject = tfig.getNewInstance(); //If we are dealing with default collation, then we have //got the right DVD already. Just return it. if (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC) return returnObject; //If we are dealing with territory based collation and //the object is of type StringDataValue, then we need to //return a StringDataValue with territory based collation. 
if (returnObject instanceof StringDataValue) return ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType)); } //InstanceGetter is not of the type FormatableInstanceGetter instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = new ClassInfo(clazz); return instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray].getNewInstance(); } catch (ClassNotFoundException cnfe) { t = cnfe; } catch (IllegalAccessException iae) { t = iae; } catch (InstantiationException ie) { t = ie; } catch (LinkageError le) { t = le; } catch (java.lang.reflect.InvocationTargetException ite) { t = ite; } throw StandardException.newException(SQLState.REGISTERED_CLASS_LINAKGE_ERROR, t, FormatIdUtil.formatIdToString(formatId), className); } throw StandardException.newException(SQLState.REGISTERED_CLASS_NONE, FormatIdUtil.formatIdToString(formatId)); }
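In the pair above, the buggy body calls getValue(getCharacterCollator(collationType)) on the StringDataValue and then returns the original object, discarding the collation-aware value; the fix returns the result of that call. A minimal, self-contained sketch of the discarded-return-value pattern follows; Value and withCollation are hypothetical stand-ins, not Derby's StringDataValue API.

// Minimal sketch (not Derby code): a value type whose "configure" method
// returns a new instance rather than mutating the receiver.
public class DiscardedReturnValueDemo {

    static final class Value {
        final String text;
        final String collation;

        Value(String text, String collation) {
            this.text = text;
            this.collation = collation;
        }

        // Returns a NEW Value; the receiver is left unchanged.
        Value withCollation(String newCollation) {
            return new Value(text, newCollation);
        }
    }

    public static void main(String[] args) {
        Value v = new Value("abc", "UCS_BASIC");

        // Buggy pattern: the returned instance is dropped, so callers still
        // see the default collation.
        v.withCollation("TERRITORY_BASED");
        System.out.println(v.collation);              // prints UCS_BASIC

        // Fixed pattern: keep (or return) the value produced by the call.
        Value collated = v.withCollation("TERRITORY_BASED");
        System.out.println(collated.collation);       // prints TERRITORY_BASED
    }
}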
diff --git a/gdms/src/main/java/org/gdms/driver/jdbc/GeometryRule.java b/gdms/src/main/java/org/gdms/driver/jdbc/GeometryRule.java index 6d9423978..37a754134 100644 --- a/gdms/src/main/java/org/gdms/driver/jdbc/GeometryRule.java +++ b/gdms/src/main/java/org/gdms/driver/jdbc/GeometryRule.java @@ -1,69 +1,68 @@ /* * OrbisGIS is a GIS application dedicated to scientific spatial simulation. * This cross-platform GIS is developed at French IRSTV institute and is able * to manipulate and create vector and raster spatial information. OrbisGIS * is distributed under GPL 3 license. It is produced by the geo-informatic team of * the IRSTV Institute <http://www.irstv.cnrs.fr/>, CNRS FR 2488: * Erwan BOCHER, scientific researcher, * Thomas LEDUC, scientific researcher, * Fernando GONZALEZ CORTES, computer engineer. * * Copyright (C) 2007 Erwan BOCHER, Fernando GONZALEZ CORTES, Thomas LEDUC * * This file is part of OrbisGIS. * * OrbisGIS is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * OrbisGIS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with OrbisGIS. If not, see <http://www.gnu.org/licenses/>. * * For more information, please consult: * <http://orbisgis.cerma.archi.fr/> * <http://sourcesup.cru.fr/projects/orbisgis/> * * or contact directly: * erwan.bocher _at_ ec-nantes.fr * fergonco _at_ gmail.com * thomas.leduc _at_ cerma.archi.fr */ package org.gdms.driver.jdbc; import org.gdms.data.types.Constraint; import org.gdms.data.types.Type; public class GeometryRule extends AbstractConversionRule { @Override public int[] getValidConstraints() { - return addGlobalConstraints(Constraint.GEOMETRY_TYPE, - Constraint.DIMENSION_3D_GEOMETRY, Constraint.SRID); + return addGlobalConstraints(Constraint.DIMENSION_3D_GEOMETRY, Constraint.SRID); } @Override public int getOutputTypeCode() { return Type.GEOMETRY; } @Override public String getTypeName() { return "geometry"; } @Override public String getSQL(String fieldName, Type fieldType) { return null; } @Override public boolean canApply(Type type) { return type.getTypeCode() == Type.GEOMETRY; } }
true
true
public int[] getValidConstraints() {
	return addGlobalConstraints(Constraint.GEOMETRY_TYPE,
			Constraint.DIMENSION_3D_GEOMETRY, Constraint.SRID);
}
public int[] getValidConstraints() {
	return addGlobalConstraints(Constraint.DIMENSION_3D_GEOMETRY, Constraint.SRID);
}
diff --git a/src/java/fedora/server/storage/types/DatastreamReferencedContent.java b/src/java/fedora/server/storage/types/DatastreamReferencedContent.java index 32de82f3b..35d150f58 100755 --- a/src/java/fedora/server/storage/types/DatastreamReferencedContent.java +++ b/src/java/fedora/server/storage/types/DatastreamReferencedContent.java @@ -1,66 +1,62 @@ package fedora.server.storage.types; import fedora.server.errors.StreamIOException; import java.io.InputStream; import fedora.common.http.HttpInputStream; import fedora.common.http.WebClient; /** * * <p><b>Title:</b> DatastreamReferencedContent.java</p> * <p><b>Description:</b> Referenced Content.</p> * * @author [email protected] * @version $Id$ */ public class DatastreamReferencedContent extends Datastream { private static WebClient s_http; static { s_http = new WebClient(); } public DatastreamReferencedContent() { } public Datastream copy() { DatastreamReferencedContent ds = new DatastreamReferencedContent(); copy(ds); return ds; } /** * Gets an InputStream to the content of this externally-referenced * datastream. * <p></p> * The DSLocation of this datastream must be non-null before invoking * this method. * <p></p> * If successful, the DSMIME type is automatically set based on the * web server's response header. If the web server doesn't send a * valid Content-type: header, as a last resort, the content-type * is guessed by using a map of common extensions to mime-types. * <p></p> * If the content-length header is present in the response, DSSize * will be set accordingly. */ public InputStream getContentStream() throws StreamIOException { HttpInputStream contentStream = null; try { contentStream = s_http.get(DSLocation, true); DSSize = new Long(contentStream.getResponseHeaderValue("content-length","0")).longValue(); } catch (Throwable th) { - th.printStackTrace(); - throw new StreamIOException("[DatastreamReferencedContent] " - + "returned an error. The underlying error was a " - + th.getClass().getName() + " The message " - + "was \"" + th.getMessage() + "\" . "); + throw new StreamIOException("Error getting content stream", th); } return(contentStream); } }
true
true
public InputStream getContentStream()
		throws StreamIOException {
	HttpInputStream contentStream = null;
	try {
		contentStream = s_http.get(DSLocation, true);
		DSSize = new Long(contentStream.getResponseHeaderValue("content-length","0")).longValue();
	} catch (Throwable th) {
		th.printStackTrace();
		throw new StreamIOException("[DatastreamReferencedContent] "
				+ "returned an error. The underlying error was a "
				+ th.getClass().getName() + " The message "
				+ "was \"" + th.getMessage() + "\" . ");
	}
	return(contentStream);
}
public InputStream getContentStream()
		throws StreamIOException {
	HttpInputStream contentStream = null;
	try {
		contentStream = s_http.get(DSLocation, true);
		DSSize = new Long(contentStream.getResponseHeaderValue("content-length","0")).longValue();
	} catch (Throwable th) {
		throw new StreamIOException("Error getting content stream", th);
	}
	return(contentStream);
}
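The fix replaces printStackTrace plus a hand-assembled message with a chained exception, so the original Throwable survives as the cause. A self-contained sketch of that pattern; the local StreamIOException below is a stand-in that assumes only the (String, Throwable) constructor the fixed code uses.

import java.io.IOException;
import java.io.InputStream;

// Sketch of exception chaining: wrap the low-level failure as the cause of the
// new exception instead of printing the stack trace and flattening the class
// name and message into a string.
public class ExceptionChainingDemo {

    static class StreamIOException extends Exception {
        StreamIOException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    static InputStream open(String location) throws IOException {
        throw new IOException("connection refused: " + location); // simulated failure
    }

    static InputStream getContentStream(String location) throws StreamIOException {
        try {
            return open(location);
        } catch (Throwable th) {
            // The full stack trace and original message survive via getCause().
            throw new StreamIOException("Error getting content stream", th);
        }
    }

    public static void main(String[] args) {
        try {
            getContentStream("http://example.org/ds");
        } catch (StreamIOException e) {
            System.out.println(e.getMessage() + " <- " + e.getCause());
        }
    }
}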
diff --git a/src/org/eclipse/core/internal/refresh/PollingMonitor.java b/src/org/eclipse/core/internal/refresh/PollingMonitor.java index 4e1f5859..5b773b13 100644 --- a/src/org/eclipse/core/internal/refresh/PollingMonitor.java +++ b/src/org/eclipse/core/internal/refresh/PollingMonitor.java @@ -1,213 +1,216 @@ /******************************************************************************* * Copyright (c) 2004, 2006 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM - Initial API and implementation *******************************************************************************/ package org.eclipse.core.internal.refresh; import java.util.ArrayList; import org.eclipse.core.internal.resources.Resource; import org.eclipse.core.internal.utils.Messages; import org.eclipse.core.resources.*; import org.eclipse.core.resources.refresh.IRefreshMonitor; import org.eclipse.core.runtime.*; import org.eclipse.core.runtime.jobs.Job; import org.osgi.framework.Bundle; /** * The <code>PollingMonitor</code> is an <code>IRefreshMonitor</code> that * polls the file system rather than registering natively for call-backs. * * The polling monitor operates in iterations that span multiple invocations * of the job's run method. At the beginning of an iteration, a set of * all resource roots is collected. Each time the job runs, it removes items * from the set and searches for changes for a fixed period of time. * This ensures that the refresh job is broken into very small discrete * operations that do not interrupt the user's main-line activity. * * @since 3.0 */ public class PollingMonitor extends Job implements IRefreshMonitor { /** * The maximum duration of a single polling iteration */ private static final long MAX_DURATION = 250; /** * The amount of time that a changed root should remain * hot. */ private static final long HOT_ROOT_DECAY = 90000; /** * The minimum delay between executions of the polling monitor */ private static final long MIN_FREQUENCY = 4000; /** * The roots of resources which should be polled */ private final ArrayList resourceRoots; /** * The resources remaining to be refreshed in this iteration */ private final ArrayList toRefresh; /** * The root that has most recently been out of sync */ private IResource hotRoot; /** * The time the hot root was last refreshed */ private long hotRootTime; private final RefreshManager refreshManager; /** * True if this job has never been run. False otherwise. */ private boolean firstRun = true; /** * Creates a new polling monitor. */ public PollingMonitor(RefreshManager manager) { super(Messages.refresh_pollJob); this.refreshManager = manager; setPriority(Job.DECORATE); setSystem(true); resourceRoots = new ArrayList(); toRefresh = new ArrayList(); } /** * Add the given root to the list of roots that need to be polled. */ public synchronized void monitor(IResource root) { resourceRoots.add(root); schedule(MIN_FREQUENCY); } /** * Polls the file system under the root containers for changes. 
*/ protected IStatus run(IProgressMonitor monitor) { //sleep until resources plugin has finished starting if (firstRun) { firstRun = false; Bundle bundle = Platform.getBundle(ResourcesPlugin.PI_RESOURCES); long waitStart = System.currentTimeMillis(); while (bundle.getState() == Bundle.STARTING) { try { Thread.sleep(10000); } catch (InterruptedException e) { //ignore } //don't wait forever if ((System.currentTimeMillis() -waitStart) > 90000) break; } } long time = System.currentTimeMillis(); //check to see if we need to start an iteration if (toRefresh.isEmpty()) { beginIteration(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "New polling iteration on " + toRefresh.size() + " roots"); //$NON-NLS-1$ //$NON-NLS-2$ } final int oldSize = toRefresh.size(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "started polling"); //$NON-NLS-1$ //refresh the hot root if applicable if (time - hotRootTime > HOT_ROOT_DECAY) hotRoot = null; else if (hotRoot != null && !monitor.isCanceled()) poll(hotRoot); //process roots that have not yet been refreshed this iteration final long loopStart = System.currentTimeMillis(); while (!toRefresh.isEmpty()) { if (monitor.isCanceled()) break; poll((IResource) toRefresh.remove(toRefresh.size() - 1)); //stop the iteration if we have exceed maximum duration if (System.currentTimeMillis() - loopStart > MAX_DURATION) break; } time = System.currentTimeMillis() - time; if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "polled " + (oldSize - toRefresh.size()) + " roots in " + time + "ms"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //reschedule automatically - shouldRun will cancel if not needed //make sure it doesn't run more than 5% of the time long delay = Math.max(MIN_FREQUENCY, time * 20); + //back off even more if there are other jobs running + if (!getJobManager().isIdle()) + delay *= 2; if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "rescheduling polling job in: " + delay / 1000 + " seconds"); //$NON-NLS-1$ //$NON-NLS-2$ //don't reschedule the job if the resources plugin has been shut down if (Platform.getBundle(ResourcesPlugin.PI_RESOURCES).getState() == Bundle.ACTIVE) schedule(delay); return Status.OK_STATUS; } /** * Instructs the polling job to do one complete iteration of all workspace roots, and * then discard itself. This is used when * the refresh manager is first turned on if there is a native monitor installed (which * don't handle changes that occurred while the monitor was turned off). 
*/ void runOnce() { synchronized (this) { //add all roots to the refresh list, but not to the real set of roots //this will cause the job to never run again once it has exhausted //the set of roots to refresh IProject[] projects = ResourcesPlugin.getWorkspace().getRoot().getProjects(); for (int i = 0; i < projects.length; i++) toRefresh.add(projects[i]); } schedule(MIN_FREQUENCY); } private void poll(IResource resource) { if (resource.isSynchronized(IResource.DEPTH_INFINITE)) return; //don't refresh links with no local content if (resource.isLinked() && !((Resource)resource).getStore().fetchInfo().exists()) return; //submit refresh request refreshManager.refresh(resource); hotRoot = resource; hotRootTime = System.currentTimeMillis(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "new hot root: " + resource); //$NON-NLS-1$ } /* (non-Javadoc) * @see Job#shouldRun */ public boolean shouldRun() { //only run if there is something to refresh return !resourceRoots.isEmpty() || !toRefresh.isEmpty(); } /** * Copies the resources to be polled into the list of resources * to refresh this iteration. This method is synchronized to * guard against concurrent access to the resourceRoots field. */ private synchronized void beginIteration() { toRefresh.addAll(resourceRoots); if (hotRoot != null) toRefresh.remove(hotRoot); } /* * @see org.eclipse.core.resources.refresh.IRefreshMonitor#unmonitor(IContainer) */ public synchronized void unmonitor(IResource resource) { if (resource == null) resourceRoots.clear(); else resourceRoots.remove(resource); if (resourceRoots.isEmpty()) cancel(); } }
true
true
protected IStatus run(IProgressMonitor monitor) { //sleep until resources plugin has finished starting if (firstRun) { firstRun = false; Bundle bundle = Platform.getBundle(ResourcesPlugin.PI_RESOURCES); long waitStart = System.currentTimeMillis(); while (bundle.getState() == Bundle.STARTING) { try { Thread.sleep(10000); } catch (InterruptedException e) { //ignore } //don't wait forever if ((System.currentTimeMillis() -waitStart) > 90000) break; } } long time = System.currentTimeMillis(); //check to see if we need to start an iteration if (toRefresh.isEmpty()) { beginIteration(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "New polling iteration on " + toRefresh.size() + " roots"); //$NON-NLS-1$ //$NON-NLS-2$ } final int oldSize = toRefresh.size(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "started polling"); //$NON-NLS-1$ //refresh the hot root if applicable if (time - hotRootTime > HOT_ROOT_DECAY) hotRoot = null; else if (hotRoot != null && !monitor.isCanceled()) poll(hotRoot); //process roots that have not yet been refreshed this iteration final long loopStart = System.currentTimeMillis(); while (!toRefresh.isEmpty()) { if (monitor.isCanceled()) break; poll((IResource) toRefresh.remove(toRefresh.size() - 1)); //stop the iteration if we have exceed maximum duration if (System.currentTimeMillis() - loopStart > MAX_DURATION) break; } time = System.currentTimeMillis() - time; if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "polled " + (oldSize - toRefresh.size()) + " roots in " + time + "ms"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //reschedule automatically - shouldRun will cancel if not needed //make sure it doesn't run more than 5% of the time long delay = Math.max(MIN_FREQUENCY, time * 20); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "rescheduling polling job in: " + delay / 1000 + " seconds"); //$NON-NLS-1$ //$NON-NLS-2$ //don't reschedule the job if the resources plugin has been shut down if (Platform.getBundle(ResourcesPlugin.PI_RESOURCES).getState() == Bundle.ACTIVE) schedule(delay); return Status.OK_STATUS; }
protected IStatus run(IProgressMonitor monitor) { //sleep until resources plugin has finished starting if (firstRun) { firstRun = false; Bundle bundle = Platform.getBundle(ResourcesPlugin.PI_RESOURCES); long waitStart = System.currentTimeMillis(); while (bundle.getState() == Bundle.STARTING) { try { Thread.sleep(10000); } catch (InterruptedException e) { //ignore } //don't wait forever if ((System.currentTimeMillis() -waitStart) > 90000) break; } } long time = System.currentTimeMillis(); //check to see if we need to start an iteration if (toRefresh.isEmpty()) { beginIteration(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "New polling iteration on " + toRefresh.size() + " roots"); //$NON-NLS-1$ //$NON-NLS-2$ } final int oldSize = toRefresh.size(); if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "started polling"); //$NON-NLS-1$ //refresh the hot root if applicable if (time - hotRootTime > HOT_ROOT_DECAY) hotRoot = null; else if (hotRoot != null && !monitor.isCanceled()) poll(hotRoot); //process roots that have not yet been refreshed this iteration final long loopStart = System.currentTimeMillis(); while (!toRefresh.isEmpty()) { if (monitor.isCanceled()) break; poll((IResource) toRefresh.remove(toRefresh.size() - 1)); //stop the iteration if we have exceed maximum duration if (System.currentTimeMillis() - loopStart > MAX_DURATION) break; } time = System.currentTimeMillis() - time; if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "polled " + (oldSize - toRefresh.size()) + " roots in " + time + "ms"); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //reschedule automatically - shouldRun will cancel if not needed //make sure it doesn't run more than 5% of the time long delay = Math.max(MIN_FREQUENCY, time * 20); //back off even more if there are other jobs running if (!getJobManager().isIdle()) delay *= 2; if (RefreshManager.DEBUG) System.out.println(RefreshManager.DEBUG_PREFIX + "rescheduling polling job in: " + delay / 1000 + " seconds"); //$NON-NLS-1$ //$NON-NLS-2$ //don't reschedule the job if the resources plugin has been shut down if (Platform.getBundle(ResourcesPlugin.PI_RESOURCES).getState() == Bundle.ACTIVE) schedule(delay); return Status.OK_STATUS; }
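The functional change in this pair is the extra back-off when rescheduling: the delay is still at least MIN_FREQUENCY and at least twenty times the last poll's duration (keeping polling under roughly 5% of wall-clock time), and the patch doubles it when the job manager reports other running jobs. A standalone sketch of that arithmetic, with isIdle standing in for IJobManager.isIdle():

// Rescheduling arithmetic from the patch, isolated for illustration.
public class PollingDelayDemo {

    private static final long MIN_FREQUENCY = 4000; // ms

    static long nextDelay(long lastPollDurationMillis, boolean isIdle) {
        long delay = Math.max(MIN_FREQUENCY, lastPollDurationMillis * 20);
        if (!isIdle) {
            delay *= 2; // back off further while other jobs are active
        }
        return delay;
    }

    public static void main(String[] args) {
        System.out.println(nextDelay(100, true));   // 4000  (floor applies)
        System.out.println(nextDelay(500, true));   // 10000 (about 5% duty cycle)
        System.out.println(nextDelay(500, false));  // 20000 (doubled when busy)
    }
}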
diff --git a/spring-batch-samples/src/test/java/org/springframework/batch/sample/iosample/JdbcPagingRestartIntegrationTests.java b/spring-batch-samples/src/test/java/org/springframework/batch/sample/iosample/JdbcPagingRestartIntegrationTests.java index 11019d9bb..062a90d0b 100644 --- a/spring-batch-samples/src/test/java/org/springframework/batch/sample/iosample/JdbcPagingRestartIntegrationTests.java +++ b/spring-batch-samples/src/test/java/org/springframework/batch/sample/iosample/JdbcPagingRestartIntegrationTests.java @@ -1,84 +1,85 @@ /* * Copyright 2006-2007 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.batch.sample.iosample; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import javax.sql.DataSource; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.batch.core.JobParametersBuilder; import org.springframework.batch.core.StepExecution; import org.springframework.batch.item.ExecutionContext; import org.springframework.batch.item.ItemReader; import org.springframework.batch.item.ItemStream; import org.springframework.batch.sample.domain.trade.CustomerCredit; import org.springframework.batch.test.MetaDataInstanceFactory; import org.springframework.batch.test.StepScopeTestExecutionListener; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.jdbc.core.simple.SimpleJdbcTemplate; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.TestExecutionListeners; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.DependencyInjectionTestExecutionListener; import org.springframework.test.jdbc.SimpleJdbcTestUtils; /** * @author Dave Syer * @since 2.1 */ @RunWith(SpringJUnit4ClassRunner.class) @TestExecutionListeners( { DependencyInjectionTestExecutionListener.class, StepScopeTestExecutionListener.class }) @ContextConfiguration(locations = { "/simple-job-launcher-context.xml", "/jobs/ioSampleJob.xml", "/jobs/iosample/jdbcPaging.xml" }) public class JdbcPagingRestartIntegrationTests { @Autowired private ItemReader<CustomerCredit> reader; private SimpleJdbcTemplate jdbcTemplate; @Autowired public void setDataSource(DataSource dataSource) { jdbcTemplate = new SimpleJdbcTemplate(dataSource); } public StepExecution getStepExecution() { return MetaDataInstanceFactory.createStepExecution(new JobParametersBuilder().addDouble("credit", 10000.) 
.toJobParameters()); } @Test public void testReader() throws Exception { ExecutionContext executionContext = new ExecutionContext(); int count = SimpleJdbcTestUtils.countRowsInTable(jdbcTemplate, "CUSTOMER")-2; executionContext.putInt("JdbcPagingItemReader.read.count", count); - executionContext.putInt("JdbcPagingItemReader.start.after", 2); + // Assume the primary keys are in order + executionContext.putInt("JdbcPagingItemReader.start.after", count); ((ItemStream)reader).open(executionContext); CustomerCredit item = reader.read(); // System.err.println(item); assertNotNull(item); item = reader.read(); // System.err.println(item); assertNotNull(item); item = reader.read(); // System.err.println(item); assertNull(item); } }
true
true
public void testReader() throws Exception {

	ExecutionContext executionContext = new ExecutionContext();
	int count = SimpleJdbcTestUtils.countRowsInTable(jdbcTemplate, "CUSTOMER")-2;
	executionContext.putInt("JdbcPagingItemReader.read.count", count);
	executionContext.putInt("JdbcPagingItemReader.start.after", 2);
	((ItemStream)reader).open(executionContext);

	CustomerCredit item = reader.read();
	// System.err.println(item);
	assertNotNull(item);
	item = reader.read();
	// System.err.println(item);
	assertNotNull(item);
	item = reader.read();
	// System.err.println(item);
	assertNull(item);

}
public void testReader() throws Exception {

	ExecutionContext executionContext = new ExecutionContext();
	int count = SimpleJdbcTestUtils.countRowsInTable(jdbcTemplate, "CUSTOMER")-2;
	executionContext.putInt("JdbcPagingItemReader.read.count", count);
	// Assume the primary keys are in order
	executionContext.putInt("JdbcPagingItemReader.start.after", count);
	((ItemStream)reader).open(executionContext);

	CustomerCredit item = reader.read();
	// System.err.println(item);
	assertNotNull(item);
	item = reader.read();
	// System.err.println(item);
	assertNotNull(item);
	item = reader.read();
	// System.err.println(item);
	assertNull(item);

}
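The fix stores count rather than the literal 2 under JdbcPagingItemReader.start.after, relying on the stated assumption that primary keys are dense and ascending, so a reader resuming after key count has exactly two rows left. A simplified, self-contained sketch of that resume-after-key idea; resumeAfter is illustrative only, not Spring Batch's paging reader.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// "count" rows were already read before the simulated restart, so resuming
// after the key equal to count leaves exactly two rows to read.
public class RestartAfterKeyDemo {

    static List<Integer> resumeAfter(List<Integer> orderedIds, int startAfter) {
        List<Integer> remaining = new ArrayList<Integer>();
        for (Integer id : orderedIds) {
            if (id > startAfter)      // skip everything up to the saved key
                remaining.add(id);
        }
        return remaining;
    }

    public static void main(String[] args) {
        List<Integer> ids = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8);
        int count = ids.size() - 2;                   // rows read before the restart
        List<Integer> remaining = resumeAfter(ids, count);
        System.out.println(remaining);                // [7, 8]: two reads, then null
    }
}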
diff --git a/src/no/runsafe/eventengine/libraries/PlayerLibrary.java b/src/no/runsafe/eventengine/libraries/PlayerLibrary.java index ea297f0..19c7ae9 100644 --- a/src/no/runsafe/eventengine/libraries/PlayerLibrary.java +++ b/src/no/runsafe/eventengine/libraries/PlayerLibrary.java @@ -1,384 +1,384 @@ package no.runsafe.eventengine.libraries; import no.runsafe.eventengine.events.CustomEvent; import no.runsafe.eventengine.handlers.SeatbeltHandler; import no.runsafe.framework.RunsafePlugin; import no.runsafe.framework.api.ILocation; import no.runsafe.framework.api.IScheduler; import no.runsafe.framework.api.lua.*; import no.runsafe.framework.api.player.IPlayer; import no.runsafe.framework.internal.LegacyMaterial; import no.runsafe.framework.minecraft.Buff; import no.runsafe.framework.minecraft.Item; import no.runsafe.framework.minecraft.inventory.RunsafeInventory; import no.runsafe.framework.minecraft.item.meta.RunsafeMeta; import no.runsafe.framework.minecraft.player.GameMode; import no.runsafe.framework.text.ChatColour; import no.runsafe.worldguardbridge.IRegionControl; import org.bukkit.util.Vector; import org.luaj.vm2.LuaTable; import java.util.ArrayList; import java.util.List; public class PlayerLibrary extends Library { public PlayerLibrary(RunsafePlugin plugin, IScheduler scheduler, IRegionControl regionControl) { super(plugin, "player"); this.scheduler = scheduler; this.regionControl = regionControl; } @Override protected LuaTable getAPI() { LuaTable lib = new LuaTable(); lib.set("kill", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).setHealth(0.0D); } }); lib.set("getLocation", new LocationFunction() { @Override public ILocation run(FunctionParameters parameters) { return parameters.getPlayer(0).getLocation(); } }); lib.set("isDead", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).isDead(); } }); lib.set("sendMessage", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).sendColouredMessage(parameters.getString(1)); } }); lib.set("setHealth", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).setHealth(parameters.getDouble(1)); } }); lib.set("teleportToLocation", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getLocation(1)); } }); lib.set("teleportToLocationRotation", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getLocation(1, true)); } }); lib.set("teleportToPlayer", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getPlayer(1)); } }); lib.set("cloneInventory", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { CloneInventory(parameters.getPlayer(0), parameters.getPlayer(1)); } }); lib.set("sendEvent", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { new CustomEvent(parameters.getPlayer(0), parameters.getString(1)).Fire(); } }); lib.set("clearInventory", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); player.clearInventory(); player.getEquipment().clear(); } }); lib.set("addItem", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { 
AddItem(parameters.getPlayer(0), parameters.getString(1), parameters.getInt(2)); } }); lib.set("getPlayerAtLocation", new StringFunction() { @Override public String run(FunctionParameters parameters) { return GetPlayerAtLocation(parameters.getLocation(0)); } }); lib.set("isOnline", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).isOnline(); } }); lib.set("hasPermission", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).hasPermission(parameters.getString(1)); } }); lib.set("addPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addPermission(parameters.getString(1)); } }); lib.set("addWorldPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addPermission(parameters.getString(1), parameters.getString(2)); } }); lib.set("removePermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removePermission(parameters.getString(1)); } }); lib.set("removeWorldPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removePermission(parameters.getString(1), parameters.getString(2)); } }); lib.set("removePotionEffects", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removeBuffs(); } }); lib.set("addPotionEffect", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addBuff(Buff.getFromName(parameters.getString(1)).amplification(parameters.getInt(2)).duration(parameters.getInt(3))); } }); lib.set("closeInventory", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { scheduler.startSyncTask(new Runnable() { @Override public void run() { parameters.getPlayer(0).closeInventory(); } }, 1L); } }); lib.set("setVelocity", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { parameters.getPlayer(0).setVelocity(new Vector( parameters.getDouble(1), parameters.getDouble(2), parameters.getDouble(3) )); } }); lib.set("lockMount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { SeatbeltHandler.lockPlayer(parameters.getPlayer(0)); } }); lib.set("unlockMount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { SeatbeltHandler.unlockPlayer(parameters.getPlayer(0)); } }); lib.set("dismount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).leaveVehicle(); } }); lib.set("isInRegion", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { String checkRegion = parameters.getString(1) + '-' + parameters.getString(2); return regionControl.getApplicableRegions(parameters.getPlayer(0)).contains(checkRegion); } }); lib.set("setMode", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { GameMode mode = GameMode.search(parameters.getString(1)); if (mode != null) mode.apply(parameters.getPlayer(0)); } }); lib.set("removeItem", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); player.removeItem(Item.get(parameters.getString(1)), parameters.getInt(2)); } }); 
lib.set("removeItemByName", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); RunsafeInventory inventory = player.getInventory(); String itemName = parameters.getString(1); List<Integer> removeItems = new ArrayList<Integer>(); int curr = 0; while (curr < inventory.getSize()) { RunsafeMeta item = inventory.getItemInSlot(curr); if (item != null) { String displayName = item.hasDisplayName() ? item.getDisplayName() : ChatColour.Strip(item.getNormalName()); if (itemName.equals(displayName)) removeItems.add(curr); } curr++; } for (int slot : removeItems) inventory.removeItemInSlot(slot); } }); lib.set("hasItem", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { - return parameters.getPlayer(0).hasItem(Item.get(parameters.getString(1)), parameters.getInt(2)); + return parameters.getPlayer(0).hasItemStrict(Item.get(parameters.getString(1)), parameters.getInt(2)); } }); lib.set("hasItemWithName", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { String requiredName = parameters.getString(1); for (RunsafeMeta item : parameters.getPlayer(0).getInventory().getContents()) { String displayName = item.getDisplayName(); if (displayName != null && displayName.equals(requiredName)) return true; } return false; } }); return lib; } private static void CloneInventory(IPlayer source, IPlayer target) { target.getInventory().unserialize(source.getInventory().serialize()); target.updateInventory(); } private static String GetPlayerAtLocation(ILocation location) { for (IPlayer player : location.getWorld().getPlayers()) if (player.getLocation().distance(location) < 2) return player.getName(); return null; } private static void AddItem(IPlayer player,String item, int amount) { RunsafeMeta meta = Item.get(item).getItem(); meta.setAmount(amount); player.give(meta); } private final IScheduler scheduler; private final IRegionControl regionControl; }
true
true
protected LuaTable getAPI() { LuaTable lib = new LuaTable(); lib.set("kill", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).setHealth(0.0D); } }); lib.set("getLocation", new LocationFunction() { @Override public ILocation run(FunctionParameters parameters) { return parameters.getPlayer(0).getLocation(); } }); lib.set("isDead", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).isDead(); } }); lib.set("sendMessage", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).sendColouredMessage(parameters.getString(1)); } }); lib.set("setHealth", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).setHealth(parameters.getDouble(1)); } }); lib.set("teleportToLocation", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getLocation(1)); } }); lib.set("teleportToLocationRotation", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getLocation(1, true)); } }); lib.set("teleportToPlayer", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getPlayer(1)); } }); lib.set("cloneInventory", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { CloneInventory(parameters.getPlayer(0), parameters.getPlayer(1)); } }); lib.set("sendEvent", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { new CustomEvent(parameters.getPlayer(0), parameters.getString(1)).Fire(); } }); lib.set("clearInventory", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); player.clearInventory(); player.getEquipment().clear(); } }); lib.set("addItem", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { AddItem(parameters.getPlayer(0), parameters.getString(1), parameters.getInt(2)); } }); lib.set("getPlayerAtLocation", new StringFunction() { @Override public String run(FunctionParameters parameters) { return GetPlayerAtLocation(parameters.getLocation(0)); } }); lib.set("isOnline", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).isOnline(); } }); lib.set("hasPermission", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).hasPermission(parameters.getString(1)); } }); lib.set("addPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addPermission(parameters.getString(1)); } }); lib.set("addWorldPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addPermission(parameters.getString(1), parameters.getString(2)); } }); lib.set("removePermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removePermission(parameters.getString(1)); } }); lib.set("removeWorldPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removePermission(parameters.getString(1), parameters.getString(2)); } }); lib.set("removePotionEffects", new VoidFunction() { 
@Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removeBuffs(); } }); lib.set("addPotionEffect", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addBuff(Buff.getFromName(parameters.getString(1)).amplification(parameters.getInt(2)).duration(parameters.getInt(3))); } }); lib.set("closeInventory", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { scheduler.startSyncTask(new Runnable() { @Override public void run() { parameters.getPlayer(0).closeInventory(); } }, 1L); } }); lib.set("setVelocity", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { parameters.getPlayer(0).setVelocity(new Vector( parameters.getDouble(1), parameters.getDouble(2), parameters.getDouble(3) )); } }); lib.set("lockMount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { SeatbeltHandler.lockPlayer(parameters.getPlayer(0)); } }); lib.set("unlockMount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { SeatbeltHandler.unlockPlayer(parameters.getPlayer(0)); } }); lib.set("dismount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).leaveVehicle(); } }); lib.set("isInRegion", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { String checkRegion = parameters.getString(1) + '-' + parameters.getString(2); return regionControl.getApplicableRegions(parameters.getPlayer(0)).contains(checkRegion); } }); lib.set("setMode", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { GameMode mode = GameMode.search(parameters.getString(1)); if (mode != null) mode.apply(parameters.getPlayer(0)); } }); lib.set("removeItem", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); player.removeItem(Item.get(parameters.getString(1)), parameters.getInt(2)); } }); lib.set("removeItemByName", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); RunsafeInventory inventory = player.getInventory(); String itemName = parameters.getString(1); List<Integer> removeItems = new ArrayList<Integer>(); int curr = 0; while (curr < inventory.getSize()) { RunsafeMeta item = inventory.getItemInSlot(curr); if (item != null) { String displayName = item.hasDisplayName() ? item.getDisplayName() : ChatColour.Strip(item.getNormalName()); if (itemName.equals(displayName)) removeItems.add(curr); } curr++; } for (int slot : removeItems) inventory.removeItemInSlot(slot); } }); lib.set("hasItem", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).hasItem(Item.get(parameters.getString(1)), parameters.getInt(2)); } }); lib.set("hasItemWithName", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { String requiredName = parameters.getString(1); for (RunsafeMeta item : parameters.getPlayer(0).getInventory().getContents()) { String displayName = item.getDisplayName(); if (displayName != null && displayName.equals(requiredName)) return true; } return false; } }); return lib; }
protected LuaTable getAPI() { LuaTable lib = new LuaTable(); lib.set("kill", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).setHealth(0.0D); } }); lib.set("getLocation", new LocationFunction() { @Override public ILocation run(FunctionParameters parameters) { return parameters.getPlayer(0).getLocation(); } }); lib.set("isDead", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).isDead(); } }); lib.set("sendMessage", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).sendColouredMessage(parameters.getString(1)); } }); lib.set("setHealth", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).setHealth(parameters.getDouble(1)); } }); lib.set("teleportToLocation", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getLocation(1)); } }); lib.set("teleportToLocationRotation", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getLocation(1, true)); } }); lib.set("teleportToPlayer", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).teleport(parameters.getPlayer(1)); } }); lib.set("cloneInventory", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { CloneInventory(parameters.getPlayer(0), parameters.getPlayer(1)); } }); lib.set("sendEvent", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { new CustomEvent(parameters.getPlayer(0), parameters.getString(1)).Fire(); } }); lib.set("clearInventory", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); player.clearInventory(); player.getEquipment().clear(); } }); lib.set("addItem", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { AddItem(parameters.getPlayer(0), parameters.getString(1), parameters.getInt(2)); } }); lib.set("getPlayerAtLocation", new StringFunction() { @Override public String run(FunctionParameters parameters) { return GetPlayerAtLocation(parameters.getLocation(0)); } }); lib.set("isOnline", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).isOnline(); } }); lib.set("hasPermission", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).hasPermission(parameters.getString(1)); } }); lib.set("addPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addPermission(parameters.getString(1)); } }); lib.set("addWorldPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addPermission(parameters.getString(1), parameters.getString(2)); } }); lib.set("removePermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removePermission(parameters.getString(1)); } }); lib.set("removeWorldPermission", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removePermission(parameters.getString(1), parameters.getString(2)); } }); lib.set("removePotionEffects", new VoidFunction() { 
@Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).removeBuffs(); } }); lib.set("addPotionEffect", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).addBuff(Buff.getFromName(parameters.getString(1)).amplification(parameters.getInt(2)).duration(parameters.getInt(3))); } }); lib.set("closeInventory", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { scheduler.startSyncTask(new Runnable() { @Override public void run() { parameters.getPlayer(0).closeInventory(); } }, 1L); } }); lib.set("setVelocity", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { parameters.getPlayer(0).setVelocity(new Vector( parameters.getDouble(1), parameters.getDouble(2), parameters.getDouble(3) )); } }); lib.set("lockMount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { SeatbeltHandler.lockPlayer(parameters.getPlayer(0)); } }); lib.set("unlockMount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { SeatbeltHandler.unlockPlayer(parameters.getPlayer(0)); } }); lib.set("dismount", new VoidFunction() { @Override protected void run(FunctionParameters parameters) { parameters.getPlayer(0).leaveVehicle(); } }); lib.set("isInRegion", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { String checkRegion = parameters.getString(1) + '-' + parameters.getString(2); return regionControl.getApplicableRegions(parameters.getPlayer(0)).contains(checkRegion); } }); lib.set("setMode", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { GameMode mode = GameMode.search(parameters.getString(1)); if (mode != null) mode.apply(parameters.getPlayer(0)); } }); lib.set("removeItem", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); player.removeItem(Item.get(parameters.getString(1)), parameters.getInt(2)); } }); lib.set("removeItemByName", new VoidFunction() { @Override protected void run(final FunctionParameters parameters) { IPlayer player = parameters.getPlayer(0); RunsafeInventory inventory = player.getInventory(); String itemName = parameters.getString(1); List<Integer> removeItems = new ArrayList<Integer>(); int curr = 0; while (curr < inventory.getSize()) { RunsafeMeta item = inventory.getItemInSlot(curr); if (item != null) { String displayName = item.hasDisplayName() ? item.getDisplayName() : ChatColour.Strip(item.getNormalName()); if (itemName.equals(displayName)) removeItems.add(curr); } curr++; } for (int slot : removeItems) inventory.removeItemInSlot(slot); } }); lib.set("hasItem", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { return parameters.getPlayer(0).hasItemStrict(Item.get(parameters.getString(1)), parameters.getInt(2)); } }); lib.set("hasItemWithName", new BooleanFunction() { @Override protected boolean run(FunctionParameters parameters) { String requiredName = parameters.getString(1); for (RunsafeMeta item : parameters.getPlayer(0).getInventory().getContents()) { String displayName = item.getDisplayName(); if (displayName != null && displayName.equals(requiredName)) return true; } return false; } }); return lib; }
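The single functional change in this pair is hasItem → hasItemStrict inside the Lua hasItem binding. The precise semantics of the strict variant belong to the Runsafe framework and are not visible here; the sketch below is a labeled assumption only, modelling one plausible reading in which the strict check counts only unrenamed stacks while the lenient check counts everything of the right material.

import java.util.Arrays;
import java.util.List;

// Toy inventory check. ASSUMPTION: this models only one plausible reading of
// hasItem vs. hasItemStrict (the lenient check counts renamed stacks, the
// strict one does not); it is not the Runsafe framework's implementation.
public class StrictItemCheckDemo {

    static final class Stack {
        final String material;
        final int amount;
        final String displayName; // null means an unrenamed, plain item

        Stack(String material, int amount, String displayName) {
            this.material = material;
            this.amount = amount;
            this.displayName = displayName;
        }
    }

    static boolean hasItemLenient(List<Stack> inventory, String material, int needed) {
        int total = 0;
        for (Stack s : inventory)
            if (s.material.equals(material))
                total += s.amount;
        return total >= needed;
    }

    static boolean hasItemStrict(List<Stack> inventory, String material, int needed) {
        int total = 0;
        for (Stack s : inventory)
            if (s.material.equals(material) && s.displayName == null)
                total += s.amount; // only plain stacks count toward the requirement
        return total >= needed;
    }

    public static void main(String[] args) {
        List<Stack> inventory = Arrays.asList(
                new Stack("GOLD_INGOT", 3, "Quest Token"), // renamed stack
                new Stack("GOLD_INGOT", 1, null));         // plain stack
        System.out.println(hasItemLenient(inventory, "GOLD_INGOT", 4)); // true
        System.out.println(hasItemStrict(inventory, "GOLD_INGOT", 4));  // false
    }
}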
diff --git a/srcj/com/sun/electric/tool/cvspm/Update.java b/srcj/com/sun/electric/tool/cvspm/Update.java index 739c6f5b1..74724b79c 100644 --- a/srcj/com/sun/electric/tool/cvspm/Update.java +++ b/srcj/com/sun/electric/tool/cvspm/Update.java @@ -1,479 +1,480 @@ /* -*- tab-width: 4 -*- * * Electric(tm) VLSI Design System * * File: Update.java * * Copyright (c) 2003 Sun Microsystems and Static Free Software * * Electric(tm) is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Electric(tm) is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Electric(tm); see the file COPYING. If not, write to * the Free Software Foundation, Inc., 59 Temple Place, Suite 330, * Boston, Mass 02111-1307, USA. */ package com.sun.electric.tool.cvspm; import com.sun.electric.database.hierarchy.Library; import com.sun.electric.database.hierarchy.Cell; import com.sun.electric.tool.Job; import com.sun.electric.tool.io.input.LibraryFiles; import com.sun.electric.tool.io.output.DELIB; import com.sun.electric.tool.user.User; import com.sun.electric.tool.user.ui.TopLevel; import javax.swing.JOptionPane; import java.io.*; import java.util.*; /** * Created by IntelliJ IDEA. * User: gainsley * Date: Mar 13, 2006 * Time: 3:30:40 PM * To change this template use File | Settings | File Templates. */ public class Update { public static final int UPDATE = 0; public static final int STATUS = 1; public static final int ROLLBACK = 2; // ------------------ Update/Status --------------------- /** * Update all libraries. * @param type the type of update to do */ public static void updateProject(int type) { List<Library> allLibs = new ArrayList<Library>(); for (Iterator<Library> it = Library.getLibraries(); it.hasNext(); ) { Library lib = it.next(); if (lib.isHidden()) continue; if (!lib.isFromDisk()) continue; if (lib.getName().equals("spiceparts")) continue; allLibs.add(lib); } update(allLibs, null, type, true); } /** * Update all open libraries. * @param type the type of update to do */ public static void updateOpenLibraries(int type) { List<Library> allLibs = new ArrayList<Library>(); for (Iterator<Library> it = Library.getLibraries(); it.hasNext(); ) { Library lib = it.next(); if (lib.isHidden()) continue; if (!lib.isFromDisk()) continue; if (lib.getName().equals("spiceparts")) continue; allLibs.add(lib); } update(allLibs, null, type, false); } /** * Update all Cells from a library. * @param lib * @param type the type of update to do */ public static void updateLibrary(Library lib, int type) { List<Library> libsToUpdate = new ArrayList<Library>(); libsToUpdate.add(lib); update(libsToUpdate, null, type, false); } /** * Update a Cell. 
* @param cell * @param type the type of update to do */ public static void updateCell(Cell cell, int type) { List<Cell> cellsToUpdate = new ArrayList<Cell>(); cellsToUpdate.add(cell); update(null, cellsToUpdate, type, false); } /** * Run Update/Status/Rollback on the libraries and cells * @param libs * @param cells * @param type * @param updateProject */ public static void update(List<Library> libs, List<Cell> cells, int type, boolean updateProject) { if (libs == null) libs = new ArrayList<Library>(); if (cells == null) cells = new ArrayList<Cell>(); // make sure cells are part of a DELIB CVSLibrary.LibsCells bad = CVSLibrary.notFromDELIB(cells); if (type == STATUS) { // remove offending cells for (Cell cell : bad.cells) cells.remove(cell); } else if (bad.cells.size() > 0) { CVS.showError("Error: the following Cells are not part of a DELIB library and cannot be acted upon individually", "CVS "+getMessage(type)+" Error", bad.libs, bad.cells); return; } // make sure the selecetd objecs are in cvs bad = CVSLibrary.getNotInCVS(libs, cells); // for STATUS, remove libraries not in cvs, and also set their state unknown if (type == STATUS) { for (Library lib : bad.libs) { libs.remove(lib); CVSLibrary.setState(lib, State.UNKNOWN); } for (Cell cell : bad.cells) { cells.remove(cell); CVSLibrary.setState(cell, State.UNKNOWN); } } else if (bad.libs.size() > 0 || bad.cells.size() > 0) { // if any of them not in cvs, issue error and abort CVS.showError("Error: the following Libraries or Cells are not in CVS", "CVS "+getMessage(type)+" Error", bad.libs, bad.cells); return; } // for update or rollback, make sure they are also not modified if (type == UPDATE || type == ROLLBACK) { bad = CVSLibrary.getModified(libs, cells); if (bad.libs.size() > 0 || bad.cells.size() > 0) { CVS.showError("Error: the following Libraries or Cells must be saved first", "CVS "+getMessage(type)+" Error", bad.libs, bad.cells); return; } } // optimize a little, remove cells from cells list if cell's lib in libs list CVSLibrary.LibsCells good = CVSLibrary.consolidate(libs, cells); (new UpdateJob(good.cells, good.libs, type, updateProject)).startJob(); } private static class UpdateJob extends Job { private List<Cell> cellsToUpdate; private List<Library> librariesToUpdate; private int type; private List<Library> libsToReload; private boolean updateProject; // update whole project private int exitVal; /** * Update cells and/or libraries. * @param cellsToUpdate * @param librariesToUpdate */ private UpdateJob(List<Cell> cellsToUpdate, List<Library> librariesToUpdate, int type, boolean updateProject) { super("CVS Update Library", User.getUserTool(), ((type==STATUS)?Job.Type.EXAMINE:Job.Type.CHANGE), null, null, Job.Priority.USER); this.cellsToUpdate = cellsToUpdate; this.librariesToUpdate = librariesToUpdate; this.type = type; this.updateProject = updateProject; exitVal = -1; if (this.cellsToUpdate == null) this.cellsToUpdate = new ArrayList<Cell>(); if (this.librariesToUpdate == null) this.librariesToUpdate = new ArrayList<Library>(); } public boolean doIt() { String useDir = CVS.getUseDir(librariesToUpdate, cellsToUpdate); StringBuffer libs = CVS.getLibraryFiles(librariesToUpdate, useDir); StringBuffer cells = CVS.getCellFiles(cellsToUpdate, useDir); // disable this for now, since users with older versions // of electric will not commit new lastModified file, // and then users of new electric will not get updated files /* if (!updateProject && (type != ROLLBACK)) { // optimization: for DELIBs, check header first. 
If that // requires an update, then check cells List<Library> checkedDelibs = new ArrayList<Library>(); StringBuffer lastModifiedFiles = CVS.getDELIBLastModifiedFiles(librariesToUpdate, useDir, checkedDelibs); String arg = lastModifiedFiles.toString().trim(); if (!arg.equals("")) { // remove libs that can be checked, they will be added again if // cvs says the lastModified file has been changed in the repository for (Library lib : checkedDelibs) librariesToUpdate.remove(lib); StatusResult result = update(arg, useDir, type); // check for updated libraries List<Library> delibsToUpdate = new ArrayList<Library>(); delibsToUpdate.addAll(result.getLastModifiedFileLibs(State.UPDATE)); delibsToUpdate.addAll(result.getLastModifiedFileLibs(State.CONFLICT)); librariesToUpdate.addAll(delibsToUpdate); // libs of nondelibs and libs to run update on libs = CVS.getLibraryFiles(librariesToUpdate, useDir); } } */ String updateFiles = libs.toString() + " " + cells.toString(); if (updateFiles.trim().equals("") && !updateProject) { exitVal = 0; + fieldVariableChanged("exitVal"); System.out.println("Nothing to "+getMessage(type)); return true; } if (updateProject && (type == UPDATE || type == STATUS)) updateFiles = ""; StatusResult result = update(updateFiles, useDir, type); commentStatusResult(result, type); exitVal = result.getExitVal(); fieldVariableChanged("exitVal"); if (exitVal != 0) { return true; } // reload libs if needed libsToReload = new ArrayList<Library>(); if (type != STATUS) { for (Cell cell : result.getCells(State.UPDATE)) { Library lib = cell.getLibrary(); if (!libsToReload.contains(lib)) libsToReload.add(lib); } for (Library lib : libsToReload) { LibraryFiles.reloadLibrary(lib); } } if (type == ROLLBACK) { // turn off edit for rolled back cells for (Cell cell : result.getCells(State.UPDATE)) { CVSLibrary.setEditing(cell, false); } } // update states updateStates(result); System.out.println(getMessage(type)+" complete."); fieldVariableChanged("libsToReload"); return true; } public void terminateOK() { if (exitVal != 0) { Job.getUserInterface().showErrorMessage("CVS "+getMessage(type)+ " Failed! Please see messages window","CVS "+getMessage(type)+" Failed!"); return; } CVS.fixStaleCellReferences(libsToReload); } } /** * Update the given file in the given directory. 
* @param file * @param dir * @return */ private static StatusResult update(String file, String dir, int type) { String command = "-q update -d -P "; String message = "Running CVS Update"; if (type == STATUS) { command = "-nq update -d -P "; message = "Running CVS Status"; } if (type == ROLLBACK) { command = "-q update -C -P "; message = "Rollback from CVS"; } ByteArrayOutputStream out = new ByteArrayOutputStream(); int exitVal = CVS.runCVSCommand(command+file, message, dir, out); LineNumberReader result = new LineNumberReader(new InputStreamReader(new ByteArrayInputStream(out.toByteArray()))); return parseOutput(result, exitVal); } private static String getMessage(int type) { switch(type) { case 0: return "Update"; case 1: return "Status"; case 2: return "Rollback"; } return ""; } private static void updateStates(StatusResult result) { for (Cell cell : result.getCells(State.ADDED)) { CVSLibrary.setState(cell, State.ADDED); } for (Cell cell : result.getCells(State.REMOVED)) { CVSLibrary.setState(cell, State.REMOVED); } for (Cell cell : result.getCells(State.MODIFIED)) { CVSLibrary.setState(cell, State.MODIFIED); } for (Cell cell : result.getCells(State.CONFLICT)) { CVSLibrary.setState(cell, State.CONFLICT); } for (Cell cell : result.getCells(State.UPDATE)) { CVSLibrary.setState(cell, State.UPDATE); } for (Cell cell : result.getCells(State.UNKNOWN)) { CVSLibrary.setState(cell, State.UNKNOWN); } } // -------------------- Rollback ---------------------------- public static void rollback(Cell cell) { int ret = JOptionPane.showConfirmDialog(TopLevel.getCurrentJFrame(), "WARNING! Disk file for Cell "+cell.libDescribe()+" will revert to latest CVS version!\n"+ "All uncommited changes will be lost!!! Continue anyway?", "Rollback Cell", JOptionPane.YES_NO_OPTION, JOptionPane.WARNING_MESSAGE); if (ret == JOptionPane.NO_OPTION) return; updateCell(cell, ROLLBACK); } public static void rollback(Library lib) { int ret = JOptionPane.showConfirmDialog(TopLevel.getCurrentJFrame(), "WARNING! Disk file(s) for Library"+lib.getName()+" will revert to latest CVS version!\n"+ "All uncommited changes will be lost!!! Continue anyway?", "Rollback Library", JOptionPane.YES_NO_OPTION, JOptionPane.WARNING_MESSAGE); if (ret == JOptionPane.NO_OPTION) return; updateLibrary(lib, ROLLBACK); } // ---------------------- Output Parsing ------------------------- /** * Parse the output of an 'cvs -nq update' command, which * checks the status of the given files. 
* Returns true if all files are up-to-date, false otherwise * @param reader * @return */ private static StatusResult parseOutput(LineNumberReader reader, int exitVal) { StatusResult result = new StatusResult(exitVal); for (;;) { String line; try { line = reader.readLine(); } catch (IOException e) { System.out.println(e.getMessage()); return result; } if (line == null) break; if (line.equals("")) continue; String parts[] = line.split("\\s"); if (parts.length != 2) continue; State state = State.getState(parts[0]); if (state == null) continue; if (state == State.PATCHED) state = State.UPDATE; // find Cell for filename String filename = parts[1]; File file = new File(filename); if (filename.toLowerCase().endsWith(".jelib")) { // jelib library file, set state of all cells String endfile = file.getName(); Library lib = Library.findLibrary(endfile.substring(0, endfile.length()-6)); if (lib == null) continue; CVSLibrary.setState(lib, state); } if (filename.endsWith(DELIB.getLastModifiedFile())) { // delib header file, add delib library File header = new File(filename); File delib = header.getParentFile(); String endfile = delib.getName(); if (endfile.endsWith(".delib")) endfile = endfile.substring(0, endfile.length()-6); Library lib = Library.findLibrary(endfile); if (lib == null) continue; result.addLastModifiedFile(state, lib); continue; } Cell cell = CVS.getCellFromPath(filename); if (cell != null) { result.addCell(state, cell); } } return result; } /** * Parse the output of an 'cvs -nq update' command, which * checks the status of the given files. * Returns true if all files are up-to-date, false otherwise */ public static void commentStatusResult(StatusResult result, int type) { boolean allFilesUpToDate = true; for (Cell cell : result.getCells(State.ADDED)) { System.out.println("Added\t"+cell.libDescribe()); allFilesUpToDate = false; } for (Cell cell : result.getCells(State.REMOVED)) { System.out.println("Removed\t"+cell.libDescribe()); allFilesUpToDate = false; } for (Cell cell : result.getCells(State.MODIFIED)) { System.out.println("Modified\t"+cell.libDescribe()); allFilesUpToDate = false; } for (Cell cell : result.getCells(State.CONFLICT)) { System.out.println("Conflicts\t"+cell.libDescribe()); allFilesUpToDate = false; } for (Cell cell : result.getCells(State.UPDATE)) { if (type == STATUS) System.out.println("NeedsUpdate\t"+cell.libDescribe()); if (type == UPDATE) System.out.println("Updated\t"+cell.libDescribe()); allFilesUpToDate = false; } if (type == STATUS) { if (allFilesUpToDate) System.out.println("All files up-to-date"); else System.out.println("All other files up-to-date"); } } public static class StatusResult { private Map<State,List<Cell>> cells; private Map<State,List<Library>> lastModifiedFiles; private int exitVal; private StatusResult(int exitVal) { cells = new HashMap<State,List<Cell>>(); lastModifiedFiles = new HashMap<State,List<Library>>(); this.exitVal = exitVal; } private void addCell(State state, Cell cell) { List<Cell> statecells = cells.get(state); if (statecells == null) { statecells = new ArrayList<Cell>(); cells.put(state, statecells); } statecells.add(cell); } public List<Cell> getCells(State state) { List<Cell> statecells = cells.get(state); if (statecells == null) statecells = new ArrayList<Cell>(); return statecells; } public void addLastModifiedFile(State state, Library associatedLib) { List<Library> statelibs = lastModifiedFiles.get(state); if (statelibs == null) { statelibs = new ArrayList<Library>(); lastModifiedFiles.put(state, statelibs); } 
statelibs.add(associatedLib); } public List<Library> getLastModifiedFileLibs(State state) { List<Library> statelibs = lastModifiedFiles.get(state); if (statelibs == null) statelibs = new ArrayList<Library>(); return statelibs; } public int getExitVal() { return exitVal; } } }
true
true
public boolean doIt() { String useDir = CVS.getUseDir(librariesToUpdate, cellsToUpdate); StringBuffer libs = CVS.getLibraryFiles(librariesToUpdate, useDir); StringBuffer cells = CVS.getCellFiles(cellsToUpdate, useDir); // disable this for now, since users with older versions // of electric will not commit new lastModified file, // and then users of new electric will not get updated files /* if (!updateProject && (type != ROLLBACK)) { // optimization: for DELIBs, check header first. If that // requires an update, then check cells List<Library> checkedDelibs = new ArrayList<Library>(); StringBuffer lastModifiedFiles = CVS.getDELIBLastModifiedFiles(librariesToUpdate, useDir, checkedDelibs); String arg = lastModifiedFiles.toString().trim(); if (!arg.equals("")) { // remove libs that can be checked, they will be added again if // cvs says the lastModified file has been changed in the repository for (Library lib : checkedDelibs) librariesToUpdate.remove(lib); StatusResult result = update(arg, useDir, type); // check for updated libraries List<Library> delibsToUpdate = new ArrayList<Library>(); delibsToUpdate.addAll(result.getLastModifiedFileLibs(State.UPDATE)); delibsToUpdate.addAll(result.getLastModifiedFileLibs(State.CONFLICT)); librariesToUpdate.addAll(delibsToUpdate); // libs of nondelibs and libs to run update on libs = CVS.getLibraryFiles(librariesToUpdate, useDir); } } */ String updateFiles = libs.toString() + " " + cells.toString(); if (updateFiles.trim().equals("") && !updateProject) { exitVal = 0; System.out.println("Nothing to "+getMessage(type)); return true; } if (updateProject && (type == UPDATE || type == STATUS)) updateFiles = ""; StatusResult result = update(updateFiles, useDir, type); commentStatusResult(result, type); exitVal = result.getExitVal(); fieldVariableChanged("exitVal"); if (exitVal != 0) { return true; } // reload libs if needed libsToReload = new ArrayList<Library>(); if (type != STATUS) { for (Cell cell : result.getCells(State.UPDATE)) { Library lib = cell.getLibrary(); if (!libsToReload.contains(lib)) libsToReload.add(lib); } for (Library lib : libsToReload) { LibraryFiles.reloadLibrary(lib); } } if (type == ROLLBACK) { // turn off edit for rolled back cells for (Cell cell : result.getCells(State.UPDATE)) { CVSLibrary.setEditing(cell, false); } } // update states updateStates(result); System.out.println(getMessage(type)+" complete."); fieldVariableChanged("libsToReload"); return true; }
public boolean doIt() { String useDir = CVS.getUseDir(librariesToUpdate, cellsToUpdate); StringBuffer libs = CVS.getLibraryFiles(librariesToUpdate, useDir); StringBuffer cells = CVS.getCellFiles(cellsToUpdate, useDir); // disable this for now, since users with older versions // of electric will not commit new lastModified file, // and then users of new electric will not get updated files /* if (!updateProject && (type != ROLLBACK)) { // optimization: for DELIBs, check header first. If that // requires an update, then check cells List<Library> checkedDelibs = new ArrayList<Library>(); StringBuffer lastModifiedFiles = CVS.getDELIBLastModifiedFiles(librariesToUpdate, useDir, checkedDelibs); String arg = lastModifiedFiles.toString().trim(); if (!arg.equals("")) { // remove libs that can be checked, they will be added again if // cvs says the lastModified file has been changed in the repository for (Library lib : checkedDelibs) librariesToUpdate.remove(lib); StatusResult result = update(arg, useDir, type); // check for updated libraries List<Library> delibsToUpdate = new ArrayList<Library>(); delibsToUpdate.addAll(result.getLastModifiedFileLibs(State.UPDATE)); delibsToUpdate.addAll(result.getLastModifiedFileLibs(State.CONFLICT)); librariesToUpdate.addAll(delibsToUpdate); // libs of nondelibs and libs to run update on libs = CVS.getLibraryFiles(librariesToUpdate, useDir); } } */ String updateFiles = libs.toString() + " " + cells.toString(); if (updateFiles.trim().equals("") && !updateProject) { exitVal = 0; fieldVariableChanged("exitVal"); System.out.println("Nothing to "+getMessage(type)); return true; } if (updateProject && (type == UPDATE || type == STATUS)) updateFiles = ""; StatusResult result = update(updateFiles, useDir, type); commentStatusResult(result, type); exitVal = result.getExitVal(); fieldVariableChanged("exitVal"); if (exitVal != 0) { return true; } // reload libs if needed libsToReload = new ArrayList<Library>(); if (type != STATUS) { for (Cell cell : result.getCells(State.UPDATE)) { Library lib = cell.getLibrary(); if (!libsToReload.contains(lib)) libsToReload.add(lib); } for (Library lib : libsToReload) { LibraryFiles.reloadLibrary(lib); } } if (type == ROLLBACK) { // turn off edit for rolled back cells for (Cell cell : result.getCells(State.UPDATE)) { CVSLibrary.setEditing(cell, false); } } // update states updateStates(result); System.out.println(getMessage(type)+" complete."); fieldVariableChanged("libsToReload"); return true; }
diff --git a/Plugins/org.opendarts.prototype/src/main/java/org/opendarts/prototype/ui/x01/label/ScoreLabelProvider.java b/Plugins/org.opendarts.prototype/src/main/java/org/opendarts/prototype/ui/x01/label/ScoreLabelProvider.java index caf4d65..d2697a5 100644 --- a/Plugins/org.opendarts.prototype/src/main/java/org/opendarts/prototype/ui/x01/label/ScoreLabelProvider.java +++ b/Plugins/org.opendarts.prototype/src/main/java/org/opendarts/prototype/ui/x01/label/ScoreLabelProvider.java @@ -1,211 +1,211 @@ /* * */ package org.opendarts.prototype.ui.x01.label; import java.text.MessageFormat; import java.util.HashMap; import java.util.Map; import org.eclipse.jface.viewers.ColumnLabelProvider; import org.eclipse.swt.graphics.Color; import org.eclipse.swt.graphics.Font; import org.eclipse.swt.graphics.Image; import org.eclipse.swt.graphics.RGB; import org.eclipse.swt.widgets.Display; import org.eclipse.ui.forms.IFormColors; import org.opendarts.prototype.ProtoPlugin; import org.opendarts.prototype.internal.model.dart.ThreeDartsThrow; import org.opendarts.prototype.internal.model.dart.x01.BrokenX01DartsThrow; import org.opendarts.prototype.internal.model.dart.x01.WinningX01DartsThrow; import org.opendarts.prototype.internal.model.game.x01.DummyX01Entry; import org.opendarts.prototype.internal.model.game.x01.GameX01Entry; import org.opendarts.prototype.model.player.IPlayer; import org.opendarts.prototype.ui.ISharedImages; import org.opendarts.prototype.ui.utils.OpenDartsFormsToolkit; /** * The Class ScoreLabelProvider. */ public class ScoreLabelProvider extends ColumnLabelProvider { /** The player. */ private final IPlayer player; private final Map<Integer, Color> colors; private static RGB rgb60 = new RGB(0, 128, 64); private static RGB rgb100 = new RGB(0, 0, 255); private static RGB rgb180 = new RGB(128, 0, 128); /** * Instantiates a new score label provider. * * @param player the player */ public ScoreLabelProvider(IPlayer player) { super(); this.player = player; this.colors = new HashMap<Integer, Color>(); this.initColors(); } /** * Inits the colors. */ private void initColors() { Display display = Display.getDefault(); this.initColorRange(60, 100, rgb60, rgb100); this.initColorRange(100, 180, rgb100, rgb180); colors.put(180, new Color(display, rgb180)); } /** * Inits the color range. 
* * @param from the from * @param to the to * @param rgbFrom the rgb from * @param rgbTo the rgb to */ private void initColorRange(int from, int to, RGB rgbFrom, RGB rgbTo) { double delta; double redRatio; double greenRatio; double blueRatio; delta = (double) to - from; if (rgbTo.red == rgbFrom.red) { redRatio = 0d; } else { - redRatio = (delta / ((double) rgbTo.red - rgbFrom.red)); + redRatio = (((double) rgbTo.red - rgbFrom.red) / delta); } if (rgbTo.green == rgbFrom.green) { greenRatio = 0d; } else { - greenRatio = (delta / ((double) rgbTo.green - rgbFrom.green)); + greenRatio = (((double) rgbTo.green - rgbFrom.green) / delta); } if (rgbTo.blue == rgbFrom.blue) { blueRatio = 0d; } else { - blueRatio = (delta / ((double) rgbTo.blue - rgbFrom.blue)); + blueRatio = (((double) rgbTo.blue - rgbFrom.blue) / delta); } int r; int b; int g; RGB rgb; for (int i = from; i < to; i++) { r = (int) (rgbFrom.red + (i - from) * redRatio); g = (int) (rgbFrom.green + (i - from) * greenRatio); b = (int) (rgbFrom.blue + (i - from) * blueRatio); rgb = new RGB(r, g, b); colors.put(i, new Color(Display.getDefault(), rgb)); } } /* (non-Javadoc) * @see org.eclipse.jface.viewers.ColumnLabelProvider#getText(java.lang.Object) */ @Override public String getText(Object element) { if (element instanceof GameX01Entry) { String result; GameX01Entry gameEntry = (GameX01Entry) element; ThreeDartsThrow dartThrow = gameEntry.getPlayerThrow().get( this.player); if (dartThrow == null) { result = ""; } else if (dartThrow instanceof WinningX01DartsThrow) { WinningX01DartsThrow winThrow = (WinningX01DartsThrow) dartThrow; result = MessageFormat.format("+{0} ({1})", winThrow.getNbDartToFinish(), gameEntry.getNbPlayedDart()); } else { result = String.valueOf(dartThrow.getScore()); } return result; } else if (element instanceof DummyX01Entry) { return ""; } return super.getText(element); } /* (non-Javadoc) * @see org.eclipse.jface.viewers.ColumnLabelProvider#getImage(java.lang.Object) */ @Override public Image getImage(Object element) { if (element instanceof DummyX01Entry) { DummyX01Entry entry = (DummyX01Entry) element; if (this.player.equals(entry.getGame().getFirstPlayer())) { return ProtoPlugin.getImage(ISharedImages.IMG_START_DECO); } } return super.getImage(element); } /* (non-Javadoc) * @see org.eclipse.jface.viewers.ColumnLabelProvider#getBackground(java.lang.Object) */ @Override public Color getBackground(Object element) { if (element instanceof DummyX01Entry) { return OpenDartsFormsToolkit.getToolkit().getColors() .getColor(IFormColors.H_GRADIENT_START); } else if (element instanceof GameX01Entry) { GameX01Entry entry = (GameX01Entry) element; ThreeDartsThrow dartThrow = entry.getPlayerThrow().get(this.player); if (dartThrow instanceof BrokenX01DartsThrow) { return OpenDartsFormsToolkit.getToolkit().getColors() .getColor(OpenDartsFormsToolkit.COLOR_BROKEN); } else if (dartThrow instanceof WinningX01DartsThrow) { return OpenDartsFormsToolkit.getToolkit().getColors() .getColor(OpenDartsFormsToolkit.COLOR_WINNING); } } return super.getBackground(element); } /* (non-Javadoc) * @see org.eclipse.jface.viewers.ColumnLabelProvider#getFont(java.lang.Object) */ @Override public Font getFont(Object element) { return OpenDartsFormsToolkit .getFont(OpenDartsFormsToolkit.FONT_SCORE_SHEET); } /* (non-Javadoc) * @see org.eclipse.jface.viewers.ColumnLabelProvider#getForeground(java.lang.Object) */ @Override public Color getForeground(Object element) { Color result = null; if (element instanceof GameX01Entry) { GameX01Entry entry = 
(GameX01Entry) element; ThreeDartsThrow dartThrow = entry.getPlayerThrow().get(this.player); if (dartThrow != null) { int score = dartThrow.getScore(); result = this.colors.get(score); } } if (result == null) { result = super.getBackground(element); } return result; } /* (non-Javadoc) * @see org.eclipse.jface.viewers.BaseLabelProvider#dispose() */ @Override public void dispose() { for (Color col : this.colors.values()) { col.dispose(); } } }
false
true
private void initColorRange(int from, int to, RGB rgbFrom, RGB rgbTo) { double delta; double redRatio; double greenRatio; double blueRatio; delta = (double) to - from; if (rgbTo.red == rgbFrom.red) { redRatio = 0d; } else { redRatio = (delta / ((double) rgbTo.red - rgbFrom.red)); } if (rgbTo.green == rgbFrom.green) { greenRatio = 0d; } else { greenRatio = (delta / ((double) rgbTo.green - rgbFrom.green)); } if (rgbTo.blue == rgbFrom.blue) { blueRatio = 0d; } else { blueRatio = (delta / ((double) rgbTo.blue - rgbFrom.blue)); } int r; int b; int g; RGB rgb; for (int i = from; i < to; i++) { r = (int) (rgbFrom.red + (i - from) * redRatio); g = (int) (rgbFrom.green + (i - from) * greenRatio); b = (int) (rgbFrom.blue + (i - from) * blueRatio); rgb = new RGB(r, g, b); colors.put(i, new Color(Display.getDefault(), rgb)); } }
private void initColorRange(int from, int to, RGB rgbFrom, RGB rgbTo) { double delta; double redRatio; double greenRatio; double blueRatio; delta = (double) to - from; if (rgbTo.red == rgbFrom.red) { redRatio = 0d; } else { redRatio = (((double) rgbTo.red - rgbFrom.red) / delta); } if (rgbTo.green == rgbFrom.green) { greenRatio = 0d; } else { greenRatio = (((double) rgbTo.green - rgbFrom.green) / delta); } if (rgbTo.blue == rgbFrom.blue) { blueRatio = 0d; } else { blueRatio = (((double) rgbTo.blue - rgbFrom.blue) / delta); } int r; int b; int g; RGB rgb; for (int i = from; i < to; i++) { r = (int) (rgbFrom.red + (i - from) * redRatio); g = (int) (rgbFrom.green + (i - from) * greenRatio); b = (int) (rgbFrom.blue + (i - from) * blueRatio); rgb = new RGB(r, g, b); colors.put(i, new Color(Display.getDefault(), rgb)); } }
diff --git a/src/mlparch/MLPA_CLI.java b/src/mlparch/MLPA_CLI.java index 732390b..da903c7 100644 --- a/src/mlparch/MLPA_CLI.java +++ b/src/mlparch/MLPA_CLI.java @@ -1,184 +1,184 @@ /* * To change this template, choose Tools | Templates * and open the template in the editor. */ package mlparch; import java.io.File; import java.io.IOException; import java.text.NumberFormat; import java.util.regex.Pattern; import mlparch.MLPArch.MLPFileEntry; /** * * @author John Petska */ public class MLPA_CLI { public static void showHelp() { System.out.println("MLPArch packing/unpacking utility"); System.out.println("Public domain code, released 2012"); System.out.println("Options:"); System.out.println(" -u - unpack mode (default)"); System.out.println(" -p - pack mode"); System.out.println(" -l - list mode "); System.out.println(" -a <arg> - specify archive location (default \"main.1050.com.gameloft.android.ANMP.GloftPOHM.obb\")"); System.out.println(" -f <arg> - specify pack/unpack location (default \"extract\")"); System.out.println(" -s <arg> - pack/unpack only a single file"); System.out.println(" -r <arg> - pack/unpack only files matching this regex"); //System.out.println(" -m <arg> - pack/unpack only files matching this wildcard pattern (*, ?)"); System.out.println(" -v - show this help"); System.out.println(" -? - show this help"); System.out.println(" --help - show this help"); } public static void main(String[] args) throws Exception { String archName = "main.1050.com.gameloft.android.ANMP.GloftPOHM.obb"; String tmplName = archName; String packName = "extract"; int mode = 0; //0 == unpack, 1 == pack, 2 == list int matchMode = 0; //0 == all, 1 == single, 2 == regex, 3 == wildcard String matchPat = null; for (int i = 0; i < args.length; i++) { String arg0 = args[i]; if (arg0.startsWith("--")) { //long option arg0 = arg0.substring(2); if (arg0.equals("help")) { showHelp(); System.exit(0); } else { throw new IllegalArgumentException("Unrecognized long option: '"+arg0+"'."); } } else if (arg0.charAt(0) == '-') { //short option arg0 = arg0.substring(1); for (int j = 0; j < arg0.length(); j++) { //short optiona may be stacked char opt = arg0.charAt(j); switch (opt) { case 'v': case '?': showHelp(); System.exit(0); case 'u': mode = 0; //unpack break; case 'p': mode = 1; //pack break; case 'l': mode = 2; //list break; case 'a': - if (i!=arg0.length()-1 || ++i >= args.length) + if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'a'!"); archName = args[i]; break; case 'f': - if (i!=arg0.length()-1 || ++i >= args.length) + if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'f'!"); packName = args[i]; break; case 's': - if (i!=arg0.length()-1 || ++i >= args.length) + if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 's'!"); matchMode = 1; matchPat = args[i]; break; case 'r': - if (i!=arg0.length()-1 || ++i >= args.length) + if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'r'!"); matchMode = 2; matchPat = args[i]; break; case 'm': - if (i!=arg0.length()-1 || ++i >= args.length) + if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'm'!"); System.err.println("Attempting to use wildcard matching: This doesn't work very well, have fun!"); matchMode = 3; matchPat = args[i]; 
break; default: throw new IllegalArgumentException("Unrecognized short option: '"+opt+"'."); } } } else { //bare argument throw new IllegalArgumentException("Unrecognized bare argument: '"+arg0+"'."); } } File archFile = new File(archName); File packFile = new File(packName); MLPArch arch = new MLPArch(archFile); switch (matchMode) { default: case 0: //all matchPat = null; break; case 1: //single matchPat = Pattern.quote(matchPat); break; case 2: //regex /* done. */ break; case 3: //wildcard matchPat = Pattern.quote(matchPat); matchPat = matchPat.replace("\\*", ".*"); matchPat = matchPat.replace("\\?", "."); break; } Pattern pat = matchPat==null ? null : Pattern.compile(matchPat); if (mode == 0 || mode == 2) { //unpack or list System.out.println((mode==0?"Unpacking":"Listing")+" MLPArch at \""+archFile.getPath()+"\" to \""+packFile.getPath()+"\"."); System.out.print("Reading header..."); arch.loadHeaderFromArchive(); System.out.println("done."); System.out.println("\tIndex Start: "+arch.indexOffset); System.out.print("Reading index..."); arch.loadIndexFromArchive(); System.out.println("done."); System.out.println("\tFile Count: "+arch.index.size()); if (mode == 0) { System.out.println("Unpacking archive..."); packFile.mkdir(); NumberFormat format = NumberFormat.getPercentInstance(); format.setMinimumFractionDigits(1); format.setMaximumFractionDigits(1); for (int i = 0; i < arch.index.size(); i++) { MLPFileEntry entry = arch.index.get(i); if (pat != null && !pat.matcher(entry.path).matches()) continue; //skipping System.out.print("Unpacking "+(i+1)+"/"+arch.index.size()+" ("+format.format((float)(i+1)/arch.index.size())+"): \""+entry.path+"\" ("+entry.size()+" bytes)..."); arch.unpackFile(entry, packFile); System.out.println("done."); } } else { System.out.println("Listing Index..."); for (int i = 0; i < arch.index.size(); i++) if (pat == null || pat.matcher(arch.index.get(i).path).matches()) System.out.println((i+1)+": "+arch.index.get(i).toString()); } } else if (mode == 1) { //pack System.out.println("Packing MLPArch at \""+archFile.getPath()+"\" from \""+packFile.getPath()+"\"."); System.out.print("Building index..."); arch.loadIndexFromFolder(packFile); System.out.println("done."); System.out.println("\tFile Count: "+arch.index.size()); System.out.println("Writing header..."); arch.writeHeaderToArchive(); System.out.println("Packing files..."); arch.writeFilesToArchive(packFile, pat); System.out.println("Writing index..."); arch.writeIndexToArchive(); } } }
false
true
public static void main(String[] args) throws Exception { String archName = "main.1050.com.gameloft.android.ANMP.GloftPOHM.obb"; String tmplName = archName; String packName = "extract"; int mode = 0; //0 == unpack, 1 == pack, 2 == list int matchMode = 0; //0 == all, 1 == single, 2 == regex, 3 == wildcard String matchPat = null; for (int i = 0; i < args.length; i++) { String arg0 = args[i]; if (arg0.startsWith("--")) { //long option arg0 = arg0.substring(2); if (arg0.equals("help")) { showHelp(); System.exit(0); } else { throw new IllegalArgumentException("Unrecognized long option: '"+arg0+"'."); } } else if (arg0.charAt(0) == '-') { //short option arg0 = arg0.substring(1); for (int j = 0; j < arg0.length(); j++) { //short optiona may be stacked char opt = arg0.charAt(j); switch (opt) { case 'v': case '?': showHelp(); System.exit(0); case 'u': mode = 0; //unpack break; case 'p': mode = 1; //pack break; case 'l': mode = 2; //list break; case 'a': if (i!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'a'!"); archName = args[i]; break; case 'f': if (i!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'f'!"); packName = args[i]; break; case 's': if (i!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 's'!"); matchMode = 1; matchPat = args[i]; break; case 'r': if (i!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'r'!"); matchMode = 2; matchPat = args[i]; break; case 'm': if (i!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'm'!"); System.err.println("Attempting to use wildcard matching: This doesn't work very well, have fun!"); matchMode = 3; matchPat = args[i]; break; default: throw new IllegalArgumentException("Unrecognized short option: '"+opt+"'."); } } } else { //bare argument throw new IllegalArgumentException("Unrecognized bare argument: '"+arg0+"'."); } } File archFile = new File(archName); File packFile = new File(packName); MLPArch arch = new MLPArch(archFile); switch (matchMode) { default: case 0: //all matchPat = null; break; case 1: //single matchPat = Pattern.quote(matchPat); break; case 2: //regex /* done. */ break; case 3: //wildcard matchPat = Pattern.quote(matchPat); matchPat = matchPat.replace("\\*", ".*"); matchPat = matchPat.replace("\\?", "."); break; } Pattern pat = matchPat==null ? 
null : Pattern.compile(matchPat); if (mode == 0 || mode == 2) { //unpack or list System.out.println((mode==0?"Unpacking":"Listing")+" MLPArch at \""+archFile.getPath()+"\" to \""+packFile.getPath()+"\"."); System.out.print("Reading header..."); arch.loadHeaderFromArchive(); System.out.println("done."); System.out.println("\tIndex Start: "+arch.indexOffset); System.out.print("Reading index..."); arch.loadIndexFromArchive(); System.out.println("done."); System.out.println("\tFile Count: "+arch.index.size()); if (mode == 0) { System.out.println("Unpacking archive..."); packFile.mkdir(); NumberFormat format = NumberFormat.getPercentInstance(); format.setMinimumFractionDigits(1); format.setMaximumFractionDigits(1); for (int i = 0; i < arch.index.size(); i++) { MLPFileEntry entry = arch.index.get(i); if (pat != null && !pat.matcher(entry.path).matches()) continue; //skipping System.out.print("Unpacking "+(i+1)+"/"+arch.index.size()+" ("+format.format((float)(i+1)/arch.index.size())+"): \""+entry.path+"\" ("+entry.size()+" bytes)..."); arch.unpackFile(entry, packFile); System.out.println("done."); } } else { System.out.println("Listing Index..."); for (int i = 0; i < arch.index.size(); i++) if (pat == null || pat.matcher(arch.index.get(i).path).matches()) System.out.println((i+1)+": "+arch.index.get(i).toString()); } } else if (mode == 1) { //pack System.out.println("Packing MLPArch at \""+archFile.getPath()+"\" from \""+packFile.getPath()+"\"."); System.out.print("Building index..."); arch.loadIndexFromFolder(packFile); System.out.println("done."); System.out.println("\tFile Count: "+arch.index.size()); System.out.println("Writing header..."); arch.writeHeaderToArchive(); System.out.println("Packing files..."); arch.writeFilesToArchive(packFile, pat); System.out.println("Writing index..."); arch.writeIndexToArchive(); } }
public static void main(String[] args) throws Exception { String archName = "main.1050.com.gameloft.android.ANMP.GloftPOHM.obb"; String tmplName = archName; String packName = "extract"; int mode = 0; //0 == unpack, 1 == pack, 2 == list int matchMode = 0; //0 == all, 1 == single, 2 == regex, 3 == wildcard String matchPat = null; for (int i = 0; i < args.length; i++) { String arg0 = args[i]; if (arg0.startsWith("--")) { //long option arg0 = arg0.substring(2); if (arg0.equals("help")) { showHelp(); System.exit(0); } else { throw new IllegalArgumentException("Unrecognized long option: '"+arg0+"'."); } } else if (arg0.charAt(0) == '-') { //short option arg0 = arg0.substring(1); for (int j = 0; j < arg0.length(); j++) { //short optiona may be stacked char opt = arg0.charAt(j); switch (opt) { case 'v': case '?': showHelp(); System.exit(0); case 'u': mode = 0; //unpack break; case 'p': mode = 1; //pack break; case 'l': mode = 2; //list break; case 'a': if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'a'!"); archName = args[i]; break; case 'f': if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'f'!"); packName = args[i]; break; case 's': if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 's'!"); matchMode = 1; matchPat = args[i]; break; case 'r': if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'r'!"); matchMode = 2; matchPat = args[i]; break; case 'm': if (j!=arg0.length()-1 || ++i >= args.length) throw new IllegalArgumentException("Expected another bare argument after 'm'!"); System.err.println("Attempting to use wildcard matching: This doesn't work very well, have fun!"); matchMode = 3; matchPat = args[i]; break; default: throw new IllegalArgumentException("Unrecognized short option: '"+opt+"'."); } } } else { //bare argument throw new IllegalArgumentException("Unrecognized bare argument: '"+arg0+"'."); } } File archFile = new File(archName); File packFile = new File(packName); MLPArch arch = new MLPArch(archFile); switch (matchMode) { default: case 0: //all matchPat = null; break; case 1: //single matchPat = Pattern.quote(matchPat); break; case 2: //regex /* done. */ break; case 3: //wildcard matchPat = Pattern.quote(matchPat); matchPat = matchPat.replace("\\*", ".*"); matchPat = matchPat.replace("\\?", "."); break; } Pattern pat = matchPat==null ? 
null : Pattern.compile(matchPat); if (mode == 0 || mode == 2) { //unpack or list System.out.println((mode==0?"Unpacking":"Listing")+" MLPArch at \""+archFile.getPath()+"\" to \""+packFile.getPath()+"\"."); System.out.print("Reading header..."); arch.loadHeaderFromArchive(); System.out.println("done."); System.out.println("\tIndex Start: "+arch.indexOffset); System.out.print("Reading index..."); arch.loadIndexFromArchive(); System.out.println("done."); System.out.println("\tFile Count: "+arch.index.size()); if (mode == 0) { System.out.println("Unpacking archive..."); packFile.mkdir(); NumberFormat format = NumberFormat.getPercentInstance(); format.setMinimumFractionDigits(1); format.setMaximumFractionDigits(1); for (int i = 0; i < arch.index.size(); i++) { MLPFileEntry entry = arch.index.get(i); if (pat != null && !pat.matcher(entry.path).matches()) continue; //skipping System.out.print("Unpacking "+(i+1)+"/"+arch.index.size()+" ("+format.format((float)(i+1)/arch.index.size())+"): \""+entry.path+"\" ("+entry.size()+" bytes)..."); arch.unpackFile(entry, packFile); System.out.println("done."); } } else { System.out.println("Listing Index..."); for (int i = 0; i < arch.index.size(); i++) if (pat == null || pat.matcher(arch.index.get(i).path).matches()) System.out.println((i+1)+": "+arch.index.get(i).toString()); } } else if (mode == 1) { //pack System.out.println("Packing MLPArch at \""+archFile.getPath()+"\" from \""+packFile.getPath()+"\"."); System.out.print("Building index..."); arch.loadIndexFromFolder(packFile); System.out.println("done."); System.out.println("\tFile Count: "+arch.index.size()); System.out.println("Writing header..."); arch.writeHeaderToArchive(); System.out.println("Packing files..."); arch.writeFilesToArchive(packFile, pat); System.out.println("Writing index..."); arch.writeIndexToArchive(); } }
diff --git a/org.projectusus.core/src/org/projectusus/core/internal/yellowcount/ProjectCount.java b/org.projectusus.core/src/org/projectusus/core/internal/yellowcount/ProjectCount.java index 4da4f528..bb80a6be 100644 --- a/org.projectusus.core/src/org/projectusus/core/internal/yellowcount/ProjectCount.java +++ b/org.projectusus.core/src/org/projectusus/core/internal/yellowcount/ProjectCount.java @@ -1,79 +1,81 @@ // Copyright (c) 2005-2006, 2009 by the projectusus.org contributors // This software is released under the terms and conditions // of the Eclipse Public License (EPL) 1.0. // See http://www.eclipse.org/legal/epl-v10.html for details. package org.projectusus.core.internal.yellowcount; import static org.eclipse.core.resources.IMarker.PROBLEM; import static org.eclipse.core.resources.IResource.DEPTH_INFINITE; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import org.eclipse.core.resources.IFile; import org.eclipse.core.resources.IMarker; import org.eclipse.core.resources.IProject; import org.eclipse.core.resources.IResource; import org.eclipse.core.runtime.CoreException; import org.projectusus.core.internal.proportions.model.IHotspot; class ProjectCount { private final IProject project; private final HashMap<IResource, Integer> counts; ProjectCount( IProject project ) throws CoreException { this.project = project; counts = new HashMap<IResource, Integer>(); computeProjectYellowCount(); } public boolean isEmpty() { return counts.isEmpty(); } int sum() { int result = 0; for( IResource resource : counts.keySet() ) { result += counts.get( resource ).intValue(); } return result; } int countFiles() { return counts.size(); } List<IHotspot> getHotspots() { List<IHotspot> result = new ArrayList<IHotspot>(); for( IResource resource : counts.keySet() ) { - IFile file = (IFile)resource; - int count = counts.get( resource ).intValue(); - result.add( new MetricCWHotspot( file, count ) ); + if( resource instanceof IFile ) { + IFile file = (IFile)resource; + int count = counts.get( resource ).intValue(); + result.add( new MetricCWHotspot( file, count ) ); + } } return result; } private void computeProjectYellowCount() throws CoreException { IMarker[] markers = project.findMarkers( PROBLEM, true, DEPTH_INFINITE ); for( IMarker marker : markers ) { if( isWarning( marker ) ) { increase( marker.getResource() ); } } } private void increase( IResource resource ) { Integer count = counts.get( resource ); if( count == null ) { count = new Integer( 0 ); } count = new Integer( count.intValue() + 1 ); counts.put( resource, count ); } private boolean isWarning( final IMarker marker ) throws CoreException { Integer severity = (Integer)marker.getAttribute( IMarker.SEVERITY ); return severity != null && severity.intValue() == IMarker.SEVERITY_WARNING; } }
true
true
List<IHotspot> getHotspots() { List<IHotspot> result = new ArrayList<IHotspot>(); for( IResource resource : counts.keySet() ) { IFile file = (IFile)resource; int count = counts.get( resource ).intValue(); result.add( new MetricCWHotspot( file, count ) ); } return result; }
List<IHotspot> getHotspots() { List<IHotspot> result = new ArrayList<IHotspot>(); for( IResource resource : counts.keySet() ) { if( resource instanceof IFile ) { IFile file = (IFile)resource; int count = counts.get( resource ).intValue(); result.add( new MetricCWHotspot( file, count ) ); } } return result; }
diff --git a/Madz.DatabaseMetaData/src/main/java/net/madz/db/core/meta/mutable/mysql/impl/MySQLSchemaMetaDataBuilderImpl.java b/Madz.DatabaseMetaData/src/main/java/net/madz/db/core/meta/mutable/mysql/impl/MySQLSchemaMetaDataBuilderImpl.java index 2eb0f18..ed8d7bc 100644 --- a/Madz.DatabaseMetaData/src/main/java/net/madz/db/core/meta/mutable/mysql/impl/MySQLSchemaMetaDataBuilderImpl.java +++ b/Madz.DatabaseMetaData/src/main/java/net/madz/db/core/meta/mutable/mysql/impl/MySQLSchemaMetaDataBuilderImpl.java @@ -1,68 +1,68 @@ package net.madz.db.core.meta.mutable.mysql.impl; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import net.madz.db.core.meta.DottedPath; import net.madz.db.core.meta.immutable.impl.MetaDataResultSet; import net.madz.db.core.meta.immutable.mysql.MySQLColumnMetaData; import net.madz.db.core.meta.immutable.mysql.MySQLForeignKeyMetaData; import net.madz.db.core.meta.immutable.mysql.MySQLIndexMetaData; import net.madz.db.core.meta.immutable.mysql.MySQLSchemaMetaData; import net.madz.db.core.meta.immutable.mysql.MySQLTableMetaData; import net.madz.db.core.meta.immutable.mysql.impl.MySQLSchemaMetaDataImpl; import net.madz.db.core.meta.mutable.impl.BasedSchemaMetaDataBuilder; import net.madz.db.core.meta.mutable.mysql.MySQLColumnMetaDataBuilder; import net.madz.db.core.meta.mutable.mysql.MySQLForeignKeyMetaDataBuilder; import net.madz.db.core.meta.mutable.mysql.MySQLIndexMetaDataBuilder; import net.madz.db.core.meta.mutable.mysql.MySQLSchemaMetaDataBuilder; import net.madz.db.core.meta.mutable.mysql.MySQLTableMetaDataBuilder; public class MySQLSchemaMetaDataBuilderImpl extends BasedSchemaMetaDataBuilder<MySQLSchemaMetaDataBuilder, MySQLTableMetaDataBuilder, MySQLColumnMetaDataBuilder, MySQLForeignKeyMetaDataBuilder, MySQLIndexMetaDataBuilder, MySQLSchemaMetaData, MySQLTableMetaData, MySQLColumnMetaData, MySQLForeignKeyMetaData, MySQLIndexMetaData> implements MySQLSchemaMetaDataBuilder { private String charSet; private String collation; public MySQLSchemaMetaDataBuilderImpl(DottedPath schemaPath) throws SQLException { super(schemaPath); } public MySQLSchemaMetaDataBuilder build(Connection conn) throws SQLException { System.out.println("Mysql schema builder"); Statement stmt = conn.createStatement(); stmt.executeQuery("use information_schema;"); ResultSet rs = stmt.executeQuery("select * from SCHEMATA where schema_name = '" + schemaPath.getName() + "'"); while ( rs.next() && rs.getRow() == 1 ) { charSet = rs.getString("DEFAULT_CHARACTER_SET_NAME"); collation = rs.getString("DEFAULT_COLLATION_NAME"); } - rs = stmt.executeQuery("SELECT * FROM tables INNER JOIN character_sets ON default_collate_name = table_collation WHERE schema_name = '" + schemaPath.getName() + "'"); + rs = stmt.executeQuery("SELECT * FROM tables INNER JOIN character_sets ON default_collate_name = table_collation WHERE table_schema = '" + schemaPath.getName() + "'"); MetaDataResultSet<MySQLTableDbMetaDataEnum> rsMd = new MetaDataResultSet<MySQLTableDbMetaDataEnum>(rs, MySQLTableDbMetaDataEnum.values()); while ( rsMd.next() ) { final MySQLTableMetaDataBuilder table = new MySQLTableMetaDataBuilderImpl(this).build(rsMd,conn); appendTableMetaDataBuilder(table); } return this; } @Override public String getCharSet() { return this.charSet; } @Override public String getCollation() { return this.collation; } @Override public MySQLSchemaMetaData getMetaData() { return new MySQLSchemaMetaDataImpl(this); } }
true
true
public MySQLSchemaMetaDataBuilder build(Connection conn) throws SQLException { System.out.println("Mysql schema builder"); Statement stmt = conn.createStatement(); stmt.executeQuery("use information_schema;"); ResultSet rs = stmt.executeQuery("select * from SCHEMATA where schema_name = '" + schemaPath.getName() + "'"); while ( rs.next() && rs.getRow() == 1 ) { charSet = rs.getString("DEFAULT_CHARACTER_SET_NAME"); collation = rs.getString("DEFAULT_COLLATION_NAME"); } rs = stmt.executeQuery("SELECT * FROM tables INNER JOIN character_sets ON default_collate_name = table_collation WHERE schema_name = '" + schemaPath.getName() + "'"); MetaDataResultSet<MySQLTableDbMetaDataEnum> rsMd = new MetaDataResultSet<MySQLTableDbMetaDataEnum>(rs, MySQLTableDbMetaDataEnum.values()); while ( rsMd.next() ) { final MySQLTableMetaDataBuilder table = new MySQLTableMetaDataBuilderImpl(this).build(rsMd,conn); appendTableMetaDataBuilder(table); } return this; }
public MySQLSchemaMetaDataBuilder build(Connection conn) throws SQLException { System.out.println("Mysql schema builder"); Statement stmt = conn.createStatement(); stmt.executeQuery("use information_schema;"); ResultSet rs = stmt.executeQuery("select * from SCHEMATA where schema_name = '" + schemaPath.getName() + "'"); while ( rs.next() && rs.getRow() == 1 ) { charSet = rs.getString("DEFAULT_CHARACTER_SET_NAME"); collation = rs.getString("DEFAULT_COLLATION_NAME"); } rs = stmt.executeQuery("SELECT * FROM tables INNER JOIN character_sets ON default_collate_name = table_collation WHERE table_schema = '" + schemaPath.getName() + "'"); MetaDataResultSet<MySQLTableDbMetaDataEnum> rsMd = new MetaDataResultSet<MySQLTableDbMetaDataEnum>(rs, MySQLTableDbMetaDataEnum.values()); while ( rsMd.next() ) { final MySQLTableMetaDataBuilder table = new MySQLTableMetaDataBuilderImpl(this).build(rsMd,conn); appendTableMetaDataBuilder(table); } return this; }
diff --git a/src/main/java/org/atlasapi/equiv/generators/FilmEquivalenceGenerator.java b/src/main/java/org/atlasapi/equiv/generators/FilmEquivalenceGenerator.java index 054f63c5f..f267ef678 100644 --- a/src/main/java/org/atlasapi/equiv/generators/FilmEquivalenceGenerator.java +++ b/src/main/java/org/atlasapi/equiv/generators/FilmEquivalenceGenerator.java @@ -1,103 +1,103 @@ package org.atlasapi.equiv.generators; import static com.google.common.collect.Iterables.filter; import java.util.List; import org.atlasapi.application.ApplicationConfiguration; import org.atlasapi.equiv.results.description.ResultDescription; import org.atlasapi.equiv.results.scores.DefaultScoredEquivalents; import org.atlasapi.equiv.results.scores.DefaultScoredEquivalents.ScoredEquivalentsBuilder; import org.atlasapi.equiv.results.scores.Score; import org.atlasapi.equiv.results.scores.ScoredEquivalents; import org.atlasapi.media.entity.Film; import org.atlasapi.media.entity.Identified; import org.atlasapi.media.entity.Publisher; import org.atlasapi.persistence.content.SearchResolver; import org.atlasapi.search.model.SearchQuery; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.metabroadcast.common.query.Selection; public class FilmEquivalenceGenerator implements ContentEquivalenceGenerator<Film> { private static final ApplicationConfiguration config = new ApplicationConfiguration(ImmutableSet.of(Publisher.PREVIEW_NETWORKS), null); private static final float TITLE_WEIGHTING = 1.0f; private static final float BROADCAST_WEIGHTING = 0.0f; private static final float CATCHUP_WEIGHTING = 0.0f; private final SearchResolver searchResolver; public FilmEquivalenceGenerator(SearchResolver searchResolver) { this.searchResolver = searchResolver; } @Override public ScoredEquivalents<Film> generate(Film film, ResultDescription desc) { ScoredEquivalentsBuilder<Film> scores = DefaultScoredEquivalents.<Film> fromSource("Film"); desc.startStage("Film equivalence generator"); if (film.getYear() == null || Strings.isNullOrEmpty(film.getTitle())) { - desc.appendText("Can't continue: year '%s', title '%'", film.getYear(), film.getTitle()).finishStage(); + desc.appendText("Can't continue: year '%s', title '%s'", film.getYear(), film.getTitle()).finishStage(); return scores.build(); } else { desc.appendText("Using year %s, title %s", film.getYear(), film.getTitle()); } List<Identified> possibleEquivalentFilms = searchResolver.search(new SearchQuery(film.getTitle(), Selection.ALL, ImmutableList.of(Publisher.PREVIEW_NETWORKS), TITLE_WEIGHTING, BROADCAST_WEIGHTING, CATCHUP_WEIGHTING), config); Iterable<Film> foundFilms = filter(possibleEquivalentFilms, Film.class); desc.appendText("Found %s films through title search", Iterables.size(foundFilms)); for (Film equivFilm : foundFilms) { if(sameYear(film, equivFilm)) { Score score = Score.valueOf(titleMatch(film, equivFilm)); desc.appendText("%s (%s) scored %s", equivFilm.getTitle(), equivFilm.getCanonicalUri(), score); scores.addEquivalent(equivFilm, score); } else { desc.appendText("%s (%s) ignored. 
Wrong year %s", equivFilm.getTitle(), equivFilm.getCanonicalUri(), equivFilm.getYear()); scores.addEquivalent(equivFilm, Score.valueOf(0.0)); } } desc.finishStage(); return scores.build(); } private double titleMatch(Film film, Film equivFilm) { return match(removeThe(alphaNumeric(film.getTitle())), removeThe(alphaNumeric(equivFilm.getTitle()))); } public double match(String subjectTitle, String equivalentTitle) { double commonPrefix = commonPrefixLength(subjectTitle, equivalentTitle); double difference = Math.abs(equivalentTitle.length() - commonPrefix) / equivalentTitle.length(); return commonPrefix / (subjectTitle.length() / 1.0) - difference; } private String removeThe(String title) { if(title.startsWith("the")) { return title.substring(3); } return title; } private String alphaNumeric(String title) { return title.replaceAll("[^\\d\\w]", "").toLowerCase(); } private double commonPrefixLength(String t1, String t2) { int i = 0; for (; i < Math.min(t1.length(), t2.length()) && t1.charAt(i) == t2.charAt(i); i++) { } return i; } private boolean sameYear(Film film, Film equivFilm) { return film.getYear().equals(equivFilm.getYear()); } }
true
true
public ScoredEquivalents<Film> generate(Film film, ResultDescription desc) { ScoredEquivalentsBuilder<Film> scores = DefaultScoredEquivalents.<Film> fromSource("Film"); desc.startStage("Film equivalence generator"); if (film.getYear() == null || Strings.isNullOrEmpty(film.getTitle())) { desc.appendText("Can't continue: year '%s', title '%'", film.getYear(), film.getTitle()).finishStage(); return scores.build(); } else { desc.appendText("Using year %s, title %s", film.getYear(), film.getTitle()); } List<Identified> possibleEquivalentFilms = searchResolver.search(new SearchQuery(film.getTitle(), Selection.ALL, ImmutableList.of(Publisher.PREVIEW_NETWORKS), TITLE_WEIGHTING, BROADCAST_WEIGHTING, CATCHUP_WEIGHTING), config); Iterable<Film> foundFilms = filter(possibleEquivalentFilms, Film.class); desc.appendText("Found %s films through title search", Iterables.size(foundFilms)); for (Film equivFilm : foundFilms) { if(sameYear(film, equivFilm)) { Score score = Score.valueOf(titleMatch(film, equivFilm)); desc.appendText("%s (%s) scored %s", equivFilm.getTitle(), equivFilm.getCanonicalUri(), score); scores.addEquivalent(equivFilm, score); } else { desc.appendText("%s (%s) ignored. Wrong year %s", equivFilm.getTitle(), equivFilm.getCanonicalUri(), equivFilm.getYear()); scores.addEquivalent(equivFilm, Score.valueOf(0.0)); } } desc.finishStage(); return scores.build(); }
public ScoredEquivalents<Film> generate(Film film, ResultDescription desc) { ScoredEquivalentsBuilder<Film> scores = DefaultScoredEquivalents.<Film> fromSource("Film"); desc.startStage("Film equivalence generator"); if (film.getYear() == null || Strings.isNullOrEmpty(film.getTitle())) { desc.appendText("Can't continue: year '%s', title '%s'", film.getYear(), film.getTitle()).finishStage(); return scores.build(); } else { desc.appendText("Using year %s, title %s", film.getYear(), film.getTitle()); } List<Identified> possibleEquivalentFilms = searchResolver.search(new SearchQuery(film.getTitle(), Selection.ALL, ImmutableList.of(Publisher.PREVIEW_NETWORKS), TITLE_WEIGHTING, BROADCAST_WEIGHTING, CATCHUP_WEIGHTING), config); Iterable<Film> foundFilms = filter(possibleEquivalentFilms, Film.class); desc.appendText("Found %s films through title search", Iterables.size(foundFilms)); for (Film equivFilm : foundFilms) { if(sameYear(film, equivFilm)) { Score score = Score.valueOf(titleMatch(film, equivFilm)); desc.appendText("%s (%s) scored %s", equivFilm.getTitle(), equivFilm.getCanonicalUri(), score); scores.addEquivalent(equivFilm, score); } else { desc.appendText("%s (%s) ignored. Wrong year %s", equivFilm.getTitle(), equivFilm.getCanonicalUri(), equivFilm.getYear()); scores.addEquivalent(equivFilm, Score.valueOf(0.0)); } } desc.finishStage(); return scores.build(); }
diff --git a/src/frontend/edu/brown/hstore/PartitionExecutor.java b/src/frontend/edu/brown/hstore/PartitionExecutor.java index a51805414..4beba6e61 100644 --- a/src/frontend/edu/brown/hstore/PartitionExecutor.java +++ b/src/frontend/edu/brown/hstore/PartitionExecutor.java @@ -1,5148 +1,5150 @@ /*************************************************************************** * Copyright (C) 2013 by H-Store Project * * Brown University * * Massachusetts Institute of Technology * * Yale University * * * * Permission is hereby granted, free of charge, to any person obtaining * * a copy of this software and associated documentation files (the * * "Software"), to deal in the Software without restriction, including * * without limitation the rights to use, copy, modify, merge, publish, * * distribute, sublicense, and/or sell copies of the Software, and to * * permit persons to whom the Software is furnished to do so, subject to * * the following conditions: * * * * The above copyright notice and this permission notice shall be * * included in all copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.* * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR * * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * * OTHER DEALINGS IN THE SOFTWARE. * ***************************************************************************/ /* This file is part of VoltDB. * Copyright (C) 2008-2010 VoltDB L.L.C. * * VoltDB is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * VoltDB is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with VoltDB. If not, see <http://www.gnu.org/licenses/>. 
*/ package edu.brown.hstore; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Queue; import java.util.concurrent.BlockingDeque; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import org.apache.log4j.Logger; import org.voltdb.BackendTarget; import org.voltdb.CatalogContext; import org.voltdb.ClientResponseImpl; import org.voltdb.DependencySet; import org.voltdb.HsqlBackend; import org.voltdb.MemoryStats; import org.voltdb.ParameterSet; import org.voltdb.SQLStmt; import org.voltdb.SnapshotSiteProcessor; import org.voltdb.SnapshotSiteProcessor.SnapshotTableTask; import org.voltdb.SysProcSelector; import org.voltdb.VoltProcedure; import org.voltdb.VoltProcedure.VoltAbortException; import org.voltdb.VoltSystemProcedure; import org.voltdb.VoltTable; import org.voltdb.catalog.Catalog; import org.voltdb.catalog.Cluster; import org.voltdb.catalog.Database; import org.voltdb.catalog.Host; import org.voltdb.catalog.Partition; import org.voltdb.catalog.PlanFragment; import org.voltdb.catalog.Procedure; import org.voltdb.catalog.Site; import org.voltdb.catalog.Statement; import org.voltdb.catalog.Table; import org.voltdb.exceptions.ConstraintFailureException; import org.voltdb.exceptions.EEException; import org.voltdb.exceptions.EvictedTupleAccessException; import org.voltdb.exceptions.MispredictionException; import org.voltdb.exceptions.SQLException; import org.voltdb.exceptions.SerializableException; import org.voltdb.exceptions.ServerFaultException; import org.voltdb.jni.ExecutionEngine; import org.voltdb.jni.ExecutionEngineIPC; import org.voltdb.jni.ExecutionEngineJNI; import org.voltdb.jni.MockExecutionEngine; import org.voltdb.messaging.FastDeserializer; import org.voltdb.messaging.FastSerializer; import org.voltdb.types.SpecExecSchedulerPolicyType; import org.voltdb.types.SpeculationConflictCheckerType; import org.voltdb.types.SpeculationType; import org.voltdb.utils.DBBPool; import org.voltdb.utils.DBBPool.BBContainer; import org.voltdb.utils.Encoder; import org.voltdb.utils.EstTime; import org.voltdb.utils.Pair; import com.google.protobuf.ByteString; import com.google.protobuf.RpcCallback; import edu.brown.catalog.CatalogUtil; import edu.brown.catalog.PlanFragmentIdGenerator; import edu.brown.catalog.special.CountedStatement; import edu.brown.hstore.Hstoreservice.QueryEstimate; import edu.brown.hstore.Hstoreservice.Status; import edu.brown.hstore.Hstoreservice.TransactionPrefetchResult; import edu.brown.hstore.Hstoreservice.TransactionWorkRequest; import edu.brown.hstore.Hstoreservice.TransactionWorkResponse; import edu.brown.hstore.Hstoreservice.WorkFragment; import edu.brown.hstore.Hstoreservice.WorkResult; import edu.brown.hstore.callbacks.LocalFinishCallback; import edu.brown.hstore.callbacks.LocalPrepareCallback; import edu.brown.hstore.callbacks.PartitionCountingCallback; import edu.brown.hstore.callbacks.TransactionCallback; import edu.brown.hstore.conf.HStoreConf; import edu.brown.hstore.estimators.Estimate; import edu.brown.hstore.estimators.EstimatorState; import edu.brown.hstore.estimators.EstimatorUtil; import edu.brown.hstore.estimators.TransactionEstimator; import 
edu.brown.hstore.internal.DeferredQueryMessage; import edu.brown.hstore.internal.FinishTxnMessage; import edu.brown.hstore.internal.InitializeRequestMessage; import edu.brown.hstore.internal.InitializeTxnMessage; import edu.brown.hstore.internal.InternalMessage; import edu.brown.hstore.internal.InternalTxnMessage; import edu.brown.hstore.internal.PotentialSnapshotWorkMessage; import edu.brown.hstore.internal.PrepareTxnMessage; import edu.brown.hstore.internal.SetDistributedTxnMessage; import edu.brown.hstore.internal.StartTxnMessage; import edu.brown.hstore.internal.UtilityWorkMessage; import edu.brown.hstore.internal.UtilityWorkMessage.TableStatsRequestMessage; import edu.brown.hstore.internal.UtilityWorkMessage.UpdateMemoryMessage; import edu.brown.hstore.internal.WorkFragmentMessage; import edu.brown.hstore.specexec.QueryTracker; import edu.brown.hstore.specexec.checkers.AbstractConflictChecker; import edu.brown.hstore.specexec.checkers.MarkovConflictChecker; import edu.brown.hstore.specexec.checkers.OptimisticConflictChecker; import edu.brown.hstore.specexec.checkers.TableConflictChecker; import edu.brown.hstore.specexec.checkers.UnsafeConflictChecker; import edu.brown.hstore.txns.AbstractTransaction; import edu.brown.hstore.txns.DependencyTracker; import edu.brown.hstore.txns.ExecutionState; import edu.brown.hstore.txns.LocalTransaction; import edu.brown.hstore.txns.MapReduceTransaction; import edu.brown.hstore.txns.PrefetchState; import edu.brown.hstore.txns.RemoteTransaction; import edu.brown.hstore.util.ArrayCache.IntArrayCache; import edu.brown.hstore.util.ArrayCache.LongArrayCache; import edu.brown.hstore.util.ParameterSetArrayCache; import edu.brown.hstore.util.TransactionCounter; import edu.brown.hstore.util.TransactionWorkRequestBuilder; import edu.brown.interfaces.Configurable; import edu.brown.interfaces.DebugContext; import edu.brown.interfaces.Shutdownable; import edu.brown.logging.LoggerUtil; import edu.brown.logging.LoggerUtil.LoggerBoolean; import edu.brown.markov.EstimationThresholds; import edu.brown.profilers.PartitionExecutorProfiler; import edu.brown.statistics.FastIntHistogram; import edu.brown.utils.ClassUtil; import edu.brown.utils.CollectionUtil; import edu.brown.utils.EventObservable; import edu.brown.utils.EventObserver; import edu.brown.utils.PartitionEstimator; import edu.brown.utils.PartitionSet; import edu.brown.utils.StringBoxUtil; import edu.brown.utils.StringUtil; /** * The main executor of transactional work in the system for a single partition. * Controls running stored procedures and manages the execution engine's running of plan * fragments. Interacts with the DTXN system to get work to do. The thread might * do other things, but this is where the good stuff happens. 
*/ public class PartitionExecutor implements Runnable, Configurable, Shutdownable { private static final Logger LOG = Logger.getLogger(PartitionExecutor.class); private static final LoggerBoolean debug = new LoggerBoolean(LOG.isDebugEnabled()); private static final LoggerBoolean trace = new LoggerBoolean(LOG.isTraceEnabled()); static { LoggerUtil.attachObserver(LOG, debug, trace); } private static final long WORK_QUEUE_POLL_TIME = 10; // 0.5 milliseconds private static final TimeUnit WORK_QUEUE_POLL_TIMEUNIT = TimeUnit.MICROSECONDS; private static final UtilityWorkMessage UTIL_WORK_MSG = new UtilityWorkMessage(); private static final UpdateMemoryMessage STATS_WORK_MSG = new UpdateMemoryMessage(); // ---------------------------------------------------------------------------- // INTERNAL EXECUTION STATE // ---------------------------------------------------------------------------- /** * The current execution mode for this PartitionExecutor * This defines what level of speculative execution we have enabled. */ public enum ExecutionMode { /** * Disable processing all transactions until told otherwise. * We will still accept new ones */ DISABLED, /** * Reject any transaction that tries to get added */ DISABLED_REJECT, /** * No speculative execution. All transactions are committed immediately */ COMMIT_ALL, /** * Allow read-only txns to return results. */ COMMIT_READONLY, /** * Allow non-conflicting txns to return results. */ COMMIT_NONCONFLICTING, /** * All txn responses must wait until the current distributed txn is committed */ COMMIT_NONE, }; // ---------------------------------------------------------------------------- // DATA MEMBERS // ---------------------------------------------------------------------------- private Thread self; /** * If this flag is enabled, then we need to shut ourselves down and stop running txns */ private ShutdownState shutdown_state = Shutdownable.ShutdownState.INITIALIZED; private Semaphore shutdown_latch; /** * Catalog objects */ protected final CatalogContext catalogContext; protected Site site; protected int siteId; private Partition partition; private int partitionId; private final BackendTarget backend_target; private final ExecutionEngine ee; private final HsqlBackend hsql; private final DBBPool buffer_pool = new DBBPool(false, false); private final FastSerializer fs = new FastSerializer(this.buffer_pool); /** * The PartitionEstimator is what we use to figure our what partitions each * query invocation needs to be sent to at run time. * It is deterministic. */ private final PartitionEstimator p_estimator; /** * The TransactionEstimator is the runtime piece that we use to keep track of * where a locally running transaction is in its execution workflow. 
This allows * us to make predictions about what kind of things we expect the xact to do in * the future */ private final TransactionEstimator localTxnEstimator; private EstimationThresholds thresholds = EstimationThresholds.factory(); // Each execution site manages snapshot using a SnapshotSiteProcessor private final SnapshotSiteProcessor m_snapshotter; /** * ProcedureId -> Queue<VoltProcedure> */ private final Queue<VoltProcedure>[] procedures; // ---------------------------------------------------------------------------- // H-Store Transaction Stuff // ---------------------------------------------------------------------------- private HStoreSite hstore_site; private HStoreCoordinator hstore_coordinator; private HStoreConf hstore_conf; private TransactionInitializer txnInitializer; private TransactionQueueManager queueManager; private PartitionLockQueue lockQueue; private DependencyTracker depTracker; // ---------------------------------------------------------------------------- // Work Queue // ---------------------------------------------------------------------------- /** * This is the queue of the list of things that we need to execute. * The entries may be either InitiateTaskMessages (i.e., start a stored procedure) or * WorkFragment (i.e., execute some fragments on behalf of another transaction) * We will use this special wrapper around the PartitionExecutorQueue that can determine * whether this partition is overloaded and therefore new requests should be throttled */ private final PartitionMessageQueue work_queue; // ---------------------------------------------------------------------------- // Internal Execution State // ---------------------------------------------------------------------------- /** * The transaction id of the current transaction * This is mostly used for testing and should not be relied on from the outside. */ private Long currentTxnId = null; /** * We can only have one active "parent" transaction at a time. * We can speculatively execute other transactions out of order, but the active parent * transaction will always be the same. */ private AbstractTransaction currentTxn; /** * We can only have one active distributed transactions at a time. * The multi-partition TransactionState that is currently executing at this partition * When we get the response for these txn, we know we can commit/abort the speculatively executed transactions */ private AbstractTransaction currentDtxn = null; private String lastDtxnDebug = null; /** * The current VoltProcedure handle that is executing at this partition * This will be set to null as soon as the VoltProcedure.run() method completes */ private VoltProcedure currentVoltProc = null; /** * List of messages that are blocked waiting for the outstanding dtxn to commit */ private final List<InternalMessage> currentBlockedTxns = new ArrayList<InternalMessage>(); /** * The current ExecutionMode. This defines when transactions are allowed to execute * and whether they can return their results to the client immediately or whether they * must wait until the current_dtxn commits. */ private ExecutionMode currentExecMode = ExecutionMode.COMMIT_ALL; /** * The time in ms since epoch of the last call to ExecutionEngine.tick(...) 
*/ private long lastTickTime = 0; /** * The time in ms since last stats update */ private long lastStatsTime = 0; /** * The last txn id that we executed (either local or remote) */ private volatile Long lastExecutedTxnId = null; /** * The last txn id that we committed */ private volatile Long lastCommittedTxnId = Long.valueOf(-1l); /** * The last undoToken that we handed out */ private long lastUndoToken = 0l; /** * The last undoToken that we committed at this partition */ private long lastCommittedUndoToken = -1l; // ---------------------------------------------------------------------------- // SPECULATIVE EXECUTION STATE // ---------------------------------------------------------------------------- private SpeculationConflictCheckerType specExecCheckerType; private AbstractConflictChecker specExecChecker; private SpecExecScheduler specExecScheduler; /** * ClientResponses from speculatively executed transactions that were executed * before or after the current distributed transaction finished at this partition and are * now waiting to be committed. */ private final LinkedList<Pair<LocalTransaction, ClientResponseImpl>> specExecBlocked; /** * If this flag is set to true, that means some txn has modified the database * in the current batch of speculatively executed txns. Any read-only specexec txn that * is executed when this flag is set to false can be returned to the client immediately. * TODO: This should really be a bitmap of table ids so that we have finer grain control */ private boolean specExecModified; /** * If set to true, then we should not check for speculative execution candidates * at run time. This needs to be set any time we change the currentDtxn */ private boolean specExecIgnoreCurrent = false; // ---------------------------------------------------------------------------- // SHARED VOLTPROCEDURE DATA MEMBERS // ---------------------------------------------------------------------------- /** * This is the execution state for a running transaction. 
* We have a circular queue so that we can reuse them for speculatively execute txns */ private final Queue<ExecutionState> execStates = new LinkedList<ExecutionState>(); /** * Mapping from SQLStmt batch hash codes (computed by VoltProcedure.getBatchHashCode()) to BatchPlanners * The idea is that we can quickly derived the partitions for each unique set of SQLStmt list */ private final Map<Integer, BatchPlanner> batchPlanners = new HashMap<Integer, BatchPlanner>(100); // ---------------------------------------------------------------------------- // DISTRIBUTED TRANSACTION TEMPORARY DATA COLLECTIONS // ---------------------------------------------------------------------------- /** * WorkFragments that we need to send to a remote HStoreSite for execution */ private final List<WorkFragment.Builder> tmp_remoteFragmentBuilders = new ArrayList<WorkFragment.Builder>(); /** * WorkFragments that we need to send to our own PartitionExecutor */ private final List<WorkFragment.Builder> tmp_localWorkFragmentBuilders = new ArrayList<WorkFragment.Builder>(); /** * WorkFragments that we need to send to a different PartitionExecutor that is on this same HStoreSite */ private final List<WorkFragment.Builder> tmp_localSiteFragmentBuilders = new ArrayList<WorkFragment.Builder>(); /** * Temporary space used when calling removeInternalDependencies() */ private final HashMap<Integer, List<VoltTable>> tmp_removeDependenciesMap = new HashMap<Integer, List<VoltTable>>(); /** * Remote SiteId -> TransactionWorkRequest.Builder */ private final TransactionWorkRequestBuilder tmp_transactionRequestBuilders[]; /** * PartitionId -> List<VoltTable> */ private final Map<Integer, List<VoltTable>> tmp_EEdependencies = new HashMap<Integer, List<VoltTable>>(); /** * List of serialized ParameterSets */ private final List<ByteString> tmp_serializedParams = new ArrayList<ByteString>(); /** * Histogram for the number of WorkFragments that we're going to send to partitions * in the current batch. */ private final FastIntHistogram tmp_fragmentsPerPartition = new FastIntHistogram(true); /** * Reusable int array for StmtCounters */ private final IntArrayCache tmp_stmtCounters = new IntArrayCache(10); /** * Reusable ParameterSet array cache for WorkFragments */ private final ParameterSetArrayCache tmp_fragmentParams = new ParameterSetArrayCache(5); /** * Reusable long array for fragment ids */ private final LongArrayCache tmp_fragmentIds = new LongArrayCache(10); /** * Reusable long array for fragment id offsets */ private final IntArrayCache tmp_fragmentOffsets = new IntArrayCache(10); /** * Reusable int array for output dependency ids */ private final IntArrayCache tmp_outputDepIds = new IntArrayCache(10); /** * Reusable int array for input dependency ids */ private final IntArrayCache tmp_inputDepIds = new IntArrayCache(10); /** * The following three arrays are used by utilityWork() to create transactions * for deferred queries */ private final SQLStmt[] tmp_def_stmt = new SQLStmt[1]; private final ParameterSet[] tmp_def_params = new ParameterSet[1]; private LocalTransaction tmp_def_txn; // ---------------------------------------------------------------------------- // INTERNAL CLASSES // ---------------------------------------------------------------------------- private class DonePartitionsNotification { /** * All of the partitions that a transaction is currently done with. */ private final PartitionSet donePartitions = new PartitionSet(); /** * RemoteSiteId -> Partitions that we need to notify that this txn is done with. 
*/ private PartitionSet[] notificationsPerSite; /** * Site ids that we need to notify separately about the done partitions. */ private Collection<Integer> _sitesToNotify; public void addSiteNotification(Site remoteSite, int partitionId, boolean noQueriesInBatch) { int remoteSiteId = remoteSite.getId(); if (this.notificationsPerSite == null) { this.notificationsPerSite = new PartitionSet[catalogContext.numberOfSites]; } if (this.notificationsPerSite[remoteSiteId] == null) { this.notificationsPerSite[remoteSiteId] = new PartitionSet(); } this.notificationsPerSite[remoteSiteId].add(partitionId); if (noQueriesInBatch) { if (this._sitesToNotify == null) { this._sitesToNotify = new HashSet<Integer>(); } this._sitesToNotify.add(Integer.valueOf(remoteSiteId)); } } /** * Return the set of partitions that needed to be notified separately * for the given site id. The return value may be null. * @param remoteSiteId * @return */ public PartitionSet getNotifications(int remoteSiteId) { if (this.notificationsPerSite != null) { return (this.notificationsPerSite[remoteSiteId]); } return (null); } public boolean hasSitesToNotify() { return (this._sitesToNotify != null && this._sitesToNotify.isEmpty() == false); } } // ---------------------------------------------------------------------------- // PROFILING OBJECTS // ---------------------------------------------------------------------------- private final PartitionExecutorProfiler profiler = new PartitionExecutorProfiler(); // ---------------------------------------------------------------------------- // WORK REQUEST CALLBACK // ---------------------------------------------------------------------------- /** * This will be invoked for each TransactionWorkResponse that comes back from * the remote HStoreSites. Note that we don't need to do any counting as to whether * a transaction has gotten back all of the responses that it expected. That logic is down * below in waitForResponses() */ private final RpcCallback<TransactionWorkResponse> request_work_callback = new RpcCallback<TransactionWorkResponse>() { @Override public void run(TransactionWorkResponse msg) { Long txn_id = msg.getTransactionId(); LocalTransaction ts = hstore_site.getTransaction(txn_id); // We can ignore anything that comes in for a transaction that we don't know about if (ts == null) { if (debug.val) LOG.debug("No transaction state exists for txn #" + txn_id); return; } if (debug.val) LOG.debug(String.format("Processing TransactionWorkResponse for %s with %d results%s", ts, msg.getResultsCount(), (trace.val ? "\n"+msg : ""))); for (int i = 0, cnt = msg.getResultsCount(); i < cnt; i++) { WorkResult result = msg.getResults(i); if (debug.val) LOG.debug(String.format("Got %s from partition %d for %s", result.getClass().getSimpleName(), result.getPartitionId(), ts)); PartitionExecutor.this.processWorkResult(ts, result); } // FOR if (hstore_conf.site.specexec_enable) { specExecScheduler.interruptSearch(UTIL_WORK_MSG); } } }; // END CLASS // ---------------------------------------------------------------------------- // SYSPROC STUFF // ---------------------------------------------------------------------------- // Associate the system procedure planfragment ids to wrappers. // Planfragments are registered when the procedure wrapper is init()'d. 
private final Map<Long, VoltSystemProcedure> m_registeredSysProcPlanFragments = new HashMap<Long, VoltSystemProcedure>(); public void registerPlanFragment(final long pfId, final VoltSystemProcedure proc) { synchronized (m_registeredSysProcPlanFragments) { if (!m_registeredSysProcPlanFragments.containsKey(pfId)) { assert(m_registeredSysProcPlanFragments.containsKey(pfId) == false) : "Trying to register the same sysproc more than once: " + pfId; m_registeredSysProcPlanFragments.put(pfId, proc); if (trace.val) LOG.trace(String.format("Registered %s sysproc handle at partition %d for FragmentId #%d", VoltSystemProcedure.procCallName(proc.getClass()), partitionId, pfId)); } } // SYNCH } /** * SystemProcedures are "friends" with PartitionExecutors and granted * access to internal state via m_systemProcedureContext. */ public interface SystemProcedureExecutionContext { public Catalog getCatalog(); public Database getDatabase(); public Cluster getCluster(); public Site getSite(); public Host getHost(); public ExecutionEngine getExecutionEngine(); public long getLastCommittedTxnId(); public PartitionExecutor getPartitionExecutor(); public HStoreSite getHStoreSite(); public Long getCurrentTxnId(); } protected class SystemProcedureContext implements SystemProcedureExecutionContext { public Catalog getCatalog() { return catalogContext.catalog; } public Database getDatabase() { return catalogContext.database; } public Cluster getCluster() { return catalogContext.cluster; } public Site getSite() { return site; } public Host getHost() { return site.getHost(); } public ExecutionEngine getExecutionEngine() { return ee; } public long getLastCommittedTxnId() { return lastCommittedTxnId; } public PartitionExecutor getPartitionExecutor() { return PartitionExecutor.this; } public HStoreSite getHStoreSite() { return hstore_site; } public Long getCurrentTxnId() { return PartitionExecutor.this.currentTxnId; } } private final SystemProcedureContext m_systemProcedureContext = new SystemProcedureContext(); // ---------------------------------------------------------------------------- // INITIALIZATION // ---------------------------------------------------------------------------- /** * Dummy constructor... */ protected PartitionExecutor() { this.catalogContext = null; this.work_queue = null; this.ee = null; this.hsql = null; this.specExecChecker = null; this.specExecScheduler = null; this.specExecBlocked = null; this.p_estimator = null; this.localTxnEstimator = null; this.m_snapshotter = null; this.thresholds = null; this.site = null; this.backend_target = BackendTarget.HSQLDB_BACKEND; this.siteId = 0; this.partitionId = 0; this.procedures = null; this.tmp_transactionRequestBuilders = null; } /** * Initialize the stored procedure runner and EE for this partition. * @param partitionId * @param catalogContext * @param target * @param p_estimator * @param t_estimator 
*/ public PartitionExecutor(final int partitionId, final CatalogContext catalogContext, final BackendTarget target, final PartitionEstimator p_estimator, final TransactionEstimator t_estimator) { this.hstore_conf = HStoreConf.singleton(); this.work_queue = new PartitionMessageQueue(); this.backend_target = target; this.catalogContext = catalogContext; this.partition = catalogContext.getPartitionById(partitionId); assert(this.partition != null) : "Invalid Partition #" + partitionId; this.partitionId = this.partition.getId(); this.site = this.partition.getParent(); assert(site != null) : "Unable to get Site for Partition #" + partitionId; this.siteId = this.site.getId(); this.lastUndoToken = this.partitionId * 1000000; this.p_estimator = p_estimator; this.localTxnEstimator = t_estimator; // Speculative Execution this.specExecBlocked = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); this.specExecModified = false; // VoltProcedure Queues @SuppressWarnings("unchecked") Queue<VoltProcedure> voltProcQueues[] = new Queue[catalogContext.procedures.size()+1]; this.procedures = voltProcQueues; // An execution site can be backed by HSQLDB, by volt's EE accessed // via JNI or by volt's EE accessed via IPC. When backed by HSQLDB, // the VoltProcedure interface invokes HSQLDB directly through its // hsql Backend member variable. The real volt backend is encapsulated // by the ExecutionEngine class. This class has implementations for both // JNI and IPC - and selects the desired implementation based on the // value of this.eeBackend. HsqlBackend hsqlTemp = null; ExecutionEngine eeTemp = null; SnapshotSiteProcessor snapshotter = null; try { if (trace.val) LOG.trace("Creating EE wrapper with target type '" + target + "'"); if (this.backend_target == BackendTarget.HSQLDB_BACKEND) { hsqlTemp = new HsqlBackend(partitionId); final String hexDDL = catalogContext.database.getSchema(); final String ddl = Encoder.hexDecodeToString(hexDDL); final String[] commands = ddl.split(";"); for (String command : commands) { if (command.length() == 0) { continue; } hsqlTemp.runDDL(command); } eeTemp = new MockExecutionEngine(); } else if (target == BackendTarget.NATIVE_EE_JNI) { org.voltdb.EELibraryLoader.loadExecutionEngineLibrary(true); // set up the EE eeTemp = new ExecutionEngineJNI(this, catalogContext.cluster.getRelativeIndex(), this.getSiteId(), this.getPartitionId(), this.site.getHost().getId(), "localhost"); // Initialize Anti-Cache if (hstore_conf.site.anticache_enable) { File acFile = AntiCacheManager.getDatabaseDir(this); long blockSize = hstore_conf.site.anticache_block_size; eeTemp.antiCacheInitialize(acFile, blockSize); } eeTemp.loadCatalog(catalogContext.catalog.serialize()); this.lastTickTime = System.currentTimeMillis(); eeTemp.tick(this.lastTickTime, 0); snapshotter = new SnapshotSiteProcessor(new Runnable() { final PotentialSnapshotWorkMessage msg = new PotentialSnapshotWorkMessage(); @Override public void run() { PartitionExecutor.this.work_queue.add(this.msg); } }); } else { // set up the EE over IPC eeTemp = new ExecutionEngineIPC(this, catalogContext.cluster.getRelativeIndex(), this.getSiteId(), this.getPartitionId(), this.site.getHost().getId(), "localhost", target); eeTemp.loadCatalog(catalogContext.catalog.serialize()); this.lastTickTime = System.currentTimeMillis(); eeTemp.tick(this.lastTickTime, 0); } } // just print error info an bail if we run into an error here catch (final Exception ex) { throw new ServerFaultException("Failed to initialize PartitionExecutor", ex); } this.ee = 
eeTemp; this.hsql = hsqlTemp; m_snapshotter = snapshotter; assert(this.ee != null); assert(!(this.ee == null && this.hsql == null)) : "Both execution engine objects are empty. This should never happen"; // Initialize temporary data structures int num_sites = this.catalogContext.numberOfSites; this.tmp_transactionRequestBuilders = new TransactionWorkRequestBuilder[num_sites]; } /** * Link this PartitionExecutor with its parent HStoreSite * This will initialize the references the various components shared among the PartitionExecutors * @param hstore_site */ public void initHStoreSite(HStoreSite hstore_site) { if (trace.val) LOG.trace(String.format("Initializing HStoreSite components at partition %d", this.partitionId)); assert(this.hstore_site == null) : String.format("Trying to initialize HStoreSite for PartitionExecutor #%d twice!", this.partitionId); this.hstore_site = hstore_site; this.depTracker = hstore_site.getDependencyTracker(this.partitionId); this.thresholds = hstore_site.getThresholds(); this.txnInitializer = hstore_site.getTransactionInitializer(); this.queueManager = hstore_site.getTransactionQueueManager(); this.lockQueue = this.queueManager.getLockQueue(this.partitionId); if (hstore_conf.site.exec_deferrable_queries) { tmp_def_txn = new LocalTransaction(hstore_site); } // ------------------------------- // BENCHMARK START NOTIFICATIONS // ------------------------------- // Poke ourselves to update the partition stats when the first // non-sysproc procedure shows up. I forget why we need to do this... EventObservable<HStoreSite> observable = this.hstore_site.getStartWorkloadObservable(); observable.addObserver(new EventObserver<HStoreSite>() { @Override public void update(EventObservable<HStoreSite> o, HStoreSite arg) { queueUtilityWork(STATS_WORK_MSG); } }); // Reset our profiling information when we get the first non-sysproc this.profiler.resetOnEventObservable(observable); // Initialize speculative execution scheduler this.initSpecExecScheduler(); } /** * Initialize this PartitionExecutor' speculative execution scheduler */ private void initSpecExecScheduler() { assert(this.specExecScheduler == null); assert(this.hstore_site != null); this.specExecCheckerType = SpeculationConflictCheckerType.get(hstore_conf.site.specexec_scheduler_checker); switch (this.specExecCheckerType) { // ------------------------------- // ROW-LEVEL // ------------------------------- case MARKOV: // The MarkovConflictChecker is thread-safe, so we all of the partitions // at this site can reuse the same one. this.specExecChecker = MarkovConflictChecker.singleton(this.catalogContext, this.thresholds); break; // ------------------------------- // TABLE-LEVEL // ------------------------------- case TABLE: this.specExecChecker = new TableConflictChecker(this.catalogContext); break; // ------------------------------- // UNSAFE // NOTE: You probably don't want to use this! // ------------------------------- case UNSAFE: this.specExecChecker = new UnsafeConflictChecker(this.catalogContext, hstore_conf.site.specexec_unsafe_limit); LOG.warn(String.format("Using %s in the %s for partition %d. This is a bad idea!", this.specExecChecker.getClass().getSimpleName(), this.getClass().getSimpleName(), this.partitionId)); break; // ------------------------------- // OPTIMISTIC // ------------------------------- case OPTIMISTIC: this.specExecChecker = new OptimisticConflictChecker(this.catalogContext, this.ee); break; // BUSTED! 
default: { String msg = String.format("Invalid %s '%s'", SpeculationConflictCheckerType.class.getSimpleName(), hstore_conf.site.specexec_scheduler_checker); throw new RuntimeException(msg); } } // SWITCH SpecExecSchedulerPolicyType policy = SpecExecSchedulerPolicyType.get(hstore_conf.site.specexec_scheduler_policy); assert(policy != null) : String.format("Invalid %s '%s'", SpecExecSchedulerPolicyType.class.getSimpleName(), hstore_conf.site.specexec_scheduler_policy); assert(this.lockQueue.getPartitionId() == this.partitionId); this.specExecScheduler = new SpecExecScheduler(this.specExecChecker, this.partitionId, this.lockQueue, policy, hstore_conf.site.specexec_scheduler_window); this.specExecChecker.setEstimationThresholds(this.thresholds); this.specExecScheduler.updateConf(hstore_conf, null); if (debug.val && hstore_conf.site.specexec_enable) LOG.debug(String.format("Initialized %s for partition %d [checker=%s, policy=%s]", this.specExecScheduler.getClass().getSimpleName(), this.partitionId, this.specExecChecker.getClass().getSimpleName(), policy)); } private ExecutionState initExecutionState() { ExecutionState state = this.execStates.poll(); if (state == null) { state = new ExecutionState(this); } return (state); } @Override public void updateConf(HStoreConf hstore_conf, String[] changed) { if (this.specExecScheduler != null) { this.specExecScheduler.updateConf(hstore_conf, changed); } } // ---------------------------------------------------------------------------- // MAIN EXECUTION LOOP // ---------------------------------------------------------------------------- /** * Primary run method that is invoked a single time when the thread is started. * Has the opportunity to do startup config. */ @Override public final void run() { if (this.hstore_site == null) { String msg = String.format("Trying to start %s for partition %d before its HStoreSite was initialized", this.getClass().getSimpleName(), this.partitionId); throw new RuntimeException(msg); } else if (this.self != null) { String msg = String.format("Trying to restart %s for partition %d after it was already running", this.getClass().getSimpleName(), this.partitionId); throw new RuntimeException(msg); } // Initialize all of our VoltProcedures handles // This needs to be done here so that the Workload trace handles can be // set up properly this.initializeVoltProcedures(); this.self = Thread.currentThread(); this.self.setName(HStoreThreadManager.getThreadName(this.hstore_site, this.partitionId)); this.hstore_coordinator = hstore_site.getCoordinator(); this.hstore_site.getThreadManager().registerEEThread(partition); this.shutdown_latch = new Semaphore(0); this.shutdown_state = ShutdownState.STARTED; if (hstore_conf.site.exec_profiling) profiler.start_time = System.currentTimeMillis(); assert(this.hstore_site != null); assert(this.hstore_coordinator != null); assert(this.specExecScheduler != null); assert(this.queueManager != null); // *********************************** DEBUG *********************************** if (hstore_conf.site.exec_validate_work) { LOG.warn("Enabled Distributed Transaction Validation Checker"); } // *********************************** DEBUG *********************************** // Things that we will need in the loop below InternalMessage nextWork = null; AbstractTransaction nextTxn = null; if (debug.val) LOG.debug("Starting PartitionExecutor run loop..."); try { while (this.shutdown_state == ShutdownState.STARTED) { this.currentTxnId = null; nextTxn = null; nextWork = null; // This is the starting state of the 
PartitionExecutor. // At this point we currently don't have a txn to execute nor // are we involved in a distributed txn running at another partition. // So we need to go to our PartitionLockQueue and get back the next // txn that will have our lock. if (this.currentDtxn == null) { this.tick(); if (hstore_conf.site.exec_profiling) profiler.poll_time.start(); try { nextTxn = this.queueManager.checkLockQueue(this.partitionId); // NON-BLOCKING } finally { if (hstore_conf.site.exec_profiling) profiler.poll_time.stopIfStarted(); } // If we get something back here, then it should become our current transaction. if (nextTxn != null) { // If it's a single-partition txn, then we can return the StartTxnMessage // so that we can fire it off right away. if (nextTxn.isPredictSinglePartition()) { LocalTransaction localTxn = (LocalTransaction)nextTxn; nextWork = localTxn.getStartTxnMessage(); if (hstore_conf.site.txn_profiling && localTxn.profiler != null) localTxn.profiler.startQueueExec(); } // If it's a distributed txn, then we'll want to just set it as our // current dtxn at this partition and then keep checking the queue // for more work. else { this.setCurrentDtxn(nextTxn); } } } // ------------------------------- // Poll Work Queue // ------------------------------- // Check if we have anything to do right now if (nextWork == null) { if (hstore_conf.site.exec_profiling) profiler.idle_time.start(); try { // If we're allowed to speculatively execute txns, then we don't want to have // to wait to see if anything will show up in our work queue. if (hstore_conf.site.specexec_enable && this.lockQueue.approximateIsEmpty() == false) { nextWork = this.work_queue.poll(); } else { nextWork = this.work_queue.poll(WORK_QUEUE_POLL_TIME, WORK_QUEUE_POLL_TIMEUNIT); } } catch (InterruptedException ex) { continue; } finally { if (hstore_conf.site.exec_profiling) profiler.idle_time.stopIfStarted(); } } // ------------------------------- // Process Work // ------------------------------- if (nextWork != null) { if (trace.val) LOG.trace("Next Work: " + nextWork); if (hstore_conf.site.exec_profiling) { profiler.numMessages.put(nextWork.getClass().getSimpleName()); profiler.exec_time.start(); if (this.currentDtxn != null) profiler.sp2_time.stopIfStarted(); } try { this.processInternalMessage(nextWork); } finally { if (hstore_conf.site.exec_profiling) { profiler.exec_time.stopIfStarted(); if (this.currentDtxn != null) profiler.sp2_time.start(); } } if (this.currentTxnId != null) this.lastExecutedTxnId = this.currentTxnId; } // Check if we have any utility work to do while we wait else if (hstore_conf.site.specexec_enable) { // if (trace.val) // LOG.trace(String.format("The %s for partition %s empty. 
Checking for utility work...", // this.work_queue.getClass().getSimpleName(), this.partitionId)); if (this.utilityWork()) { nextWork = UTIL_WORK_MSG; } } } // WHILE } catch (final Throwable ex) { if (this.isShuttingDown() == false) { // ex.printStackTrace(); LOG.fatal(String.format("Unexpected error at partition %d [current=%s, lastDtxn=%s]", this.partitionId, this.currentTxn, this.lastDtxnDebug), ex); if (this.currentTxn != null) LOG.fatal("TransactionState Dump:\n" + this.currentTxn.debug()); } this.shutdown_latch.release(); this.hstore_coordinator.shutdownClusterBlocking(ex); } finally { if (debug.val) { String txnDebug = ""; if (this.currentTxn != null && this.currentTxn.getBasePartition() == this.partitionId) { txnDebug = " while a txn is still running\n" + this.currentTxn.debug(); } LOG.warn(String.format("PartitionExecutor %d is stopping%s%s", this.partitionId, (this.currentTxnId != null ? " In-Flight Txn: #" + this.currentTxnId : ""), txnDebug)); } // Release the shutdown latch in case anybody waiting for us this.shutdown_latch.release(); } } /** * Special function that allows us to do some utility work while * we are waiting for a response or something real to do. * Note: this tracks how long the system spends doing utility work. It would * be interesting to have the system report on this before it shuts down. * @return true if there is more utility work that can be done */ private boolean utilityWork() { if (hstore_conf.site.exec_profiling) this.profiler.util_time.start(); // ------------------------------- // Poll Lock Queue // ------------------------------- LocalTransaction specTxn = null; InternalMessage work = null; // Check whether there is something we can speculatively execute right now if (this.specExecIgnoreCurrent == false && this.lockQueue.approximateIsEmpty() == false) { // if (trace.val) // LOG.trace(String.format("Checking %s for something to do at partition %d while %s", // this.specExecScheduler.getClass().getSimpleName(), // this.partitionId, // (this.currentDtxn != null ? "blocked on " + this.currentDtxn : "idle"))); assert(hstore_conf.site.specexec_enable) : "Trying to schedule speculative txn even though it is disabled"; SpeculationType specType = this.calculateSpeculationType(); if (hstore_conf.site.exec_profiling) this.profiler.conflicts_time.start(); try { specTxn = this.specExecScheduler.next(this.currentDtxn, specType); } finally { if (hstore_conf.site.exec_profiling) this.profiler.conflicts_time.stopIfStarted(); } // Because we don't have fine-grained undo support, we are just going // keep all of our speculative execution txn results around if (specTxn != null) { // TODO: What we really want to do is check to see whether we have anything // in our work queue before we go ahead and fire off this txn if (debug.val) { if (this.work_queue.isEmpty() == false) { LOG.warn(String.format("About to speculatively execute %s on partition %d but there " + "are %d messages in the work queue\n%s", specTxn, this.partitionId, this.work_queue.size(), CollectionUtil.first(this.work_queue))); } LOG.debug(String.format("Utility Work found speculative txn to execute on " + "partition %d [%s, specType=%s]", this.partitionId, specTxn, specType)); // IMPORTANT: We need to make sure that we remove this transaction for the lock queue // before we execute it so that we don't try to run it again. 
// We have to do this now because otherwise we may get the same transaction again assert(this.lockQueue.contains(specTxn.getTransactionId()) == false) : String.format("Failed to remove speculative %s before executing", specTxn); } assert(specTxn.getBasePartition() == this.partitionId) : String.format("Trying to speculatively execute %s at partition %d but its base partition is %d\n%s", specTxn, this.partitionId, specTxn.getBasePartition(), specTxn.debug()); assert(specTxn.isMarkExecuted() == false) : String.format("Trying to speculatively execute %s at partition %d but was already executed\n%s", specTxn, this.partitionId, specTxn.getBasePartition(), specTxn.debug()); assert(specTxn.isSpeculative() == false) : String.format("Trying to speculatively execute %s at partition %d but was already speculative\n%s", specTxn, this.partitionId, specTxn.getBasePartition(), specTxn.debug()); // It's also important that we cancel this txn's init queue callback, otherwise // it will never get cleaned up properly. This is necessary in order to support // sending out client results *before* the dtxn finishes specTxn.getInitCallback().cancel(); // Ok now that that's out of the way, let's run this baby... specTxn.setSpeculative(specType); if (hstore_conf.site.exec_profiling) profiler.specexec_time.start(); try { this.executeTransaction(specTxn); } finally { if (hstore_conf.site.exec_profiling) profiler.specexec_time.stopIfStarted(); } } // else if (trace.val) { // LOG.trace(String.format("%s - No speculative execution candidates found at partition %d [queueSize=%d]", // this.currentDtxn, this.partitionId, this.queueManager.getLockQueue(this.partitionId).size())); // } } // else if (trace.val && this.currentDtxn != null) { // LOG.trace(String.format("%s - Skipping check for speculative execution txns at partition %d " + // "[lockQueue=%d, specExecIgnoreCurrent=%s]", // this.currentDtxn, this.partitionId, this.lockQueue.size(), this.specExecIgnoreCurrent)); // } if (hstore_conf.site.exec_profiling) this.profiler.util_time.stopIfStarted(); return (specTxn != null || work != null); } // ---------------------------------------------------------------------------- // MESSAGE PROCESSING METHODS // ---------------------------------------------------------------------------- /** * Process an InternalMessage * @param work */ private final void processInternalMessage(InternalMessage work) { // ------------------------------- // TRANSACTIONAL WORK // ------------------------------- if (work instanceof InternalTxnMessage) { this.processInternalTxnMessage((InternalTxnMessage)work); } // ------------------------------- // UTILITY WORK // ------------------------------- else if (work instanceof UtilityWorkMessage) { // UPDATE MEMORY STATS if (work instanceof UpdateMemoryMessage) { this.updateMemoryStats(EstTime.currentTimeMillis()); } // TABLE STATS REQUEST else if (work instanceof TableStatsRequestMessage) { TableStatsRequestMessage stats_work = (TableStatsRequestMessage)work; VoltTable results[] = this.ee.getStats(SysProcSelector.TABLE, stats_work.getLocators(), false, EstTime.currentTimeMillis()); assert(results.length == 1); stats_work.getObservable().notifyObservers(results[0]); } else { // IGNORE } } // ------------------------------- // TRANSACTION INITIALIZATION // ------------------------------- else if (work instanceof InitializeRequestMessage) { this.processInitializeRequestMessage((InitializeRequestMessage)work); } // ------------------------------- // DEFERRED QUERIES // ------------------------------- else if 
(work instanceof DeferredQueryMessage) { DeferredQueryMessage def_work = (DeferredQueryMessage)work; // Set the txnId in our handle to be what the original txn was that // deferred this query. tmp_def_stmt[0] = def_work.getStmt(); tmp_def_params[0] = def_work.getParams(); tmp_def_txn.init(def_work.getTxnId(), -1, // We don't really need the clientHandle EstTime.currentTimeMillis(), this.partitionId, catalogContext.getPartitionSetSingleton(this.partitionId), false, false, tmp_def_stmt[0].getProcedure(), def_work.getParams(), null // We don't need the client callback ); this.executeSQLStmtBatch(tmp_def_txn, 1, tmp_def_stmt, tmp_def_params, false, false); } // ------------------------------- // SNAPSHOT WORK // ------------------------------- else if (work instanceof PotentialSnapshotWorkMessage) { m_snapshotter.doSnapshotWork(ee); } // ------------------------------- // BAD MOJO! // ------------------------------- else { String msg = "Unexpected work message in queue: " + work; throw new ServerFaultException(msg, this.currentTxnId); } } /** * Process an InitializeRequestMessage * @param work */ protected void processInitializeRequestMessage(InitializeRequestMessage work) { LocalTransaction ts = this.txnInitializer.createLocalTransaction( work.getSerializedRequest(), work.getInitiateTime(), work.getClientHandle(), this.partitionId, work.getProcedure(), work.getProcParams(), work.getClientCallback()); // ------------------------------- // SINGLE-PARTITION TRANSACTION // ------------------------------- if (ts.isPredictSinglePartition() && ts.isMapReduce() == false && ts.isSysProc() == false) { if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startQueueExec(); // If we are in the middle of a distributed txn at this partition, then we can't // just go and fire off this txn. We actually need to use our SpecExecScheduler to // decide whether it is safe to speculatively execute this txn. But the problem is that // the SpecExecScheduler is only examining the work queue when utilityWork() is called // But it will never be called at this point because if we add this txn back to the queue // it will get picked up right away. 
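// NOTE: If a distributed txn currently owns this partition, we block this single-partition txn until that dtxn finishes; otherwise we can execute it right away on this thread.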
if (this.currentDtxn != null) { this.blockTransaction(ts); } else { this.executeTransaction(ts); } } // ------------------------------- // DISTRIBUTED TRANSACTION // ------------------------------- else { if (debug.val) LOG.debug(ts + " - Queuing up txn at local HStoreSite for further processing"); this.hstore_site.transactionQueue(ts); } } /** * Process an InternalTxnMessage * @param work */ private void processInternalTxnMessage(InternalTxnMessage work) { AbstractTransaction ts = work.getTransaction(); this.currentTxn = ts; this.currentTxnId = ts.getTransactionId(); // If this transaction has already been aborted and they are trying to give us // something that isn't a FinishTaskMessage, then we won't bother processing it if (ts.isAborted() && (work instanceof FinishTxnMessage) == false) { if (debug.val) LOG.debug(String.format("%s - Cannot process %s on partition %d because txn was marked as aborted", ts, work.getClass().getSimpleName(), this.partitionId)); return; } if (debug.val) LOG.debug(String.format("Processing %s at partition %d", work, this.partitionId)); // ------------------------------- // Start Transaction // ------------------------------- if (work instanceof StartTxnMessage) { if (hstore_conf.site.specexec_enable && ts.isPredictSinglePartition()) this.specExecScheduler.reset(); if (hstore_conf.site.exec_profiling) profiler.txn_time.start(); try { this.executeTransaction((LocalTransaction)ts); } finally { if (hstore_conf.site.exec_profiling) profiler.txn_time.stopIfStarted(); } } // ------------------------------- // Execute Query Plan Fragments // ------------------------------- else if (work instanceof WorkFragmentMessage) { WorkFragment fragment = ((WorkFragmentMessage)work).getFragment(); assert(fragment != null); // HACK HACK HACK if (ts.isInitialized() == false) { LOG.warn(String.format("Skipping %s at partition %d for unitialized txn", work.getClass().getSimpleName(), this.partitionId)); return; } // Get the ParameterSet array for this WorkFragment // It can either be attached to the AbstractTransaction handle if it came // over the wire directly from the txn's base partition, or it can be attached // as for prefetch WorkFragments ParameterSet parameters[] = null; if (fragment.getPrefetch()) { parameters = ts.getPrefetchParameterSets(); ts.markExecPrefetchQuery(this.partitionId); if (trace.val && ts.isSysProc() == false) LOG.trace(ts + " - Prefetch Parameters:\n" + StringUtil.join("\n", parameters)); } else { parameters = ts.getAttachedParameterSets(); if (trace.val && ts.isSysProc() == false) LOG.trace(ts + " - Attached Parameters:\n" + StringUtil.join("\n", parameters)); } // At this point we know that we are either the current dtxn or the current dtxn is null // We will allow any read-only transaction to commit if // (1) The WorkFragment for the remote txn is read-only // (2) This txn has always been read-only up to this point at this partition ExecutionMode newMode = null; if (hstore_conf.site.specexec_enable) { if (fragment.getReadOnly() && ts.isExecReadOnly(this.partitionId)) { newMode = ExecutionMode.COMMIT_READONLY ; } else { newMode = ExecutionMode.COMMIT_NONE; } } else { newMode = ExecutionMode.DISABLED; } // There is no current DTXN, so that means its us! if (this.currentDtxn == null) { this.setCurrentDtxn(ts); if (debug.val) LOG.debug(String.format("Marking %s as current DTXN on partition %d [nextMode=%s]", ts, this.partitionId, newMode)); } // There is a current DTXN but it's not us! 
// That means we need to block ourselves until it finishes else if (this.currentDtxn != ts) { if (debug.val) LOG.debug(String.format("%s - Blocking on partition %d until current Dtxn %s finishes", ts, this.partitionId, this.currentDtxn)); this.blockTransaction(work); return; } assert(this.currentDtxn == ts) : String.format("Trying to execute a second Dtxn %s before the current one has finished [current=%s]", ts, this.currentDtxn); this.setExecutionMode(ts, newMode); this.processWorkFragment(ts, fragment, parameters); } // ------------------------------- // Finish Transaction // ------------------------------- else if (work instanceof FinishTxnMessage) { FinishTxnMessage ftask = (FinishTxnMessage)work; this.finishDistributedTransaction(ftask.getTransaction(), ftask.getStatus()); } // ------------------------------- // Prepare Transaction // ------------------------------- else if (work instanceof PrepareTxnMessage) { PrepareTxnMessage ftask = (PrepareTxnMessage)work; // assert(this.currentDtxn.equals(ftask.getTransaction())) : // String.format("The current dtxn %s does not match %s given in the %s", // this.currentTxn, ftask.getTransaction(), ftask.getClass().getSimpleName()); this.prepareTransaction(ftask.getTransaction()); } // ------------------------------- // Set Distributed Transaction // ------------------------------- else if (work instanceof SetDistributedTxnMessage) { if (this.currentDtxn != null) { this.blockTransaction(work); } else { this.setCurrentDtxn(((SetDistributedTxnMessage)work).getTransaction()); } } // ------------------------------- // Add Transaction to Lock Queue // ------------------------------- else if (work instanceof InitializeTxnMessage) { this.queueManager.lockQueueInsert(ts, this.partitionId, ts.getInitCallback()); } } // ---------------------------------------------------------------------------- // DATA MEMBER METHODS // ---------------------------------------------------------------------------- public final ExecutionEngine getExecutionEngine() { return (this.ee); } public final Thread getExecutionThread() { return (this.self); } public final HsqlBackend getHsqlBackend() { return (this.hsql); } public final PartitionEstimator getPartitionEstimator() { return (this.p_estimator); } public final TransactionEstimator getTransactionEstimator() { return (this.localTxnEstimator); } public final BackendTarget getBackendTarget() { return (this.backend_target); } public final HStoreSite getHStoreSite() { return (this.hstore_site); } public final HStoreConf getHStoreConf() { return (this.hstore_conf); } public final CatalogContext getCatalogContext() { return (this.catalogContext); } public final int getSiteId() { return (this.siteId); } public final Partition getPartition() { return (this.partition); } public final int getPartitionId() { return (this.partitionId); } public final DependencyTracker getDependencyTracker() { return (this.depTracker); } public final PartitionExecutorProfiler getProfiler() { return profiler; } // ---------------------------------------------------------------------------- // VOLT PROCEDURE HELPER METHODS // ---------------------------------------------------------------------------- protected void initializeVoltProcedures() { // load up all the stored procedures for (final Procedure catalog_proc : catalogContext.procedures) { VoltProcedure volt_proc = this.initializeVoltProcedure(catalog_proc); Queue<VoltProcedure> queue = new LinkedList<VoltProcedure>(); queue.add(volt_proc); this.procedures[catalog_proc.getId()] = queue; } // FOR } 
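/** * Construct a VoltProcedure handle for the given catalog Procedure. * Procedures with a Java implementation are instantiated reflectively from their class name; * plan-only procedures fall back to the generic VoltProcedure.StmtProcedure wrapper. * @param catalog_proc */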
@SuppressWarnings("unchecked") protected VoltProcedure initializeVoltProcedure(Procedure catalog_proc) { VoltProcedure volt_proc = null; if (catalog_proc.getHasjava()) { // Only try to load the Java class file for the SP if it has one Class<? extends VoltProcedure> p_class = null; final String className = catalog_proc.getClassname(); try { p_class = (Class<? extends VoltProcedure>)Class.forName(className); volt_proc = (VoltProcedure)p_class.newInstance(); } catch (Exception e) { throw new ServerFaultException("Failed to created VoltProcedure instance for " + catalog_proc.getName() , e); } } else { volt_proc = new VoltProcedure.StmtProcedure(); } volt_proc.globalInit(PartitionExecutor.this, catalog_proc, this.backend_target, this.hsql, this.p_estimator); return (volt_proc); } /** * Returns a new VoltProcedure instance for a given stored procedure name * <B>Note:</B> You will get a new VoltProcedure for each invocation * @param proc_name * @return */ protected VoltProcedure getVoltProcedure(int proc_id) { VoltProcedure voltProc = this.procedures[proc_id].poll(); if (voltProc == null) { Procedure catalog_proc = catalogContext.getProcedureById(proc_id); voltProc = this.initializeVoltProcedure(catalog_proc); } return (voltProc); } /** * Return the given VoltProcedure back into the queue to be re-used again * @param voltProc */ protected void finishVoltProcedure(VoltProcedure voltProc) { voltProc.finish(); this.procedures[voltProc.getProcedureId()].offer(voltProc); } // ---------------------------------------------------------------------------- // UTILITY METHODS // ---------------------------------------------------------------------------- private void tick() { // invoke native ee tick if at least one second has passed final long time = EstTime.currentTimeMillis(); long elapsed = time - this.lastTickTime; if (elapsed >= 1000) { if ((this.lastTickTime != 0) && (this.ee != null)) { this.ee.tick(time, this.lastCommittedTxnId); // do other periodic work if (m_snapshotter != null) m_snapshotter.doSnapshotWork(this.ee); if ((time - this.lastStatsTime) >= 20000) { this.updateMemoryStats(time); } } this.lastTickTime = time; } } private void updateMemoryStats(long time) { if (trace.val) LOG.trace("Updating memory stats for partition " + this.partitionId); Collection<Table> tables = this.catalogContext.database.getTables(); int[] tableIds = new int[tables.size()]; int i = 0; for (Table table : tables) { tableIds[i++] = table.getRelativeIndex(); } // data to aggregate long tupleCount = 0; @SuppressWarnings("unused") long tupleAccessCount = 0; int tupleDataMem = 0; int tupleAllocatedMem = 0; int indexMem = 0; int stringMem = 0; // ACTIVE long tuplesEvicted = 0; long blocksEvicted = 0; long bytesEvicted = 0; // GLOBAL WRITTEN long tuplesWritten = 0; long blocksWritten = 0; long bytesWritten = 0; // GLOBAL READ long tuplesRead = 0; long blocksRead = 0; long bytesRead = 0; // update table stats VoltTable[] s1 = null; try { s1 = this.ee.getStats(SysProcSelector.TABLE, tableIds, false, time); } catch (RuntimeException ex) { LOG.warn("Unexpected error when trying to retrieve EE stats for partition " + this.partitionId, ex); } if (s1 != null) { VoltTable stats = s1[0]; assert(stats != null); // rollup the table memory stats for this site while (stats.advanceRow()) { int idx = 7; tupleCount += stats.getLong(idx++); tupleAccessCount += stats.getLong(idx++); tupleAllocatedMem += (int) stats.getLong(idx++); tupleDataMem += (int) stats.getLong(idx++); stringMem += (int) stats.getLong(idx++); // ACTIVE if 
(hstore_conf.site.anticache_enable) { tuplesEvicted += (long) stats.getLong(idx++); blocksEvicted += (long) stats.getLong(idx++); bytesEvicted += (long) stats.getLong(idx++); // GLOBAL WRITTEN tuplesWritten += (long) stats.getLong(idx++); blocksWritten += (long) stats.getLong(idx++); bytesWritten += (long) stats.getLong(idx++); // GLOBAL READ tuplesRead += (long) stats.getLong(idx++); blocksRead += (long) stats.getLong(idx++); bytesRead += (long) stats.getLong(idx++); } } stats.resetRowPosition(); } // update index stats // final VoltTable[] s2 = ee.getStats(SysProcSelector.INDEX, tableIds, false, time); // if ((s2 != null) && (s2.length > 0)) { // VoltTable stats = s2[0]; // assert(stats != null); // LOG.info("INDEX:\n" + VoltTableUtil.format(stats)); // // // rollup the index memory stats for this site //// while (stats.advanceRow()) { //// indexMem += stats.getLong(10); //// } // stats.resetRowPosition(); // // // m_indexStats.setStatsTable(stats); // } // update the rolled up memory statistics MemoryStats memoryStats = hstore_site.getMemoryStatsSource(); memoryStats.eeUpdateMemStats(this.siteId, tupleCount, tupleDataMem, tupleAllocatedMem, indexMem, stringMem, 0, // FIXME // ACTIVE tuplesEvicted, blocksEvicted, bytesEvicted, // GLOBAL WRITTEN tuplesWritten, blocksWritten, bytesWritten, // GLOBAL READ tuplesRead, blocksRead, bytesRead ); this.lastStatsTime = time; } public void haltProcessing() { // if (debug.val) LOG.warn("Halting transaction processing at partition " + this.partitionId); ExecutionMode origMode = this.currentExecMode; this.setExecutionMode(this.currentTxn, ExecutionMode.DISABLED_REJECT); List<InternalMessage> toKeep = new ArrayList<InternalMessage>(); InternalMessage msg = null; while ((msg = this.work_queue.poll()) != null) { // ------------------------------- // InitializeRequestMessage // ------------------------------- if (msg instanceof InitializeRequestMessage) { InitializeRequestMessage initMsg = (InitializeRequestMessage)msg; hstore_site.responseError(initMsg.getClientHandle(), Status.ABORT_REJECT, hstore_site.getRejectionMessage() + " - [2]", initMsg.getClientCallback(), EstTime.currentTimeMillis()); } // ------------------------------- // InitializeTxnMessage // ------------------------------- if (msg instanceof InitializeTxnMessage) { InitializeTxnMessage initMsg = (InitializeTxnMessage)msg; AbstractTransaction ts = initMsg.getTransaction(); TransactionCallback callback = ts.getInitCallback(); callback.abort(this.partitionId, Status.ABORT_REJECT); } // ------------------------------- // StartTxnMessage // ------------------------------- else if (msg instanceof StartTxnMessage) { StartTxnMessage startMsg = (StartTxnMessage)msg; hstore_site.transactionReject((LocalTransaction)startMsg.getTransaction(), Status.ABORT_REJECT); } // ------------------------------- // Things to keep // ------------------------------- else { toKeep.add(msg); } } // WHILE // assert(this.work_queue.isEmpty()); this.work_queue.addAll(toKeep); // For now we'll set it back so that we can execute new stuff. 
Clearing out // the queue should enough for now this.setExecutionMode(this.currentTxn, origMode); } /** * Figure out the current speculative execution mode for this partition * @return */ private SpeculationType calculateSpeculationType() { SpeculationType specType = SpeculationType.NULL; // IDLE if (this.currentDtxn == null) { specType = SpeculationType.IDLE; } // LOCAL else if (this.currentDtxn.getBasePartition() == this.partitionId) { if (((LocalTransaction)this.currentDtxn).isMarkExecuted() == false) { specType = SpeculationType.IDLE; } else if (this.currentDtxn.isMarkedPrepared(this.partitionId)) { specType = SpeculationType.SP3_LOCAL; } else { specType = SpeculationType.SP1_LOCAL; } } // REMOTE else { if (this.currentDtxn.isMarkedPrepared(this.partitionId)) { specType = SpeculationType.SP3_REMOTE; } else if (this.currentDtxn.hasExecutedWork(this.partitionId) == false) { specType = SpeculationType.SP2_REMOTE_BEFORE; } else { specType = SpeculationType.SP2_REMOTE_AFTER; } } return (specType); } /** * Set the current ExecutionMode for this executor. The transaction handle given as an input * argument is the transaction that caused the mode to get changed. It is only used for debug * purposes. * @param newMode * @param txn_id */ private void setExecutionMode(AbstractTransaction ts, ExecutionMode newMode) { if (debug.val && this.currentExecMode != newMode) { LOG.debug(String.format("Setting ExecutionMode for partition %d to %s because of %s [origMode=%s]", this.partitionId, newMode, ts, this.currentExecMode)); } assert(newMode != ExecutionMode.COMMIT_READONLY || (newMode == ExecutionMode.COMMIT_READONLY && this.currentDtxn != null)) : String.format("%s is trying to set partition %d to %s when the current DTXN is null?", ts, this.partitionId, newMode); this.currentExecMode = newMode; } /** * Returns the next undo token to use when hitting up the EE with work * MAX_VALUE = no undo * @param txn_id * @return */ private long getNextUndoToken() { if (trace.val) LOG.trace(String.format("Next Undo for Partition %d: %d", this.partitionId, this.lastUndoToken+1)); return (++this.lastUndoToken); } /** * For the given txn, return the next undo token to use for its next execution round * @param ts * @param readOnly * @return */ private long calculateNextUndoToken(AbstractTransaction ts, boolean readOnly) { long undoToken = HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN; long lastUndoToken = ts.getLastUndoToken(this.partitionId); boolean singlePartition = ts.isPredictSinglePartition(); // Speculative txns always need an undo token // It's just easier this way... 
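// A speculative txn executed behind a distributed txn may have to be rolled back if that dtxn aborts, so it always gets a real undo token.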
if (ts.isSpeculative()) { undoToken = this.getNextUndoToken(); } // If this plan is read-only, then we don't need a new undo token (unless // we don't have one already) else if (readOnly) { if (lastUndoToken == HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { lastUndoToken = HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN; // lastUndoToken = this.getNextUndoToken(); } undoToken = lastUndoToken; } // Otherwise, we need to figure out whether we want to be a brave soul and // not use undo logging at all else { // If one of the following conditions are true, then we need to get a new token: // (1) If this our first time up at bat // (2) If we're a distributed transaction // (3) The force undo logging option is enabled if (lastUndoToken == HStoreConstants.NULL_UNDO_LOGGING_TOKEN || singlePartition == false || hstore_conf.site.exec_force_undo_logging_all) { undoToken = this.getNextUndoToken(); } // If we originally executed this transaction with undo buffers and we have a MarkovEstimate, // then we can go back and check whether we want to disable undo logging for the rest of the transaction else if (ts.getEstimatorState() != null && singlePartition && ts.isSpeculative() == false) { Estimate est = ts.getEstimatorState().getLastEstimate(); assert(est != null) : "Got back null MarkovEstimate for " + ts; if (hstore_conf.site.exec_no_undo_logging == false || est.isValid() == false || est.isAbortable(this.thresholds) || est.isReadOnlyPartition(this.thresholds, this.partitionId) == false) { undoToken = lastUndoToken; } else if (debug.val) { LOG.warn(String.format("Bold! Disabling undo buffers for inflight %s\n%s", ts, est)); } } } // Make sure that it's at least as big as the last one handed out if (undoToken < this.lastUndoToken) undoToken = this.lastUndoToken; if (debug.val) LOG.debug(String.format("%s - Next undo token at partition %d is %s [readOnly=%s]", ts, this.partitionId, (undoToken == HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN ? "<DISABLED>" : (undoToken == HStoreConstants.NULL_UNDO_LOGGING_TOKEN ? "<NULL>" : undoToken)), readOnly)); return (undoToken); } /** * Populate the provided inputs map with the VoltTables needed for the give * input DependencyId. If the txn is a LocalTransaction, then we will * get the data we need from the base partition's DependencyTracker. * @param ts * @param input_dep_ids * @param inputs * @return */ private void getFragmentInputs(AbstractTransaction ts, int input_dep_id, Map<Integer, List<VoltTable>> inputs) { if (input_dep_id == HStoreConstants.NULL_DEPENDENCY_ID) return; if (trace.val) LOG.trace(String.format("%s - Attempting to retrieve input dependencies for DependencyId #%d", ts, input_dep_id)); // If the Transaction is on the same HStoreSite, then all the // input dependencies will be internal and can be retrieved locally if (ts instanceof LocalTransaction) { DependencyTracker txnTracker = null; if (ts.getBasePartition() != this.partitionId) { txnTracker = hstore_site.getDependencyTracker(ts.getBasePartition()); } else { txnTracker = this.depTracker; } List<VoltTable> deps = txnTracker.getInternalDependency((LocalTransaction)ts, input_dep_id); assert(deps != null); assert(inputs.containsKey(input_dep_id) == false); inputs.put(input_dep_id, deps); if (trace.val) LOG.trace(String.format("%s - Retrieved %d INTERNAL VoltTables for DependencyId #%d", ts, deps.size(), input_dep_id, (trace.val ? 
"\n" + deps : ""))); } // Otherwise they will be "attached" inputs to the RemoteTransaction handle // We should really try to merge these two concepts into a single function call else if (ts.getAttachedInputDependencies().containsKey(input_dep_id)) { List<VoltTable> deps = ts.getAttachedInputDependencies().get(input_dep_id); List<VoltTable> pDeps = null; // We have to copy the tables if we have debugging enabled if (trace.val) { // this.firstPartition == false) { pDeps = new ArrayList<VoltTable>(); for (VoltTable vt : deps) { ByteBuffer buffer = vt.getTableDataReference(); byte arr[] = new byte[vt.getUnderlyingBufferSize()]; buffer.get(arr, 0, arr.length); pDeps.add(new VoltTable(ByteBuffer.wrap(arr), true)); } } else { pDeps = deps; } inputs.put(input_dep_id, pDeps); if (trace.val) LOG.trace(String.format("%s - Retrieved %d ATTACHED VoltTables for DependencyId #%d in %s", ts, deps.size(), input_dep_id)); } } /** * Set the given AbstractTransaction handle as the current distributed txn * that is running at this partition. Note that this will check to make sure * that no other txn is marked as the currentDtxn. * @param ts */ private void setCurrentDtxn(AbstractTransaction ts) { // There can never be another current dtxn still unfinished at this partition! assert(this.currentBlockedTxns.isEmpty()) : String.format("Concurrent multi-partition transactions at partition %d: " + "Orig[%s] <=> New[%s] / BlockedQueue:%d", this.partitionId, this.currentDtxn, ts, this.currentBlockedTxns.size()); assert(this.currentDtxn == null) : String.format("Concurrent multi-partition transactions at partition %d: " + "Orig[%s] <=> New[%s] / BlockedQueue:%d", this.partitionId, this.currentDtxn, ts, this.currentBlockedTxns.size()); // Check whether we should check for speculative txns to execute whenever this // dtxn is idle at this partition this.currentDtxn = ts; if (hstore_conf.site.specexec_enable && ts.isSysProc() == false && this.specExecScheduler.isDisabled() == false) { this.specExecIgnoreCurrent = this.specExecChecker.shouldIgnoreTransaction(ts); } else { this.specExecIgnoreCurrent = true; } if (debug.val) { LOG.debug(String.format("Set %s as the current DTXN for partition %d [specExecIgnore=%s, previous=%s]", ts, this.partitionId, this.specExecIgnoreCurrent, this.lastDtxnDebug)); this.lastDtxnDebug = this.currentDtxn.toString(); } if (hstore_conf.site.exec_profiling && ts.getBasePartition() != this.partitionId) { profiler.sp2_time.start(); } } /** * Reset the current dtxn for this partition */ private void resetCurrentDtxn() { assert(this.currentDtxn != null) : "Trying to reset the currentDtxn when it is already null"; if (debug.val) LOG.debug(String.format("Resetting current DTXN for partition %d to null [previous=%s]", this.partitionId, this.lastDtxnDebug)); this.currentDtxn = null; } /** * Store a new prefetch result for a transaction * @param txnId * @param fragmentId * @param partitionId * @param params * @param result */ public void addPrefetchResult(LocalTransaction ts, int stmtCounter, int fragmentId, int partitionId, int paramsHash, VoltTable result) { if (debug.val) LOG.debug(String.format("%s - Adding prefetch result for %s with %d rows from partition %d " + "[stmtCounter=%d / paramsHash=%d]", ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragmentId).fullName(), result.getRowCount(), partitionId, stmtCounter, paramsHash)); this.depTracker.addPrefetchResult(ts, stmtCounter, fragmentId, partitionId, paramsHash, result); } // 
--------------------------------------------------------------- // PartitionExecutor API // --------------------------------------------------------------- /** * Queue a new transaction initialization at this partition. This will cause the * transaction to get added to this partition's lock queue. This PartitionExecutor does * not have to be this txn's base partition/ * @param ts */ public void queueSetPartitionLock(AbstractTransaction ts) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; SetDistributedTxnMessage work = ts.getSetDistributedTxnMessage(); boolean success = this.work_queue.offer(work); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to front of partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); } /** * New work from the coordinator that this local site needs to execute (non-blocking) * This method will simply chuck the task into the work queue. * We should not be sent an InitiateTaskMessage here! * @param ts * @param task */ public void queueWork(AbstractTransaction ts, WorkFragment fragment) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; WorkFragmentMessage work = ts.getWorkFragmentMessage(fragment); boolean success = this.work_queue.offer(work); // , true); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); ts.markQueuedWork(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); } /** * Add a new work message to our utility queue * @param work */ public void queueUtilityWork(InternalMessage work) { if (debug.val) LOG.debug(String.format("Added utility work %s to partition %d", work.getClass().getSimpleName(), this.partitionId)); this.work_queue.offer(work); } /** * Put the prepare request for the transaction into the queue * @param task * @param status The final status of the transaction */ public void queuePrepare(AbstractTransaction ts) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; PrepareTxnMessage work = ts.getPrepareTxnMessage(); boolean success = this.work_queue.offer(work); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); // if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(); } /** * Put the finish request for the transaction into the queue * @param task * @param status The final status of the transaction */ public void queueFinish(AbstractTransaction ts, Status status) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; FinishTxnMessage work = ts.getFinishTxnMessage(status); boolean success = this.work_queue.offer(work); // , true); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, 
work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); // if (success) this.specExecScheduler.haltSearch(); } /** * Queue a new transaction invocation request at this partition * @param serializedRequest * @param catalog_proc * @param procParams * @param clientCallback * @return */ public boolean queueNewTransaction(ByteBuffer serializedRequest, long initiateTime, Procedure catalog_proc, ParameterSet procParams, RpcCallback<ClientResponseImpl> clientCallback) { boolean sysproc = catalog_proc.getSystemproc(); if (this.currentExecMode == ExecutionMode.DISABLED_REJECT && sysproc == false) return (false); InitializeRequestMessage work = new InitializeRequestMessage(serializedRequest, initiateTime, catalog_proc, procParams, clientCallback); if (debug.val) LOG.debug(String.format("Queuing %s for '%s' request on partition %d " + "[currentDtxn=%s, queueSize=%d, mode=%s]", work.getClass().getSimpleName(), catalog_proc.getName(), this.partitionId, this.currentDtxn, this.work_queue.size(), this.currentExecMode)); return (this.work_queue.offer(work)); } /** * Queue a new transaction invocation request at this partition * @param ts * @param task * @param callback */ public boolean queueStartTransaction(LocalTransaction ts) { assert(ts != null) : "Unexpected null transaction handle!"; boolean singlePartitioned = ts.isPredictSinglePartition(); boolean force = (singlePartitioned == false) || ts.isMapReduce() || ts.isSysProc(); // UPDATED 2012-07-12 // We used to have a bunch of checks to determine whether we needed // put the new request in the blocked queue or not. This required us to // acquire the exec_lock to do the check and then another lock to actually put // the request into the work_queue. Now we'll just throw it right in // the queue (checking for throttling of course) and let the main // thread sort out the mess of whether the txn should get blocked or not if (this.currentExecMode == ExecutionMode.DISABLED_REJECT) { if (debug.val) LOG.warn(String.format("%s - Not queuing txn at partition %d because current mode is %s", ts, this.partitionId, this.currentExecMode)); return (false); } StartTxnMessage work = ts.getStartTxnMessage(); if (debug.val) LOG.debug(String.format("Queuing %s for '%s' request on partition %d " + "[currentDtxn=%s, queueSize=%d, mode=%s]", work.getClass().getSimpleName(), ts.getProcedure().getName(), this.partitionId, this.currentDtxn, this.work_queue.size(), this.currentExecMode)); boolean success = this.work_queue.offer(work); // , force); if (debug.val && force && success == false) { String msg = String.format("Failed to add %s even though force flag was true!", ts); throw new ServerFaultException(msg, ts.getTransactionId()); } if (success && hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); return (success); } // --------------------------------------------------------------- // WORK QUEUE PROCESSING METHODS // --------------------------------------------------------------- /** * Process a WorkResult and update the internal state the LocalTransaction accordingly * Note that this will always be invoked by a thread other than the main execution thread * for this PartitionExecutor. That means if something comes back that's bad, we need a way * to alert the other thread so that it can act on it. 
* @param ts * @param result */ private void processWorkResult(LocalTransaction ts, WorkResult result) { boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); if (debug.val) LOG.debug(String.format("Processing WorkResult for %s on partition %d [srcPartition=%d, deps=%d]", ts, this.partitionId, result.getPartitionId(), result.getDepDataCount())); // If the Fragment failed to execute, then we need to abort the Transaction // Note that we have to do this before we add the responses to the TransactionState so that // we can be sure that the VoltProcedure knows about the problem when it wakes the stored // procedure back up if (result.getStatus() != Status.OK) { if (trace.val) LOG.trace(String.format("Received non-success response %s from partition %d for %s", result.getStatus(), result.getPartitionId(), ts)); SerializableException error = null; if (needs_profiling) ts.profiler.startDeserialization(); try { ByteBuffer buffer = result.getError().asReadOnlyByteBuffer(); error = SerializableException.deserializeFromBuffer(buffer); } catch (Exception ex) { String msg = String.format("Failed to deserialize SerializableException from partition %d " + "for %s [bytes=%d]", result.getPartitionId(), ts, result.getError().size()); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ts.profiler.stopDeserialization(); } // At this point there is no need to even deserialize the rest of the message because // we know that we're going to have to abort the transaction if (error == null) { LOG.warn(ts + " - Unexpected null SerializableException\n" + result); } else { if (debug.val) LOG.error(String.format("%s - Got error from partition %d in %s", ts, result.getPartitionId(), result.getClass().getSimpleName()), error); ts.setPendingError(error, true); } return; } if (needs_profiling) ts.profiler.startDeserialization(); for (int i = 0, cnt = result.getDepDataCount(); i < cnt; i++) { if (trace.val) LOG.trace(String.format("Storing intermediate results from partition %d for %s", result.getPartitionId(), ts)); int depId = result.getDepId(i); ByteString bs = result.getDepData(i); VoltTable vt = null; if (bs.isEmpty() == false) { FastDeserializer fd = new FastDeserializer(bs.asReadOnlyByteBuffer()); try { vt = fd.readObject(VoltTable.class); } catch (Exception ex) { throw new ServerFaultException("Failed to deserialize VoltTable from partition " + result.getPartitionId() + " for " + ts, ex); } } this.depTracker.addResult(ts, result.getPartitionId(), depId, vt); } // FOR (dependencies) if (needs_profiling) ts.profiler.stopDeserialization(); } /** * Execute a new transaction at this partition. * This will invoke the run() method define in the VoltProcedure for this txn and * then process the ClientResponse. Only the PartitionExecutor itself should be calling * this directly, since it's the only thing that knows what's going on with the world... * @param ts */ private void executeTransaction(LocalTransaction ts) { assert(ts.isInitialized()) : String.format("Trying to execute uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedReleased(this.partitionId)) : String.format("Transaction %s was not marked released at partition %d before being executed", ts, this.partitionId); if (trace.val) LOG.debug(String.format("%s - Attempting to start transaction on partition %d", ts, this.partitionId)); // If this is a MapReduceTransaction handle, we actually want to get the // inner LocalTransaction handle for this partition. 
The MapReduceTransaction // is just a placeholder if (ts instanceof MapReduceTransaction) { MapReduceTransaction mr_ts = (MapReduceTransaction)ts; ts = mr_ts.getLocalTransaction(this.partitionId); assert(ts != null) : "Unexpected null LocalTransaction handle from " + mr_ts; } ExecutionMode before_mode = this.currentExecMode; boolean predict_singlePartition = ts.isPredictSinglePartition(); // ------------------------------- // DISTRIBUTED TXN // ------------------------------- if (predict_singlePartition == false) { // If there is already a dtxn running, then we need to throw this // mofo back into the blocked txn queue // TODO: If our dtxn is on the same site as us, then at this point we know that // it is done executing the control code and is sending around 2PC messages // to commit/abort. That means that we could assume that all of the other // remote partitions are going to agree on the same outcome and we can start // speculatively executing this dtxn. After all, if we're at this point in // the PartitionExecutor then we know that we got this partition's locks // from the TransactionQueueManager. if (this.currentDtxn != null && this.currentDtxn.equals(ts) == false) { assert(this.currentDtxn.equals(ts) == false) : String.format("New DTXN %s != Current DTXN %s", ts, this.currentDtxn); // If this is a local txn, then we can finagle things a bit. if (this.currentDtxn.isExecLocal(this.partitionId)) { // It would be safe for us to speculative execute this DTXN right here // if the currentDtxn has aborted... but we can never be in this state. assert(this.currentDtxn.isAborted() == false) : // Sanity Check String.format("We want to execute %s on partition %d but aborted %s is still hanging around\n", ts, this.partitionId, this.currentDtxn, this.work_queue); // So that means we know that it committed, which doesn't necessarily mean // that it will still commit, but we'll be able to abort, rollback, and requeue // if that happens. // TODO: Right now our current dtxn marker is a single value. We may want to // switch it to a FIFO queue so that we can multiple guys hanging around. // For now we will just do the default thing and block this txn this.blockTransaction(ts); return; } // If it's not local, then we just have to block it right away else { this.blockTransaction(ts); return; } } // If there is no other DTXN right now, then we're it! else if (this.currentDtxn == null) { // || this.currentDtxn.equals(ts) == false) { this.setCurrentDtxn(ts); } // 2011-11-14: We don't want to set the execution mode here, because we know that we // can check whether we were read-only after the txn finishes this.setExecutionMode(this.currentDtxn, ExecutionMode.COMMIT_NONE); if (debug.val) LOG.debug(String.format("Marking %s as current DTXN on Partition %d [isLocal=%s, execMode=%s]", ts, this.partitionId, true, this.currentExecMode)); } // ------------------------------- // SINGLE-PARTITION TXN // ------------------------------- else { // If this is a single-partition transaction, then we need to check whether we are // being executed under speculative execution mode. We have to check this here // because it may be the case that we queued a bunch of transactions when speculative // execution was enabled, but now the transaction that was ahead of this one is finished, // so now we're just executing them regularly if (this.currentDtxn != null) { // HACK: If we are currently under DISABLED mode when we get this, then we just // need to block the transaction and return back to the queue. 
This is easier than // having to set all sorts of crazy locks if (this.currentExecMode == ExecutionMode.DISABLED || hstore_conf.site.specexec_enable == false) { if (debug.val) LOG.debug(String.format("%s - Blocking single-partition %s until dtxn finishes [mode=%s]", this.currentDtxn, ts, this.currentExecMode)); this.blockTransaction(ts); return; } assert(ts.getSpeculationType() != null); if (debug.val) LOG.debug(String.format("Speculatively executing %s while waiting for dtxn %s [%s]", ts, this.currentDtxn, ts.getSpeculationType())); assert(ts.isSpeculative()) : ts + " was not marked as being speculative!"; } } // If we reach this point, we know that we're about to execute our homeboy here... if (hstore_conf.site.txn_profiling && ts.profiler != null) { ts.profiler.startExec(); } if (hstore_conf.site.exec_profiling) this.profiler.numTransactions++; // Make sure the dependency tracker knows about us if (ts.hasDependencyTracker()) this.depTracker.addTransaction(ts); // Grab a new ExecutionState for this txn ExecutionState execState = this.initExecutionState(); ts.setExecutionState(execState); VoltProcedure volt_proc = this.getVoltProcedure(ts.getProcedure().getId()); assert(volt_proc != null) : "No VoltProcedure for " + ts; if (debug.val) { LOG.debug(String.format("%s - Starting execution of txn on partition %d " + "[txnMode=%s, mode=%s]", ts, this.partitionId, before_mode, this.currentExecMode)); if (trace.val) LOG.trace(String.format("Current Transaction at partition #%d\n%s", this.partitionId, ts.debug())); } if (hstore_conf.site.txn_counters) TransactionCounter.EXECUTED.inc(ts.getProcedure()); ClientResponseImpl cresponse = null; VoltProcedure previous = this.currentVoltProc; try { this.currentVoltProc = volt_proc; cresponse = volt_proc.call(ts, ts.getProcedureParameters().toArray()); // Blocking... // VoltProcedure.call() should handle any exceptions thrown by the transaction // If we get anything out here then that's bad news } catch (Throwable ex) { if (this.isShuttingDown() == false) { SQLStmt last[] = volt_proc.voltLastQueriesExecuted(); LOG.fatal("Unexpected error while executing " + ts, ex); if (last.length > 0) { LOG.fatal(String.format("Last Queries Executed [%d]: %s", last.length, Arrays.toString(last))); } LOG.fatal("LocalTransactionState Dump:\n" + ts.debug()); this.crash(ex); } } finally { this.currentVoltProc = previous; ts.resetExecutionState(); execState.finish(); this.execStates.add(execState); this.finishVoltProcedure(volt_proc); if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPost(); // if (cresponse.getStatus() == Status.ABORT_UNEXPECTED) { // cresponse.getException().printStackTrace(); // } } // If this is a MapReduce job, then we can just ignore the ClientResponse // and return immediately. The VoltMapReduceProcedure is responsible for storing // the result at the proper location. 
if (ts.isMapReduce()) { return; } else if (cresponse == null) { assert(this.isShuttingDown()) : String.format("No ClientResponse for %s???", ts); return; } // ------------------------------- // PROCESS RESPONSE AND FIGURE OUT NEXT STEP // ------------------------------- Status status = cresponse.getStatus(); if (debug.val) { LOG.debug(String.format("%s - Finished execution of transaction control code " + "[status=%s, beforeMode=%s, currentMode=%s]", ts, status, before_mode, this.currentExecMode)); if (ts.hasPendingError()) { LOG.debug(String.format("%s - Txn finished with pending error: %s", ts, ts.getPendingErrorMessage())); } } // We assume that most transactions are not speculatively executed and are successful // Therefore we don't want to grab the exec_mode lock here. if (predict_singlePartition == false || this.canProcessClientResponseNow(ts, status, before_mode)) { this.processClientResponse(ts, cresponse); } // Otherwise always queue our response, since we know that whatever thread is out there // is waiting for us to finish before it drains the queued responses else { // If the transaction aborted, then we can't execute any transaction that touch the tables // that this guy touches. But since we can't just undo this transaction without undoing // everything that came before it, we'll just disable executing all transactions until the // current distributed transaction commits if (status != Status.OK && ts.isExecReadOnly(this.partitionId) == false) { this.setExecutionMode(ts, ExecutionMode.DISABLED); int blocked = this.work_queue.drainTo(this.currentBlockedTxns); if (debug.val) { if (trace.val && blocked > 0) LOG.trace(String.format("Blocking %d transactions at partition %d because ExecutionMode is now %s", blocked, this.partitionId, this.currentExecMode)); LOG.debug(String.format("Disabling execution on partition %d because speculative %s aborted", this.partitionId, ts)); } } if (trace.val) LOG.trace(String.format("%s - Queuing ClientResponse [status=%s, origMode=%s, newMode=%s, dtxn=%s]", ts, cresponse.getStatus(), before_mode, this.currentExecMode, this.currentDtxn)); this.blockClientResponse(ts, cresponse); } } /** * Determines whether a finished transaction that executed locally can have their ClientResponse processed immediately * or if it needs to wait for the response from the outstanding multi-partition transaction for this partition * (1) This is the multi-partition transaction that everyone is waiting for * (2) The transaction was not executed under speculative execution mode * (3) The transaction does not need to wait for the multi-partition transaction to finish first * @param ts * @param status * @param before_mode * @return */ private boolean canProcessClientResponseNow(LocalTransaction ts, Status status, ExecutionMode before_mode) { if (debug.val) LOG.debug(String.format("%s - Checking whether to process %s response now at partition %d " + "[singlePartition=%s, readOnly=%s, specExecModified=%s, before=%s, current=%s]", ts, status, this.partitionId, ts.isPredictSinglePartition(), ts.isExecReadOnly(this.partitionId), this.specExecModified, before_mode, this.currentExecMode)); // Commit All if (this.currentExecMode == ExecutionMode.COMMIT_ALL) { return (true); } // SPECIAL CASE // Any user-aborted, speculative single-partition transaction should be processed immediately. 
else if (status == Status.ABORT_USER && ts.isSpeculative()) { return (true); } // // SPECIAL CASE // // If this txn threw a user abort, and the current outstanding dtxn is read-only // // then it's safe for us to rollback // else if (status == Status.ABORT_USER && // this.currentDtxn != null && // this.currentDtxn.isExecReadOnly(this.partitionId)) { // return (true); // } // SPECIAL CASE // Anything mispredicted should be processed right away else if (status == Status.ABORT_MISPREDICT) { return (true); } // Process successful txns based on the mode that they were executed under else if (status == Status.OK) { switch (before_mode) { case COMMIT_ALL: return (true); case COMMIT_READONLY: // Read-only speculative txns can be committed right now // TODO: Right now we're going to use the specExecModified flag to disable // sending out any results from spec execed txns that may have read from // a modified database. We should switch to a bitmap of table ids so that we // can be more selective. // return (false); return (this.specExecModified == false && ts.isExecReadOnly(this.partitionId)); case COMMIT_NONE: { // If this txn does not conflict with the current dtxn, then we should be able // to let it commit but we can't because of the way our undo tokens work return (false); } default: throw new ServerFaultException("Unexpected execution mode: " + before_mode, ts.getTransactionId()); } // SWITCH } // // If the transaction aborted and it was read-only thus far, then we want to process it immediately // else if (status != Status.OK && ts.isExecReadOnly(this.partitionId)) { // return (true); // } assert(this.currentExecMode != ExecutionMode.COMMIT_ALL) : String.format("Queuing ClientResponse for %s when in non-speculative mode [mode=%s, status=%s]", ts, this.currentExecMode, status); return (false); } /** * Process a WorkFragment for a transaction and execute it in this partition's underlying EE. * @param ts * @param fragment * @param allParameters The array of all the ParameterSets for the current SQLStmt batch.
*/ private void processWorkFragment(AbstractTransaction ts, WorkFragment fragment, ParameterSet allParameters[]) { assert(this.partitionId == fragment.getPartitionId()) : String.format("Tried to execute WorkFragment %s for %s at partition %d but it was suppose " + "to be executed on partition %d", fragment.getFragmentIdList(), ts, this.partitionId, fragment.getPartitionId()); assert(ts.isMarkedPrepared(this.partitionId) == false) : String.format("Tried to execute WorkFragment %s for %s at partition %d after it was marked 2PC:PREPARE", fragment.getFragmentIdList(), ts, this.partitionId); // A txn is "local" if the Java is executing at the same partition as this one boolean is_basepartition = (ts.getBasePartition() == this.partitionId); boolean is_remote = (ts instanceof LocalTransaction == false); boolean is_prefetch = fragment.getPrefetch(); boolean is_readonly = fragment.getReadOnly(); if (debug.val) LOG.debug(String.format("%s - Executing %s [isBasePartition=%s, isRemote=%s, isPrefetch=%s, isReadOnly=%s, fragments=%s]", ts, fragment.getClass().getSimpleName(), is_basepartition, is_remote, is_prefetch, is_readonly, fragment.getFragmentIdCount())); // If this WorkFragment isn't being executed at this txn's base partition, then // we need to start a new execution round if (is_basepartition == false) { long undoToken = this.calculateNextUndoToken(ts, is_readonly); ts.initRound(this.partitionId, undoToken); ts.startRound(this.partitionId); } DependencySet result = null; Status status = Status.OK; SerializableException error = null; // Check how many fragments are not marked as ignored // If the fragment is marked as ignore then it means that it was already // sent to this partition for prefetching. We need to make sure that we remove // it from the list of fragmentIds that we need to execute. int fragmentCount = fragment.getFragmentIdCount(); for (int i = 0; i < fragmentCount; i++) { if (fragment.getStmtIgnore(i)) { fragmentCount--; } } // FOR final ParameterSet parameters[] = tmp_fragmentParams.getParameterSet(fragmentCount); assert(parameters.length == fragmentCount); // Construct data given to the EE to execute this work fragment this.tmp_EEdependencies.clear(); long fragmentIds[] = tmp_fragmentIds.getArray(fragmentCount); int fragmentOffsets[] = tmp_fragmentOffsets.getArray(fragmentCount); int outputDepIds[] = tmp_outputDepIds.getArray(fragmentCount); int inputDepIds[] = tmp_inputDepIds.getArray(fragmentCount); int offset = 0; for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) { if (fragment.getStmtIgnore(i) == false) { fragmentIds[offset] = fragment.getFragmentId(i); fragmentOffsets[offset] = i; outputDepIds[offset] = fragment.getOutputDepId(i); inputDepIds[offset] = fragment.getInputDepId(i); parameters[offset] = allParameters[fragment.getParamIndex(i)]; this.getFragmentInputs(ts, inputDepIds[offset], this.tmp_EEdependencies); if (trace.val && ts.isSysProc() == false && is_basepartition == false) LOG.trace(String.format("%s - Offset:%d FragmentId:%d OutputDep:%d/%d InputDep:%d/%d", ts, offset, fragmentIds[offset], outputDepIds[offset], fragment.getOutputDepId(i), inputDepIds[offset], fragment.getInputDepId(i))); offset++; } } // FOR assert(offset == fragmentCount); try { result = this.executeFragmentIds(ts, ts.getLastUndoToken(this.partitionId), fragmentIds, parameters, outputDepIds, inputDepIds, this.tmp_EEdependencies); } catch (EvictedTupleAccessException ex) { // XXX: What do we do if this is not a single-partition txn? 
status = Status.ABORT_EVICTEDACCESS; error = ex; } catch (ConstraintFailureException ex) { status = Status.ABORT_UNEXPECTED; error = ex; } catch (SQLException ex) { status = Status.ABORT_UNEXPECTED; error = ex; } catch (EEException ex) { // this.crash(ex); status = Status.ABORT_UNEXPECTED; error = ex; } catch (Throwable ex) { status = Status.ABORT_UNEXPECTED; if (ex instanceof SerializableException) { error = (SerializableException)ex; } else { error = new SerializableException(ex); } } finally { if (error != null) { // error.printStackTrace(); LOG.warn(String.format("%s - Unexpected %s on partition %d", ts, error.getClass().getSimpleName(), this.partitionId), error); // (debug.val ? error : null)); } // Success, but without any results??? if (result == null && status == Status.OK) { String msg = String.format("The WorkFragment %s executed successfully on Partition %d but " + "result is null for %s", fragment.getFragmentIdList(), this.partitionId, ts); Exception ex = new Exception(msg); if (debug.val) LOG.warn(ex); status = Status.ABORT_UNEXPECTED; error = new SerializableException(ex); } } // For single-partition INSERT/UPDATE/DELETE queries, we don't directly // execute the SendPlanNode in order to get back the number of tuples that // were modified. So we have to rely on the output dependency ids set in the task assert(status != Status.OK || (status == Status.OK && result.size() == fragmentIds.length)) : "Got back " + result.size() + " results but was expecting " + fragmentIds.length; // Make sure that we mark the round as finished before we start sending results if (is_basepartition == false) { ts.finishRound(this.partitionId); } // ------------------------------- // PREFETCH QUERIES // ------------------------------- if (is_prefetch) { // Regardless of whether this txn is running at the same HStoreSite as this PartitionExecutor, // we always need to put the result inside of the local query cache // This is so that we can identify if we get request for a query that we have already executed // We'll only do this if it succeeded. If it failed, then we won't do anything and will // just wait until they come back to execute the query again before // we tell them that something went wrong. It's ghetto, but it's just easier this way... if (status == Status.OK) { // We're going to store the result in the base partition cache if they're // on the same HStoreSite as us if (is_remote == false) { PartitionExecutor other = this.hstore_site.getPartitionExecutor(ts.getBasePartition()); for (int i = 0, cnt = result.size(); i < cnt; i++) { if (trace.val) LOG.trace(String.format("%s - Storing %s prefetch result [params=%s]", ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragment.getFragmentId(fragmentOffsets[i])).fullName(), parameters[i])); other.addPrefetchResult((LocalTransaction)ts, fragment.getStmtCounter(fragmentOffsets[i]), fragment.getFragmentId(fragmentOffsets[i]), this.partitionId, parameters[i].hashCode(), result.dependencies[i]); } // FOR } } // Now if it's a remote transaction, we need to use the coordinator to send // them our result. Note that we want to send a single message per partition. Unlike // with the TransactionWorkRequests, we don't need to wait until all of the partitions // that are prefetching for this txn at our local HStoreSite to finish. 
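// The TransactionPrefetchResult built below bundles the serialized WorkResult together with the // fragment ids, statement counters, and parameter hashes, so that the txn's base partition can // match this cached prefetch result against the real query when it eventually shows up.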
if (is_remote) { WorkResult wr = this.buildWorkResult(ts, result, status, error); TransactionPrefetchResult.Builder builder = TransactionPrefetchResult.newBuilder() .setTransactionId(ts.getTransactionId().longValue()) .setSourcePartition(this.partitionId) .setResult(wr) .setStatus(status) .addAllFragmentId(fragment.getFragmentIdList()) .addAllStmtCounter(fragment.getStmtCounterList()); for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) { builder.addParamHash(parameters[i].hashCode()); } if (debug.val) LOG.debug(String.format("%s - Sending back %s to partition %d [numResults=%s, status=%s]", ts, wr.getClass().getSimpleName(), ts.getBasePartition(), result.size(), status)); hstore_coordinator.transactionPrefetchResult((RemoteTransaction)ts, builder.build()); } } // ------------------------------- // LOCAL TRANSACTION // ------------------------------- else if (is_remote == false) { LocalTransaction local_ts = (LocalTransaction)ts; // If the transaction is local, store the result directly in the local TransactionState if (status == Status.OK) { if (trace.val) LOG.trace(String.format("%s - Storing %d dependency results locally for successful work fragment", ts, result.size())); assert(result.size() == outputDepIds.length); DependencyTracker otherTracker = this.hstore_site.getDependencyTracker(ts.getBasePartition()); for (int i = 0; i < outputDepIds.length; i++) { if (trace.val) LOG.trace(String.format("%s - Storing DependencyId #%d [numRows=%d]\n%s", ts, outputDepIds[i], result.dependencies[i].getRowCount(), result.dependencies[i])); try { otherTracker.addResult(local_ts, this.partitionId, outputDepIds[i], result.dependencies[i]); } catch (Throwable ex) { // ex.printStackTrace(); String msg = String.format("Failed to store Dependency #%d for %s [idx=%d, fragmentId=%d]", outputDepIds[i], ts, i, fragmentIds[i]); LOG.error(String.format("%s - WorkFragment:%d\nExpectedIds:%s\nOutputDepIds: %s\nResultDepIds: %s\n%s", msg, fragment.hashCode(), fragment.getOutputDepIdList(), Arrays.toString(outputDepIds), Arrays.toString(result.depIds), fragment)); throw new ServerFaultException(msg, ex); } } // FOR } else { local_ts.setPendingError(error, true); } } // ------------------------------- // REMOTE TRANSACTION // ------------------------------- else { if (trace.val) LOG.trace(String.format("%s - Constructing WorkResult with %d bytes from partition %d to send " + "back to initial partition %d [status=%s]", ts, (result != null ? result.size() : null), this.partitionId, ts.getBasePartition(), status)); RpcCallback<WorkResult> callback = ((RemoteTransaction)ts).getWorkCallback(); if (callback == null) { LOG.fatal("Unable to send FragmentResponseMessage for " + ts); LOG.fatal("Original WorkFragment:\n" + fragment); LOG.fatal(ts.toString()); throw new ServerFaultException("No RPC callback to HStoreSite for " + ts, ts.getTransactionId()); } WorkResult response = this.buildWorkResult((RemoteTransaction)ts, result, status, error); assert(response != null); callback.run(response); } // Check whether this is the last query that we're going to get // from this transaction.
If it is, then we can go ahead and prepare the txn if (is_basepartition == false && fragment.getLastFragment()) { if (debug.val) LOG.debug(String.format("%s - Invoking early 2PC:PREPARE at partition %d", ts, this.partitionId)); this.queuePrepare(ts); } } /** * Executes a WorkFragment on behalf of some remote site and returns the * resulting DependencySet * @param fragment * @return * @throws Exception */ private DependencySet executeFragmentIds(AbstractTransaction ts, long undoToken, long fragmentIds[], ParameterSet parameters[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) throws Exception { if (fragmentIds.length == 0) { LOG.warn(String.format("Got a fragment batch for %s that does not have any fragments?", ts)); return (null); } // *********************************** DEBUG *********************************** if (trace.val) { LOG.trace(String.format("%s - Getting ready to kick %d fragments to partition %d EE [undoToken=%d]", ts, fragmentIds.length, this.partitionId, (undoToken != HStoreConstants.NULL_UNDO_LOGGING_TOKEN ? undoToken : "null"))); // if (trace.val) { // LOG.trace("WorkFragmentIds: " + Arrays.toString(fragmentIds)); // Map<String, Object> m = new LinkedHashMap<String, Object>(); // for (int i = 0; i < parameters.length; i++) { // m.put("Parameter[" + i + "]", parameters[i]); // } // FOR // LOG.trace("Parameters:\n" + StringUtil.formatMaps(m)); // } } // *********************************** DEBUG *********************************** DependencySet result = null; // ------------------------------- // SYSPROC FRAGMENTS // ------------------------------- if (ts.isSysProc()) { result = this.executeSysProcFragments(ts, undoToken, fragmentIds.length, fragmentIds, parameters, output_depIds, input_depIds, input_deps); // ------------------------------- // REGULAR FRAGMENTS // ------------------------------- } else { result = this.executePlanFragments(ts, undoToken, fragmentIds.length, fragmentIds, parameters, output_depIds, input_depIds, input_deps); if (result == null) { LOG.warn(String.format("Output DependencySet for %s in %s is null?", Arrays.toString(fragmentIds), ts)); } } return (result); } /** * Execute a BatchPlan directly on this PartitionExecutor without having to covert it * to WorkFragments first. 
This is big speed improvement over having to queue things up * @param ts * @param plan * @return */ private VoltTable[] executeLocalPlan(LocalTransaction ts, BatchPlanner.BatchPlan plan, ParameterSet parameterSets[]) { // Start the new execution round long undoToken = this.calculateNextUndoToken(ts, plan.isReadOnly()); ts.initFirstRound(undoToken, plan.getBatchSize()); int fragmentCount = plan.getFragmentCount(); long fragmentIds[] = plan.getFragmentIds(); int output_depIds[] = plan.getOutputDependencyIds(); int input_depIds[] = plan.getInputDependencyIds(); // Mark that we touched the local partition once for each query in the batch // ts.getTouchedPartitions().put(this.partitionId, plan.getBatchSize()); // Only notify other partitions that we're done with them if we're not // a single-partition transaction if (hstore_conf.site.specexec_enable && ts.isPredictSinglePartition() == false) { //FIXME //PartitionSet new_done = ts.calculateDonePartitions(this.thresholds); //if (new_done != null && new_done.isEmpty() == false) { // LocalPrepareCallback callback = ts.getPrepareCallback(); // assert(callback.isInitialized()); // this.hstore_coordinator.transactionPrepare(ts, callback, new_done); //} } if (trace.val) LOG.trace(String.format("Txn #%d - BATCHPLAN:\n" + " fragmentIds: %s\n" + " fragmentCount: %s\n" + " output_depIds: %s\n" + " input_depIds: %s", ts.getTransactionId(), Arrays.toString(plan.getFragmentIds()), plan.getFragmentCount(), Arrays.toString(plan.getOutputDependencyIds()), Arrays.toString(plan.getInputDependencyIds()))); // NOTE: There are no dependencies that we need to pass in because the entire // batch is local to this partition. DependencySet result = null; try { result = this.executePlanFragments(ts, undoToken, fragmentCount, fragmentIds, parameterSets, output_depIds, input_depIds, null); } finally { ts.fastFinishRound(this.partitionId); } // assert(result != null) : "Unexpected null DependencySet result for " + ts; if (trace.val) LOG.trace("Output:\n" + result); return (result != null ? 
result.dependencies : null); } /** * Execute the given fragment tasks on this site's underlying EE * @param ts * @param undoToken * @param batchSize * @param fragmentIds * @param parameterSets * @param output_depIds * @param input_depIds * @return */ private DependencySet executeSysProcFragments(AbstractTransaction ts, long undoToken, int batchSize, long fragmentIds[], ParameterSet parameters[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) { assert(fragmentIds.length == 1); assert(fragmentIds.length == parameters.length) : String.format("%s - Fragments:%d / Parameters:%d", ts, fragmentIds.length, parameters.length); VoltSystemProcedure volt_proc = this.m_registeredSysProcPlanFragments.get(fragmentIds[0]); if (volt_proc == null) { String msg = "No sysproc handle exists for FragmentID #" + fragmentIds[0] + " :: " + this.m_registeredSysProcPlanFragments; throw new ServerFaultException(msg, ts.getTransactionId()); } // HACK: We have to set the TransactionState for sysprocs manually volt_proc.setTransactionState(ts); ts.markExecNotReadOnly(this.partitionId); DependencySet result = null; try { result = volt_proc.executePlanFragment(ts.getTransactionId(), this.tmp_EEdependencies, (int)fragmentIds[0], parameters[0], this.m_systemProcedureContext); } catch (Throwable ex) { String msg = "Unexpected error when executing system procedure"; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } if (debug.val) LOG.debug(String.format("%s - Finished executing sysproc fragment for %s (#%d)%s", ts, m_registeredSysProcPlanFragments.get(fragmentIds[0]).getClass().getSimpleName(), fragmentIds[0], (trace.val ? "\n" + result : ""))); return (result); } /** * Execute the given fragment tasks on this site's underlying EE * @param ts * @param undoToken * @param batchSize * @param fragmentIds * @param parameterSets * @param output_depIds * @param input_depIds * @return */ private DependencySet executePlanFragments(AbstractTransaction ts, long undoToken, int batchSize, long fragmentIds[], ParameterSet parameterSets[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) { assert(this.ee != null) : "The EE object is null. This is bad!"; Long txn_id = ts.getTransactionId(); //LOG.info("in executePlanFragments()"); // *********************************** DEBUG *********************************** if (debug.val) { StringBuilder sb = new StringBuilder(); sb.append(String.format("%s - Executing %d fragments [lastTxnId=%d, undoToken=%d]", ts, batchSize, this.lastCommittedTxnId, undoToken)); // if (trace.val) { Map<String, Object> m = new LinkedHashMap<String, Object>(); m.put("Fragments", Arrays.toString(fragmentIds)); Map<Integer, Object> inner = new LinkedHashMap<Integer, Object>(); for (int i = 0; i < batchSize; i++) inner.put(i, parameterSets[i].toString()); m.put("Parameters", inner); if (batchSize > 0 && input_depIds[0] != HStoreConstants.NULL_DEPENDENCY_ID) { inner = new LinkedHashMap<Integer, Object>(); for (int i = 0; i < batchSize; i++) { List<VoltTable> deps = input_deps.get(input_depIds[i]); inner.put(input_depIds[i], (deps != null ? StringUtil.join("\n", deps) : "???")); } // FOR m.put("Input Dependencies", inner); } m.put("Output Dependencies", Arrays.toString(output_depIds)); sb.append("\n" + StringUtil.formatMaps(m)); // } LOG.debug(sb.toString().trim()); } // *********************************** DEBUG *********************************** // pass attached dependencies to the EE (for non-sysproc work). 
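// The stashed input dependencies below are the intermediate VoltTables produced by earlier // fragments (possibly on other partitions) that the plan fragments executed in this call // will consume as their inputs.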
if (input_deps != null && input_deps.isEmpty() == false) { if (debug.val) LOG.debug(String.format("%s - Stashing %d InputDependencies at partition %d", ts, input_deps.size(), this.partitionId)); this.ee.stashWorkUnitDependencies(input_deps); } // Java-based Table Read-Write Sets boolean readonly = true; boolean speculative = ts.isSpeculative(); boolean singlePartition = ts.isPredictSinglePartition(); int tableIds[] = null; for (int i = 0; i < batchSize; i++) { boolean fragReadOnly = PlanFragmentIdGenerator.isPlanFragmentReadOnly(fragmentIds[i]); // We don't need to maintain read/write sets for non-speculative txns if (speculative || singlePartition == false) { if (fragReadOnly) { tableIds = catalogContext.getReadTableIds(Long.valueOf(fragmentIds[i])); if (tableIds != null) ts.markTableIdsRead(this.partitionId, tableIds); } else { tableIds = catalogContext.getWriteTableIds(Long.valueOf(fragmentIds[i])); if (tableIds != null) ts.markTableIdsWritten(this.partitionId, tableIds); } } readonly = readonly && fragReadOnly; } // Enable read/write set tracking if (hstore_conf.site.exec_readwrite_tracking && ts.hasExecutedWork(this.partitionId) == false) { if (trace.val) LOG.trace(String.format("%s - Enabling read/write set tracking in EE at partition %d", ts, this.partitionId)); this.ee.trackingEnable(txn_id); } // Check whether the txn has only exeuted read-only queries up to this point if (ts.isExecReadOnly(this.partitionId)) { if (readonly == false) { if (trace.val) LOG.trace(String.format("%s - Marking txn as not read-only %s", ts, Arrays.toString(fragmentIds))); ts.markExecNotReadOnly(this.partitionId); } // We can do this here because the only way that we're not read-only is if // we actually modify data at this partition ts.markExecutedWork(this.partitionId); } DependencySet result = null; boolean needs_profiling = false; if (ts.isExecLocal(this.partitionId)) { if (hstore_conf.site.txn_profiling && ((LocalTransaction)ts).profiler != null) { needs_profiling = true; ((LocalTransaction)ts).profiler.startExecEE(); } } Throwable error = null; try { assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to execute work using undoToken %d for %s but " + "it is less than the last committed undoToken %d at partition %d", undoToken, ts, this.lastCommittedUndoToken, this.partitionId); if (trace.val) LOG.trace(String.format("%s - Executing fragments %s at partition %d [undoToken=%d]", ts, Arrays.toString(fragmentIds), this.partitionId, undoToken)); result = this.ee.executeQueryPlanFragmentsAndGetDependencySet( fragmentIds, batchSize, input_depIds, output_depIds, parameterSets, batchSize, txn_id.longValue(), this.lastCommittedTxnId.longValue(), undoToken); } catch (AssertionError ex) { LOG.error("Fatal error when processing " + ts + "\n" + ts.debug()); error = ex; throw ex; } catch (EvictedTupleAccessException ex) { if (debug.val) LOG.warn("Caught EvictedTupleAccessException."); error = ex; throw ex; } catch (SerializableException ex) { if (debug.val) LOG.error(String.format("%s - Unexpected error in the ExecutionEngine on partition %d", ts, this.partitionId), ex); error = ex; throw ex; } catch (Throwable ex) { error = ex; String msg = String.format("%s - Failed to execute PlanFragments: %s", ts, Arrays.toString(fragmentIds)); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ((LocalTransaction)ts).profiler.stopExecEE(); if (error == null && result == null) { LOG.warn(String.format("%s - Finished executing fragments but got back null results [fragmentIds=%s]", ts, 
Arrays.toString(fragmentIds))); } } // *********************************** DEBUG *********************************** if (debug.val) { if (result != null) { LOG.debug(String.format("%s - Finished executing fragments and got back %d results", ts, result.depIds.length)); } else { LOG.warn(String.format("%s - Finished executing fragments but got back null results? That seems bad...", ts)); } } // *********************************** DEBUG *********************************** return (result); } /** * Load a VoltTable directly into the EE at this partition. * <B>NOTE:</B> This should only be invoked by a system stored procedure. * @param txn_id * @param clusterName * @param databaseName * @param tableName * @param data * @param allowELT * @throws VoltAbortException */ public void loadTable(AbstractTransaction ts, String clusterName, String databaseName, String tableName, VoltTable data, int allowELT) throws VoltAbortException { Table table = this.catalogContext.database.getTables().getIgnoreCase(tableName); if (table == null) { throw new VoltAbortException("Table '" + tableName + "' does not exist in database " + clusterName + "." + databaseName); } if (debug.val) LOG.debug(String.format("Loading %d row(s) into %s [txnId=%d]", data.getRowCount(), table.getName(), ts.getTransactionId())); ts.markExecutedWork(this.partitionId); this.ee.loadTable(table.getRelativeIndex(), data, ts.getTransactionId(), this.lastCommittedTxnId.longValue(), ts.getLastUndoToken(this.partitionId), allowELT != 0); } /** * Load a VoltTable directly into the EE at this partition. * <B>NOTE:</B> This should only be used for testing * @param txnId * @param table * @param data * @param allowELT * @throws VoltAbortException */ protected void loadTable(Long txnId, Table table, VoltTable data, boolean allowELT) throws VoltAbortException { if (debug.val) LOG.debug(String.format("Loading %d row(s) into %s [txnId=%d]", data.getRowCount(), table.getName(), txnId)); this.ee.loadTable(table.getRelativeIndex(), data, txnId.longValue(), this.lastCommittedTxnId.longValue(), HStoreConstants.NULL_UNDO_LOGGING_TOKEN, allowELT); } /** * Execute a SQLStmt batch at this partition. This is the main entry point from * VoltProcedure for where we will execute a SQLStmt batch from a txn. * @param ts The txn handle that is executing this query batch * @param batchSize The number of SQLStmts that the txn queued up using voltQueueSQL() * @param batchStmts The SQLStmts that the txn is trying to execute * @param batchParams The input parameters for the SQLStmts * @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @param forceSinglePartition Whether to force the BatchPlanner to only generate a single-partition plan * @return */ public VoltTable[] executeSQLStmtBatch(LocalTransaction ts, int batchSize, SQLStmt batchStmts[], ParameterSet batchParams[], boolean finalTask, boolean forceSinglePartition) { boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); if (needs_profiling) { ts.profiler.addBatch(batchSize); ts.profiler.stopExecJava(); ts.profiler.startExecPlanning(); } // HACK: This is needed to handle updates on replicated tables properly // when there is only one partition in the cluster. if (catalogContext.numberOfPartitions == 1) { this.depTracker.addTransaction(ts); } if (hstore_conf.site.exec_deferrable_queries) { // TODO: Loop through batchStmts and check whether their corresponding Statement // is marked as deferrable. 
If so, then remove them from batchStmts and batchParams // (sliding everyone over by one in the arrays). Queue up the deferred query. // Be sure to decrement batchSize after you have finished processing this. // EXAMPLE: batchStmts[0].getStatement().getDeferrable() } // Calculate the hash code for this batch to see whether we already have a planner final Integer batchHashCode = VoltProcedure.getBatchHashCode(batchStmts, batchSize); BatchPlanner planner = this.batchPlanners.get(batchHashCode); if (planner == null) { // Assume fast case planner = new BatchPlanner(batchStmts, batchSize, ts.getProcedure(), this.p_estimator, forceSinglePartition); this.batchPlanners.put(batchHashCode, planner); } assert(planner != null); // At this point we have to calculate exactly what we need to do on each partition // for this batch. So somehow right now we need to fire this off to either our // local executor or to Evan's magical distributed transaction manager BatchPlanner.BatchPlan plan = planner.plan(ts.getTransactionId(), this.partitionId, ts.getPredictTouchedPartitions(), ts.getTouchedPartitions(), batchParams); assert(plan != null); if (trace.val) { LOG.trace(ts + " - Touched Partitions: " + ts.getTouchedPartitions().values()); LOG.trace(ts + " - Next BatchPlan:\n" + plan.toString()); } if (needs_profiling) ts.profiler.stopExecPlanning(); // Tell the TransactionEstimator that we're about to execute these mofos EstimatorState t_state = ts.getEstimatorState(); if (this.localTxnEstimator != null && t_state != null && t_state.isUpdatesEnabled()) { if (needs_profiling) ts.profiler.startExecEstimation(); try { this.localTxnEstimator.executeQueries(t_state, planner.getStatements(), plan.getStatementPartitions()); } finally { if (needs_profiling) ts.profiler.stopExecEstimation(); } } else if (t_state != null && t_state.shouldAllowUpdates()) { LOG.warn("Skipping estimator updates for " + ts); } // Check whether our plan caused a misprediction // Doing it this way allows us to update the TransactionEstimator before we abort the txn if (plan.getMisprediction() != null) { MispredictionException ex = plan.getMisprediction(); ts.setPendingError(ex, false); assert(ex.getPartitions().isEmpty() == false) : "Unexpected empty PartitionSet for mispredicted txn " + ts; // Print Misprediction Debug if (hstore_conf.site.exec_mispredict_crash) { // Use a lock so that we only dump out the first txn that fails synchronized (PartitionExecutor.class) { LOG.warn("\n" + EstimatorUtil.mispredictDebug(ts, planner, batchStmts, batchParams)); LOG.fatal(String.format("Crashing because site.exec_mispredict_crash is true [txn=%s]", ts)); this.crash(ex); } // SYNCH } else if (debug.val) { if (trace.val) LOG.warn("\n" + EstimatorUtil.mispredictDebug(ts, planner, batchStmts, batchParams)); LOG.debug(ts + " - Aborting and restarting mispredicted txn."); } throw ex; } // Keep track of the number of times that we've executed each query for this transaction int stmtCounters[] = this.tmp_stmtCounters.getArray(batchSize); for (int i = 0; i < batchSize; i++) { stmtCounters[i] = ts.updateStatementCounter(batchStmts[i].getStatement()); } // FOR if (ts.hasPrefetchQueries()) { PartitionSet stmtPartitions[] = plan.getStatementPartitions(); PrefetchState prefetchState = ts.getPrefetchState(); QueryTracker queryTracker = prefetchState.getExecQueryTracker(); assert(prefetchState != null); for (int i = 0; i < batchSize; i++) { // We always have to update the query tracker regardless of whether // the query was prefetched or not.
This is so that we can ensure // that we execute the queries in the right order. Statement stmt = batchStmts[i].getStatement(); stmtCounters[i] = queryTracker.addQuery(stmt, stmtPartitions[i], batchParams[i]); } // FOR // FIXME PrefetchQueryUtil.checkSQLStmtBatch(this, ts, plan, batchSize, batchStmts, batchParams); } // PREFETCH VoltTable results[] = null; // FAST-PATH: Single-partition + Local // If the BatchPlan only has WorkFragments that are for this partition, then // we can use the fast-path executeLocalPlan() method if (plan.isSingledPartitionedAndLocal()) { if (trace.val) LOG.trace(String.format("%s - Sending %s directly to the ExecutionEngine at partition %d", ts, plan.getClass().getSimpleName(), this.partitionId)); // If this the finalTask flag is set to true, and we're only executing queries at this // partition, then we need to notify the other partitions that we're done with them. if (hstore_conf.site.exec_early_prepare && finalTask == true && ts.isPredictSinglePartition() == false && ts.isSysProc() == false && ts.allowEarlyPrepare() == true) { tmp_fragmentsPerPartition.clearValues(); tmp_fragmentsPerPartition.put(this.partitionId, batchSize); DonePartitionsNotification notify = this.computeDonePartitions(ts, null, tmp_fragmentsPerPartition, finalTask); - if (notify.hasSitesToNotify()) this.notifyDonePartitions(ts, notify); + if (notify != null && notify.hasSitesToNotify()) + this.notifyDonePartitions(ts, notify); } // Execute the queries right away. results = this.executeLocalPlan(ts, plan, batchParams); } // DISTRIBUTED EXECUTION // Otherwise, we need to generate WorkFragments and then send the messages out // to our remote partitions using the HStoreCoordinator else { ExecutionState execState = ts.getExecutionState(); execState.tmp_partitionFragments.clear(); plan.getWorkFragmentsBuilders(ts.getTransactionId(), stmtCounters, execState.tmp_partitionFragments); if (debug.val) LOG.debug(String.format("%s - Using dispatchWorkFragments to execute %d %ss", ts, execState.tmp_partitionFragments.size(), WorkFragment.class.getSimpleName())); if (needs_profiling) { int remote_cnt = 0; PartitionSet stmtPartitions[] = plan.getStatementPartitions(); for (int i = 0; i < batchSize; i++) { if (stmtPartitions[i].get() != ts.getBasePartition()) remote_cnt++; if (trace.val) LOG.trace(String.format("%s - [%02d] stmt:%s / partitions:%s", ts, i, batchStmts[i].getStatement().getName(), stmtPartitions[i])); } // FOR if (trace.val) LOG.trace(String.format("%s - Remote Queries Count = %d", ts, remote_cnt)); ts.profiler.addRemoteQuery(remote_cnt); } // Block until we get all of our responses. 
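// dispatchWorkFragments() hands the per-partition WorkFragments to the HStoreCoordinator and // does not return until all of the responses for this batch have come back, so 'results' is // complete once we get past this call.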
results = this.dispatchWorkFragments(ts, batchSize, batchParams, execState.tmp_partitionFragments, finalTask); } if (debug.val && results == null) LOG.warn("Got back a null results array for " + ts + "\n" + plan.toString()); if (needs_profiling) ts.profiler.startExecJava(); return (results); } /** * * @param fresponse */ protected WorkResult buildWorkResult(AbstractTransaction ts, DependencySet result, Status status, SerializableException error) { WorkResult.Builder builder = WorkResult.newBuilder(); // Partition Id builder.setPartitionId(this.partitionId); // Status builder.setStatus(status); // SerializableException if (error != null) { int size = error.getSerializedSize(); BBContainer bc = this.buffer_pool.acquire(size); try { error.serializeToBuffer(bc.b); } catch (IOException ex) { String msg = "Failed to serialize error for " + ts; throw new ServerFaultException(msg, ex); } bc.b.rewind(); builder.setError(ByteString.copyFrom(bc.b)); bc.discard(); } // Push dependencies back to the remote partition that needs it if (status == Status.OK) { for (int i = 0, cnt = result.size(); i < cnt; i++) { builder.addDepId(result.depIds[i]); this.fs.clear(); try { result.dependencies[i].writeExternal(this.fs); ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b); builder.addDepData(bs); } catch (Exception ex) { throw new ServerFaultException(String.format("Failed to serialize output dependency %d for %s", result.depIds[i], ts), ex); } if (trace.val) LOG.trace(String.format("%s - Serialized Output Dependency %d\n%s", ts, result.depIds[i], result.dependencies[i])); } // FOR this.fs.getBBContainer().discard(); } return (builder.build()); } /** * This method is invoked when the PartitionExecutor wants to execute work at a remote HStoreSite. * The doneNotificationsPerSite is an array where each offset (based on SiteId) may contain * a PartitionSet of the partitions that this txn is finished with at the remote node and will * not be executing any work in the current batch. * @param ts * @param fragmentBuilders * @param parameterSets * @param doneNotificationsPerSite */ private void requestWork(LocalTransaction ts, Collection<WorkFragment.Builder> fragmentBuilders, List<ByteString> parameterSets, DonePartitionsNotification notify) { assert(fragmentBuilders.isEmpty() == false); assert(ts != null); Long txn_id = ts.getTransactionId(); if (trace.val) LOG.trace(String.format("%s - Wrapping %d %s into a %s", ts, fragmentBuilders.size(), WorkFragment.class.getSimpleName(), TransactionWorkRequest.class.getSimpleName())); // If our transaction was originally designated as a single-partitioned, then we need to make // sure that we don't touch any partition other than our local one. If we do, then we need abort // it and restart it as multi-partitioned boolean need_restart = false; boolean predict_singlepartition = ts.isPredictSinglePartition(); PartitionSet done_partitions = ts.getDonePartitions(); Estimate t_estimate = ts.getLastEstimate(); // Now we can go back through and start running all of the WorkFragments that were not blocked // waiting for an input dependency. 
Note that we pack all the fragments into a single // CoordinatorFragment rather than sending each WorkFragment in its own message for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { assert(this.depTracker.isBlocked(ts, fragmentBuilder) == false); final int target_partition = fragmentBuilder.getPartitionId(); final int target_site = catalogContext.getSiteIdForPartitionId(target_partition); final PartitionSet doneNotifications = (notify != null ? notify.getNotifications(target_site) : null); // Make sure that this isn't a single-partition txn trying to access a remote partition if (predict_singlepartition && target_partition != this.partitionId) { if (debug.val) LOG.debug(String.format("%s - Txn on partition %d is suppose to be " + "single-partitioned, but it wants to execute a fragment on partition %d", ts, this.partitionId, target_partition)); need_restart = true; break; } // Make sure that this txn isn't trying to access a partition that we said we were // done with earlier else if (done_partitions.contains(target_partition)) { if (debug.val) LOG.warn(String.format("%s on partition %d was marked as done on partition %d " + "but now it wants to go back for more!", ts, this.partitionId, target_partition)); need_restart = true; break; } // Make sure we at least have something to do! else if (fragmentBuilder.getFragmentIdCount() == 0) { LOG.warn(String.format("%s - Trying to send a WorkFragment request with 0 fragments", ts)); continue; } // Add in the specexec query estimate at this partition if needed if (hstore_conf.site.specexec_enable && t_estimate != null && t_estimate.hasQueryEstimate(target_partition)) { List<CountedStatement> queryEst = t_estimate.getQueryEstimate(target_partition); // if (debug.val) if (target_partition == 0) if (debug.val) LOG.debug(String.format("%s - Sending remote query estimate to partition %d " + "containing %d queries\n%s", ts, target_partition, queryEst.size(), StringUtil.join("\n", queryEst))); assert(queryEst.isEmpty() == false); QueryEstimate.Builder estBuilder = QueryEstimate.newBuilder(); for (CountedStatement countedStmt : queryEst) { estBuilder.addStmtIds(countedStmt.statement.getId()); estBuilder.addStmtCounters(countedStmt.counter); } // FOR fragmentBuilder.setFutureStatements(estBuilder); } // Get the TransactionWorkRequest.Builder for the remote HStoreSite // We will use this store our serialized input dependencies TransactionWorkRequestBuilder requestBuilder = tmp_transactionRequestBuilders[target_site]; if (requestBuilder == null) { requestBuilder = tmp_transactionRequestBuilders[target_site] = new TransactionWorkRequestBuilder(); } TransactionWorkRequest.Builder builder = requestBuilder.getBuilder(ts, doneNotifications); // Also keep track of what Statements they are executing so that we know // we need to send over the wire to them. 
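// ----------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the original source; names simplified.
// The per-site batching done above with tmp_transactionRequestBuilders can be pictured as
// grouping the outgoing fragments by their target site and then sending one request per site:
//
//     Map<Integer, List<WorkFragment.Builder>> perSite = new HashMap<>();
//     for (WorkFragment.Builder fb : fragmentBuilders) {
//         int siteId = catalogContext.getSiteIdForPartitionId(fb.getPartitionId());
//         perSite.computeIfAbsent(siteId, k -> new ArrayList<>()).add(fb);
//     }
//     // one TransactionWorkRequest is then built and sent per entry in perSite,
//     // instead of one network message per WorkFragment
// ----------------------------------------------------------------------------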
requestBuilder.addParamIndexes(fragmentBuilder.getParamIndexList()); // Input Dependencies if (fragmentBuilder.getNeedsInput()) { if (debug.val) LOG.debug(String.format("%s - Retrieving input dependencies at partition %d", ts, this.partitionId)); tmp_removeDependenciesMap.clear(); for (int i = 0, cnt = fragmentBuilder.getInputDepIdCount(); i < cnt; i++) { this.getFragmentInputs(ts, fragmentBuilder.getInputDepId(i), tmp_removeDependenciesMap); } // FOR for (Entry<Integer, List<VoltTable>> e : tmp_removeDependenciesMap.entrySet()) { if (requestBuilder.hasInputDependencyId(e.getKey())) continue; if (debug.val) LOG.debug(String.format("%s - Attaching %d input dependencies to be sent to %s", ts, e.getValue().size(), HStoreThreadManager.formatSiteName(target_site))); for (VoltTable vt : e.getValue()) { this.fs.clear(); try { this.fs.writeObject(vt); builder.addAttachedDepId(e.getKey().intValue()); builder.addAttachedData(ByteString.copyFrom(this.fs.getBBContainer().b)); } catch (Exception ex) { String msg = String.format("Failed to serialize input dependency %d for %s", e.getKey(), ts); throw new ServerFaultException(msg, ts.getTransactionId()); } if (debug.val) LOG.debug(String.format("%s - Storing %d rows for InputDependency %d to send " + "to partition %d [bytes=%d]", ts, vt.getRowCount(), e.getKey(), fragmentBuilder.getPartitionId(), CollectionUtil.last(builder.getAttachedDataList()).size())); } // FOR requestBuilder.addInputDependencyId(e.getKey()); } // FOR this.fs.getBBContainer().discard(); } builder.addFragments(fragmentBuilder); } // FOR (tasks) // Bad mojo! We need to throw a MispredictionException so that the VoltProcedure // will catch it and we can propagate the error message all the way back to the HStoreSite if (need_restart) { if (trace.val) LOG.trace(String.format("Aborting %s because it was mispredicted", ts)); // This is kind of screwy because we don't actually want to send the touched partitions // histogram because VoltProcedure will just do it for us... throw new MispredictionException(txn_id, null); } // Stick on the ParameterSets that each site needs into the TransactionWorkRequest for (int target_site = 0; target_site < tmp_transactionRequestBuilders.length; target_site++) { TransactionWorkRequestBuilder builder = tmp_transactionRequestBuilders[target_site]; if (builder == null || builder.isDirty() == false) { continue; } assert(builder != null); builder.addParameterSets(parameterSets); // Bombs away! this.hstore_coordinator.transactionWork(ts, target_site, builder.build(), this.request_work_callback); if (debug.val) LOG.debug(String.format("%s - Sent Work request to remote site %s", ts, HStoreThreadManager.formatSiteName(target_site))); } // FOR } /** * Figure out what partitions this transaction is done with. This will only return * a PartitionSet of what partitions we think we're done with. * For each partition that we idenitfy that the txn is done with, we will check to see * whether the txn is going to execute a query at its site in this batch. If it's not, * then we will notify that HStoreSite through the HStoreCoordinator. * If the partition that it doesn't need anymore is local (i.e., it's at the same * HStoreSite that we're at right now), then we'll just pass them a quick message * to let them know that they can prepare the txn. * @param ts * @param estimate * @param fragmentsPerPartition A histogram of the number of PlanFragments the * txn will execute in this batch at each partition. 
* @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @return A notification object that can be used to notify partitions that this txn is done with them. */ private DonePartitionsNotification computeDonePartitions(final LocalTransaction ts, final Estimate estimate, final FastIntHistogram fragmentsPerPartition, final boolean finalTask) { final PartitionSet touchedPartitions = ts.getPredictTouchedPartitions(); final PartitionSet donePartitions = ts.getDonePartitions(); // Compute the partitions that the txn will be finished with after this batch PartitionSet estDonePartitions = null; // If the finalTask flag is set to true, then the new done partitions // is every partition that this txn has locked if (finalTask) { estDonePartitions = touchedPartitions; } // Otherwise, we'll rely on the transaction's current estimate to figure it out. else { if (estimate == null || estimate.isValid() == false) { if (debug.val) LOG.debug(String.format("%s - Unable to compute new done partitions because there " + "is no valid estimate for the txn", ts, estimate.getClass().getSimpleName())); return (null); } estDonePartitions = estimate.getDonePartitions(this.thresholds); if (estDonePartitions == null || estDonePartitions.isEmpty()) { if (debug.val) LOG.debug(String.format("%s - There are no new done partitions identified by %s", ts, estimate.getClass().getSimpleName())); return (null); } } assert(estDonePartitions != null) : "Null done partitions for " + ts; assert(estDonePartitions.isEmpty() == false) : "Empty done partitions for " + ts; if (debug.val) LOG.debug(String.format("%s - New estimated done partitions %s%s", ts, estDonePartitions, (trace.val ? "\n"+estimate : ""))); // Note that we can actually be done with ourself, if this txn is only going to execute queries // at remote partitions. But we can't actually execute anything because this partition's only // execution thread is going to be blocked. So we always do this so that we're not sending a // useless message estDonePartitions.remove(this.partitionId); // Make sure that we only tell partitions that we actually touched, otherwise they will // be stuck waiting for a finish request that will never come! DonePartitionsNotification notify = new DonePartitionsNotification(); for (int partition : estDonePartitions.values()) { // Only mark the txn done at this partition if the Estimate says we were done // with it after executing this batch and it's a partition that we've locked. if (donePartitions.contains(partition) || touchedPartitions.contains(partition) == false) continue; if (trace.val) LOG.trace(String.format("%s - Marking partition %d as done for txn", ts, partition)); notify.donePartitions.add(partition); if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.markEarly2PCPartition(partition); // Check whether we're executing a query at this partition in this batch. // If we're not, then we need to check whether we can piggyback the "done" message // in another WorkFragment going to that partition or whether we have to // send a separate TransactionPrepareRequest if (fragmentsPerPartition.get(partition, 0) == 0) { // We need to let them know that the party is over! 
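// ----------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the original source. For a partition that
// the txn is done with but will not touch in this batch, the decision made below is roughly:
//
//     if (hstore_site.isLocalPartition(partition)) {
//         // same JVM: hand the prepare straight to that partition's executor
//     } else if (/* another fragment in this batch already goes to that remote site */) {
//         // piggyback the "done" notification on that WorkFragment (no extra message)
//     } else {
//         // no traffic to that site in this batch: a separate TransactionPrepareRequest
//         // has to be sent through the HStoreCoordinator
//     }
// ----------------------------------------------------------------------------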
if (hstore_site.isLocalPartition(partition)) {
                    // if (debug.val) LOG.info(String.format("%s - Notifying local partition %d that txn is finished with it", ts, partition));
                    hstore_site.getPartitionExecutor(partition).queuePrepare(ts);
                }
                // Check whether we can piggyback on another WorkFragment that is going to
                // the same site
                else {
                    Site remoteSite = catalogContext.getSiteForPartition(partition);
                    boolean found = false;
                    for (Partition remotePartition : remoteSite.getPartitions().values()) {
                        if (fragmentsPerPartition.get(remotePartition.getId(), 0) != 0) {
                            found = true;
                            break;
                        }
                    } // FOR
                    notify.addSiteNotification(remoteSite, partition, (found == false));
                }
            }
        } // FOR
        return (notify);
    }

    /**
     * Send asynchronous notification messages to any remote site to tell them that we
     * are done with partitions that they have.
     * @param ts
     * @param notify
     */
    private void notifyDonePartitions(LocalTransaction ts, DonePartitionsNotification notify) {
        // BLAST OUT NOTIFICATIONS!
        for (int remoteSiteId : notify._sitesToNotify) {
            assert(notify.notificationsPerSite[remoteSiteId] != null);
            if (debug.val)
                LOG.info(String.format("%s - Notifying %s that txn is finished with partitions %s",
                         ts, HStoreThreadManager.formatSiteName(remoteSiteId),
                         notify.notificationsPerSite[remoteSiteId]));
            hstore_coordinator.transactionPrepare(ts, ts.getPrepareCallback(),
                                                  notify.notificationsPerSite[remoteSiteId]);
            // Make sure that we remove the PartitionSet for this site so that we don't
            // try to send the notifications again.
            notify.notificationsPerSite[remoteSiteId] = null;
        } // FOR
    }

    /**
     * Execute the given tasks and then block the current thread waiting for the list of dependency_ids to come
     * back from whatever it was we were supposed to do...
     * This is the slowest way to execute a bunch of WorkFragments and therefore should only be invoked
     * for batches that need to access non-local partitions
     * @param ts The txn handle that is executing this query batch
     * @param batchSize The number of SQLStmts that the txn queued up using voltQueueSQL()
     * @param batchParams The input parameters for the SQLStmts
     * @param allFragmentBuilders
     * @param finalTask Whether the txn has marked this as the last batch that they will ever execute
     * @return
     */
    public VoltTable[] dispatchWorkFragments(final LocalTransaction ts,
                                             final int batchSize,
                                             final ParameterSet batchParams[],
                                             final Collection<WorkFragment.Builder> allFragmentBuilders,
                                             boolean finalTask) {
        assert(allFragmentBuilders.isEmpty() == false) :
            "Unexpected empty WorkFragment list for " + ts;
        final boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null);

        // *********************************** DEBUG ***********************************
        if (debug.val) {
            LOG.debug(String.format("%s - Preparing to dispatch %d messages and wait for the results [needsProfiling=%s]",
                      ts, allFragmentBuilders.size(), needs_profiling));
            if (trace.val) {
                StringBuilder sb = new StringBuilder();
                sb.append(ts + " - WorkFragments:\n");
                for (WorkFragment.Builder fragment : allFragmentBuilders) {
                    sb.append(StringBoxUtil.box(fragment.toString()) + "\n");
                } // FOR
                sb.append(ts + " - ParameterSets:\n");
                for (ParameterSet ps : batchParams) {
                    sb.append(ps + "\n");
                } // FOR
                LOG.trace(sb);
            }
        }
        // *********************************** DEBUG ***********************************

        // OPTIONAL: Check to make sure that this request is valid
        //  (1) At least one of the WorkFragments needs to be executed on a remote partition
        //  (2) All of the PlanFragment ids in the WorkFragments match this txn's Procedure
        if
(hstore_conf.site.exec_validate_work && ts.isSysProc() == false) { LOG.warn(String.format("%s - Checking whether all of the WorkFragments are valid", ts)); boolean has_remote = false; for (WorkFragment.Builder frag : allFragmentBuilders) { if (frag.getPartitionId() != this.partitionId) { has_remote = true; } for (int frag_id : frag.getFragmentIdList()) { PlanFragment catalog_frag = CatalogUtil.getPlanFragment(catalogContext.database, frag_id); Statement catalog_stmt = catalog_frag.getParent(); assert(catalog_stmt != null); Procedure catalog_proc = catalog_stmt.getParent(); if (catalog_proc.equals(ts.getProcedure()) == false) { LOG.warn(ts.debug() + "\n" + allFragmentBuilders + "\n---- INVALID ----\n" + frag); String msg = String.format("%s - Unexpected %s", ts, catalog_frag.fullName()); throw new ServerFaultException(msg, ts.getTransactionId()); } } } // FOR if (has_remote == false) { LOG.warn(ts.debug() + "\n" + allFragmentBuilders); String msg = ts + "Trying to execute all local single-partition queries using the slow-path!"; throw new ServerFaultException(msg, ts.getTransactionId()); } } boolean first = true; boolean serializedParams = false; CountDownLatch latch = null; boolean all_local = true; boolean is_localSite; boolean is_localPartition; boolean is_localReadOnly = true; int num_localPartition = 0; int num_localSite = 0; int num_remote = 0; int num_skipped = 0; int total = 0; Collection<WorkFragment.Builder> fragmentBuilders = allFragmentBuilders; // Make sure our txn is in our DependencyTracker if (trace.val) LOG.trace(String.format("%s - Added transaction to %s", ts, this.depTracker.getClass().getSimpleName())); this.depTracker.addTransaction(ts); // Count the number of fragments that we're going to send to each partition and // figure out whether the txn will always be read-only at this partition tmp_fragmentsPerPartition.clearValues(); for (WorkFragment.Builder fragmentBuilder : allFragmentBuilders) { int partition = fragmentBuilder.getPartitionId(); tmp_fragmentsPerPartition.put(partition); if (this.partitionId == partition && fragmentBuilder.getReadOnly() == false) { is_localReadOnly = false; } } // FOR long undoToken = this.calculateNextUndoToken(ts, is_localReadOnly); ts.initFirstRound(undoToken, batchSize); final boolean predict_singlePartition = ts.isPredictSinglePartition(); // Calculate whether we are finished with partitions now final Estimate lastEstimate = ts.getLastEstimate(); DonePartitionsNotification notify = null; if (hstore_conf.site.exec_early_prepare && ts.isSysProc() == false && ts.allowEarlyPrepare()) { notify = this.computeDonePartitions(ts, lastEstimate, tmp_fragmentsPerPartition, finalTask); - if (notify.hasSitesToNotify()) this.notifyDonePartitions(ts, notify); + if (notify != null && notify.hasSitesToNotify()) + this.notifyDonePartitions(ts, notify); } // Attach the ParameterSets to our transaction handle so that anybody on this HStoreSite // can access them directly without needing to deserialize them from the WorkFragments ts.attachParameterSets(batchParams); // Now if we have some work sent out to other partitions, we need to wait until they come back // In the first part, we wait until all of our blocked WorkFragments become unblocked final BlockingDeque<Collection<WorkFragment.Builder>> queue = this.depTracker.getUnblockedWorkFragmentsQueue(ts); // Run through this loop if: // (1) We have no pending errors // (2) This is our first time in the loop (first == true) // (3) If we know that there are still messages being blocked // (4) If we know that 
there are still unblocked messages that we need to process // (5) The latch for this round is still greater than zero while (ts.hasPendingError() == false && (first == true || this.depTracker.stillHasWorkFragments(ts) || (latch != null && latch.getCount() > 0))) { if (trace.val) LOG.trace(String.format("%s - %s loop [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, ClassUtil.getCurrentMethodName(), first, this.depTracker.stillHasWorkFragments(ts), queue.size(), latch)); // If this is the not first time through the loop, then poll the queue // to get our list of fragments if (first == false) { all_local = true; is_localSite = false; is_localPartition = false; num_localPartition = 0; num_localSite = 0; num_remote = 0; num_skipped = 0; total = 0; if (trace.val) LOG.trace(String.format("%s - Waiting for unblocked tasks on partition %d", ts, this.partitionId)); fragmentBuilders = queue.poll(); // NON-BLOCKING // If we didn't get back a list of fragments here, then we will spin through // and invoke utilityWork() to try to do something useful until what we need shows up if (needs_profiling) ts.profiler.startExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.start(); try { while (fragmentBuilders == null) { // If there is more work that we could do, then we'll just poll the queue // without waiting so that we can go back and execute it again if we have // more time. if (this.utilityWork()) { fragmentBuilders = queue.poll(); } // Otherwise we will wait a little so that we don't spin the CPU else { fragmentBuilders = queue.poll(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS); } } // WHILE } catch (InterruptedException ex) { if (this.hstore_site.isShuttingDown() == false) { LOG.error(String.format("%s - We were interrupted while waiting for blocked tasks", ts), ex); } return (null); } finally { if (needs_profiling) ts.profiler.stopExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.stopIfStarted(); } } assert(fragmentBuilders != null); // If the list to fragments unblock is empty, then we // know that we have dispatched all of the WorkFragments for the // transaction's current SQLStmt batch. That means we can just wait // until all the results return to us. if (fragmentBuilders.isEmpty()) { if (trace.val) LOG.trace(String.format("%s - Got an empty list of WorkFragments at partition %d. " + "Blocking until dependencies arrive", ts, this.partitionId)); break; } this.tmp_localWorkFragmentBuilders.clear(); if (predict_singlePartition == false) { this.tmp_remoteFragmentBuilders.clear(); this.tmp_localSiteFragmentBuilders.clear(); } // ------------------------------- // FAST PATH: Assume everything is local // ------------------------------- if (predict_singlePartition) { for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { if (first == false || this.depTracker.addWorkFragment(ts, fragmentBuilder, batchParams)) { this.tmp_localWorkFragmentBuilders.add(fragmentBuilder); total++; num_localPartition++; } } // FOR // We have to tell the transaction handle to start the round before we send off the // WorkFragments for execution, since they might start executing locally! 
if (first) { ts.startRound(this.partitionId); latch = this.depTracker.getDependencyLatch(ts); } // Execute all of our WorkFragments quickly at our local ExecutionEngine for (WorkFragment.Builder fragmentBuilder : this.tmp_localWorkFragmentBuilders) { if (debug.val) LOG.debug(String.format("%s - Got unblocked %s to execute locally", ts, fragmentBuilder.getClass().getSimpleName())); assert(fragmentBuilder.getPartitionId() == this.partitionId) : String.format("Trying to process %s for %s on partition %d but it should have been " + "sent to partition %d [singlePartition=%s]\n%s", fragmentBuilder.getClass().getSimpleName(), ts, this.partitionId, fragmentBuilder.getPartitionId(), predict_singlePartition, fragmentBuilder); WorkFragment fragment = fragmentBuilder.build(); this.processWorkFragment(ts, fragment, batchParams); } // FOR } // ------------------------------- // SLOW PATH: Mixed local and remote messages // ------------------------------- else { // Look at each task and figure out whether it needs to be executed at a remote // HStoreSite or whether we can execute it at one of our local PartitionExecutors. for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { int partition = fragmentBuilder.getPartitionId(); is_localSite = hstore_site.isLocalPartition(partition); is_localPartition = (partition == this.partitionId); all_local = all_local && is_localPartition; // If this is the last WorkFragment that we're going to send to this partition for // this batch, then we will want to check whether we know that this is the last // time this txn will ever need to go to that txn. If so, then we'll want to if (notify != null && notify.donePartitions.contains(partition) && tmp_fragmentsPerPartition.dec(partition) == 0) { if (debug.val) LOG.debug(String.format("%s - Setting last fragment flag in %s for partition %d", ts, WorkFragment.class.getSimpleName(), partition)); fragmentBuilder.setLastFragment(true); } if (first == false || this.depTracker.addWorkFragment(ts, fragmentBuilder, batchParams)) { total++; // At this point we know that all the WorkFragment has been registered // in the LocalTransaction, so then it's safe for us to look to see // whether we already have a prefetched result that we need // if (prefetch && is_localPartition == false) { // boolean skip_queue = true; // for (int i = 0, cnt = fragmentBuilder.getFragmentIdCount(); i < cnt; i++) { // int fragId = fragmentBuilder.getFragmentId(i); // int paramIdx = fragmentBuilder.getParamIndex(i); // // VoltTable vt = this.queryCache.getResult(ts.getTransactionId(), // fragId, // partition, // parameters[paramIdx]); // if (vt != null) { // if (trace.val) // LOG.trace(String.format("%s - Storing cached result from partition %d for fragment %d", // ts, partition, fragId)); // this.depTracker.addResult(ts, partition, fragmentBuilder.getOutputDepId(i), vt); // } else { // skip_queue = false; // } // } // FOR // // If we were able to get cached results for all of the fragmentIds in // // this WorkFragment, then there is no need for us to send the message // // So we'll just skip queuing it up! How nice! 
// if (skip_queue) { // if (debug.val) // LOG.debug(String.format("%s - Using prefetch result for all fragments from partition %d", // ts, partition)); // num_skipped++; // continue; // } // } // Otherwise add it to our list of WorkFragments that we want // queue up right now if (is_localPartition) { is_localReadOnly = (is_localReadOnly && fragmentBuilder.getReadOnly()); this.tmp_localWorkFragmentBuilders.add(fragmentBuilder); num_localPartition++; } else if (is_localSite) { this.tmp_localSiteFragmentBuilders.add(fragmentBuilder); num_localSite++; } else { this.tmp_remoteFragmentBuilders.add(fragmentBuilder); num_remote++; } } } // FOR assert(total == (num_remote + num_localSite + num_localPartition + num_skipped)) : String.format("Total:%d / Remote:%d / LocalSite:%d / LocalPartition:%d / Skipped:%d", total, num_remote, num_localSite, num_localPartition, num_skipped); // We have to tell the txn to start the round before we send off the // WorkFragments for execution, since they might start executing locally! if (first) { ts.startRound(this.partitionId); latch = this.depTracker.getDependencyLatch(ts); } // Now request the fragments that aren't local // We want to push these out as soon as possible if (num_remote > 0) { // We only need to serialize the ParameterSets once if (serializedParams == false) { if (needs_profiling) ts.profiler.startSerialization(); tmp_serializedParams.clear(); for (int i = 0; i < batchParams.length; i++) { if (batchParams[i] == null) { tmp_serializedParams.add(ByteString.EMPTY); } else { this.fs.clear(); try { batchParams[i].writeExternal(this.fs); ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b); tmp_serializedParams.add(bs); } catch (Exception ex) { String msg = "Failed to serialize ParameterSet " + i + " for " + ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } } // FOR if (needs_profiling) ts.profiler.stopSerialization(); } if (trace.val) LOG.trace(String.format("%s - Requesting %d %s to be executed on remote partitions " + "[doneNotifications=%s]", ts, WorkFragment.class.getSimpleName(), num_remote, notify!=null)); this.requestWork(ts, tmp_remoteFragmentBuilders, tmp_serializedParams, notify); if (needs_profiling) ts.profiler.markRemoteQuery(); } // Then dispatch the task that are needed at the same HStoreSite but // at a different partition than this one if (num_localSite > 0) { if (trace.val) LOG.trace(String.format("%s - Executing %d WorkFragments on local site's partitions", ts, num_localSite)); for (WorkFragment.Builder builder : this.tmp_localSiteFragmentBuilders) { PartitionExecutor other = hstore_site.getPartitionExecutor(builder.getPartitionId()); other.queueWork(ts, builder.build()); } // FOR if (needs_profiling) ts.profiler.markRemoteQuery(); } // Then execute all of the tasks need to access the partitions at this HStoreSite // We'll dispatch the remote-partition-local-site fragments first because they're going // to need to get queued up by at the other PartitionExecutors if (num_localPartition > 0) { if (trace.val) LOG.trace(String.format("%s - Executing %d WorkFragments on local partition", ts, num_localPartition)); for (WorkFragment.Builder fragmentBuilder : this.tmp_localWorkFragmentBuilders) { this.processWorkFragment(ts, fragmentBuilder.build(), batchParams); } // FOR } } if (trace.val) LOG.trace(String.format("%s - Dispatched %d WorkFragments " + "[remoteSite=%d, localSite=%d, localPartition=%d]", ts, total, num_remote, num_localSite, num_localPartition)); first = false; } // WHILE 
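// ----------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the original source; names simplified.
// The dispatch loop that just ended follows this overall shape:
//
//     ts.startRound(this.partitionId);                          // once, on the first pass
//     CountDownLatch latch = depTracker.getDependencyLatch(ts);
//     while (/* unblocked WorkFragments remain */) {
//         /* send remote fragments, queue local-site fragments, run local ones */
//     }
//     while (latch.getCount() > 0) {                            // below: wait for results
//         if (!utilityWork()) latch.await(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS);
//     }
//     VoltTable[] results = depTracker.getResults(ts);
//     ts.finishRound(this.partitionId);
// ----------------------------------------------------------------------------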
this.fs.getBBContainer().discard(); if (trace.val) LOG.trace(String.format("%s - BREAK OUT [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first, this.depTracker.stillHasWorkFragments(ts), latch)); // assert(ts.stillHasWorkFragments() == false) : // String.format("Trying to block %s before all of its WorkFragments have been dispatched!\n%s\n%s", // ts, // StringUtil.join("** ", "\n", tempDebug), // this.getVoltProcedure(ts.getProcedureName()).getLastBatchPlan()); // Now that we know all of our WorkFragments have been dispatched, we can then // wait for all of the results to come back in. if (latch == null) latch = this.depTracker.getDependencyLatch(ts); assert(latch != null) : String.format("Unexpected null dependency latch for " + ts); if (latch.getCount() > 0) { if (debug.val) { LOG.debug(String.format("%s - All blocked messages dispatched. Waiting for %d dependencies", ts, latch.getCount())); if (trace.val) LOG.trace(ts.toString()); } boolean timeout = false; long startTime = EstTime.currentTimeMillis(); if (needs_profiling) ts.profiler.startExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.start(); try { while (latch.getCount() > 0 && ts.hasPendingError() == false) { if (this.utilityWork() == false) { timeout = latch.await(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS); if (timeout == false) break; } if ((EstTime.currentTimeMillis() - startTime) > hstore_conf.site.exec_response_timeout) { timeout = true; break; } } // WHILE } catch (InterruptedException ex) { if (this.hstore_site.isShuttingDown() == false) { LOG.error(String.format("%s - We were interrupted while waiting for results", ts), ex); } timeout = true; } catch (Throwable ex) { String msg = String.format("Fatal error for %s while waiting for results", ts); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ts.profiler.stopExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.stopIfStarted(); } if (timeout && this.isShuttingDown() == false) { LOG.warn(String.format("Still waiting for responses for %s after %d ms [latch=%d]\n%s", ts, hstore_conf.site.exec_response_timeout, latch.getCount(), ts.debug())); LOG.warn("Procedure Parameters:\n" + ts.getProcedureParameters()); hstore_conf.site.exec_profiling = true; LOG.warn(hstore_site.statusSnapshot()); String msg = "The query responses for " + ts + " never arrived!"; throw new ServerFaultException(msg, ts.getTransactionId()); } } // Update done partitions if (notify != null && notify.donePartitions.isEmpty() == false) { if (debug.val) LOG.debug(String.format("%s - Marking new done partitions %s", ts, notify.donePartitions)); ts.getDonePartitions().addAll(notify.donePartitions); } // IMPORTANT: Check whether the fragments failed somewhere and we got a response with an error // We will rethrow this so that it pops the stack all the way back to VoltProcedure.call() // where we can generate a message to the client if (ts.hasPendingError()) { if (debug.val) LOG.warn(String.format("%s was hit with a %s", ts, ts.getPendingError().getClass().getSimpleName())); throw ts.getPendingError(); } // IMPORTANT: Don't try to check whether we got back the right number of tables because the batch // may have hit an error and we didn't execute all of them. 
VoltTable results[] = null;
        try {
            results = this.depTracker.getResults(ts);
        } catch (AssertionError ex) {
            LOG.error("Failed to get final results for batch\n" + ts.debug());
            throw ex;
        }
        ts.finishRound(this.partitionId);
        if (debug.val) {
            if (trace.val)
                LOG.trace(ts + " is now running and looking for love in all the wrong places...");
            LOG.debug(String.format("%s - Returning back %d tables to VoltProcedure", ts, results.length));
        }
        return (results);
    }

    // ---------------------------------------------------------------
    // COMMIT + ABORT METHODS
    // ---------------------------------------------------------------

    /**
     * Queue a speculatively executed transaction to send its ClientResponseImpl message
     */
    private void blockClientResponse(LocalTransaction ts, ClientResponseImpl cresponse) {
        assert(ts.isPredictSinglePartition() == true) :
            String.format("Speculatively executed multi-partition %s [mode=%s, status=%s]",
                          ts, this.currentExecMode, cresponse.getStatus());
        assert(ts.isSpeculative() == true) :
            String.format("Blocking ClientResponse for non-speculative %s [mode=%s, status=%s]",
                          ts, this.currentExecMode, cresponse.getStatus());
        assert(cresponse.getStatus() != Status.ABORT_MISPREDICT) :
            String.format("Trying to block ClientResponse for mispredicted %s [mode=%s, status=%s]",
                          ts, this.currentExecMode, cresponse.getStatus());
        assert(this.currentExecMode != ExecutionMode.COMMIT_ALL) :
            String.format("Blocking ClientResponse for %s when in non-speculative mode [mode=%s, status=%s]",
                          ts, this.currentExecMode, cresponse.getStatus());

        this.specExecBlocked.push(Pair.of(ts, cresponse));
        this.specExecModified = this.specExecModified && ts.isExecReadOnly(this.partitionId);
        if (debug.val)
            LOG.debug(String.format("%s - Blocking %s ClientResponse [partitions=%s, blockQueue=%d]",
                      ts, cresponse.getStatus(), ts.getTouchedPartitions().values(),
                      this.specExecBlocked.size()));
    }

    /**
     * For the given transaction's ClientResponse, figure out whether we can send it back to the client
     * right now or whether we need to initiate two-phase commit.
     * @param ts
     * @param cresponse
     */
    protected void processClientResponse(LocalTransaction ts, ClientResponseImpl cresponse) {
        // IMPORTANT: If we executed this locally and only touched our partition, then we need to commit/abort right here
        // 2010-11-14: The reason why we can do this is because we will just ignore the commit
        //             message when it shows up from the Dtxn.Coordinator. We should probably double check with Evan on this...
        Status status = cresponse.getStatus();

        if (debug.val) {
            LOG.debug(String.format("%s - Processing ClientResponse at partition %d " +
                      "[status=%s, singlePartition=%s, local=%s, clientHandle=%d]",
                      ts, this.partitionId, status, ts.isPredictSinglePartition(),
                      ts.isExecLocal(this.partitionId), cresponse.getClientHandle()));
            if (trace.val) {
                LOG.trace(ts + " Touched Partitions: " + ts.getTouchedPartitions().values());
                if (ts.isPredictSinglePartition() == false)
                    LOG.trace(ts + " Done Partitions: " + ts.getDonePartitions());
            }
        }

        // -------------------------------
        // ALL: Transactions that need to be internally restarted
        // -------------------------------
        if (status == Status.ABORT_MISPREDICT ||
            status == Status.ABORT_SPECULATIVE ||
            status == Status.ABORT_EVICTEDACCESS) {
            // If the txn was mispredicted, then we will pass the information over to the
            // HStoreSite so that it can re-execute the transaction. We want to do this
            // first so that the txn gets re-executed as soon as possible...
if (debug.val) LOG.debug(String.format("%s - Restarting because transaction was hit with %s", ts, (ts.getPendingError() != null ? ts.getPendingError().getClass().getSimpleName() : ""))); // We don't want to delete the transaction here because whoever is going to requeue it for // us will need to know what partitions that the transaction touched when it executed before if (ts.isPredictSinglePartition()) { this.finishTransaction(ts, status); this.hstore_site.transactionRequeue(ts, status); } // Send a message all the partitions involved that the party is over // and that they need to abort the transaction. We don't actually care when we get the // results back because we'll start working on new txns right away. // Note that when we call transactionFinish() right here this thread will then go on // to invoke HStoreSite.transactionFinish() for us. That means when it returns we will // have successfully aborted the txn at least at all of the local partitions at this site. else { if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostFinish(); LocalFinishCallback finish_callback = ts.getFinishCallback(); finish_callback.init(ts, status); finish_callback.markForRequeue(); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); this.hstore_coordinator.transactionFinish(ts, status, finish_callback); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } } // ------------------------------- // ALL: Single-Partition Transactions // ------------------------------- else if (ts.isPredictSinglePartition()) { // Commit or abort the transaction only if we haven't done it already // This can happen when we commit speculative txns out of order if (ts.isMarkedFinished(this.partitionId) == false) { this.finishTransaction(ts, status); } // We have to mark it as loggable to prevent the response // from getting sent back to the client if (hstore_conf.site.commandlog_enable) ts.markLogEnabled(); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); this.hstore_site.responseSend(ts, cresponse); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); this.hstore_site.queueDeleteTransaction(ts.getTransactionId(), status); } // ------------------------------- // COMMIT: Distributed Transaction // ------------------------------- else if (status == Status.OK) { // We need to set the new ExecutionMode before we invoke transactionPrepare // because the LocalTransaction handle might get cleaned up immediately ExecutionMode newMode = null; if (hstore_conf.site.specexec_enable) { newMode = (ts.isExecReadOnly(this.partitionId) ? 
ExecutionMode.COMMIT_READONLY : ExecutionMode.COMMIT_NONE); } else { newMode = ExecutionMode.DISABLED; } this.setExecutionMode(ts, newMode); // We have to send a prepare message to all of our remote HStoreSites // We want to make sure that we don't go back to ones that we've already told PartitionSet donePartitions = ts.getDonePartitions(); PartitionSet notifyPartitions = new PartitionSet(); for (int partition : ts.getPredictTouchedPartitions().values()) { if (donePartitions.contains(partition) == false) { notifyPartitions.add(partition); } } // FOR if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostPrepare(); ts.setClientResponse(cresponse); if (hstore_conf.site.exec_profiling) { this.profiler.network_time.start(); this.profiler.sp3_local_time.start(); } LocalPrepareCallback callback = ts.getPrepareCallback(); callback.init(ts, notifyPartitions); this.hstore_coordinator.transactionPrepare(ts, callback, notifyPartitions); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } // ------------------------------- // ABORT: Distributed Transaction // ------------------------------- else { // Send back the result to the client right now, since there's no way // that we're magically going to be able to recover this and get them a result // This has to come before the network messages above because this will clean-up the // LocalTransaction state information this.hstore_site.responseSend(ts, cresponse); // Send a message all the partitions involved that the party is over // and that they need to abort the transaction. We don't actually care when we get the // results back because we'll start working on new txns right away. // Note that when we call transactionFinish() right here this thread will then go on // to invoke HStoreSite.transactionFinish() for us. That means when it returns we will // have successfully aborted the txn at least at all of the local partitions at this site. if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostFinish(); LocalFinishCallback callback = ts.getFinishCallback(); callback.init(ts, status); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); try { this.hstore_coordinator.transactionFinish(ts, status, callback); } finally { if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } } } /** * Enable speculative execution mode for this partition. The given transaction is * the one that we will need to wait to finish before we can release the ClientResponses * for any speculatively executed transactions. 
* @param txn_id * @return true if speculative execution was enabled at this partition */ private Status prepareTransaction(AbstractTransaction ts) { assert(ts != null) : "Unexpected null transaction handle at partition " + this.partitionId; assert(ts.isInitialized()) : String.format("Trying to prepare uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to prepare %s again after it was already finished at partition %d", ts, this.partitionId); Status status = Status.OK; // Skip if we've already invoked prepared for this txn at this partition if (ts.isMarkedPrepared(this.partitionId) == false) { if (debug.val) LOG.debug(String.format("%s - Preparing to commit txn at partition %d [specBlocked=%d]", ts, this.partitionId, this.specExecBlocked.size())); ExecutionMode newMode = ExecutionMode.COMMIT_NONE; if (hstore_conf.site.exec_profiling && this.partitionId != ts.getBasePartition() && ts.needsFinish(this.partitionId)) { profiler.sp3_remote_time.start(); } if (hstore_conf.site.specexec_enable) { // Check to see if there were any conflicts with the dtxn and any of its speculative // txns at this partition. If there were, then we know that we can't commit the txn here. LocalTransaction spec_ts; for (Pair<LocalTransaction, ClientResponseImpl> pair : this.specExecBlocked) { spec_ts = pair.getFirst(); if (debug.val) LOG.debug(String.format("%s - Checking for conflicts with speculative %s at partition %d [%s]", ts, spec_ts, this.partitionId, this.specExecChecker.getClass().getSimpleName())); if (this.specExecChecker.hasConflictAfter(ts, spec_ts, this.partitionId)) { if (debug.val) LOG.debug(String.format("%s - Conflict found with speculative txn %s at partition %d", ts, spec_ts, this.partitionId)); status = Status.ABORT_RESTART; break; } } // FOR // Check whether the txn that we're waiting for is read-only. // If it is, then that means all read-only transactions can commit right away if (status == Status.OK && ts.isExecReadOnly(this.partitionId)) { if (debug.val) LOG.debug(String.format("%s - Txn is read-only at partition %d [readOnly=%s]", ts, this.partitionId, ts.isExecReadOnly(this.partitionId))); newMode = ExecutionMode.COMMIT_READONLY; } } if (this.currentDtxn != null) this.setExecutionMode(ts, newMode); } // It's ok if they try to prepare the txn twice. That might just mean that they never // got the acknowledgement back in time if they tried to send an early commit message. 
else if (debug.val) { LOG.debug(String.format("%s - Already marked 2PC:PREPARE at partition %d", ts, this.partitionId)); } // IMPORTANT // When we do an early 2PC-PREPARE, we won't have this callback ready // because we don't know what callback to use to send the acknowledgements // back over the network PartitionCountingCallback<AbstractTransaction> callback = ts.getPrepareCallback(); if (status == Status.OK) { if (callback.isInitialized()) { try { callback.run(this.partitionId); } catch (Throwable ex) { LOG.warn("Unexpected error for " + ts, ex); } } // But we will always mark ourselves as prepared at this partition ts.markPrepared(this.partitionId); } else { if (debug.val) LOG.debug(String.format("%s - Aborting txn from partition %d [%s]", ts, this.partitionId, status)); callback.abort(this.partitionId, status); } return (status); } /** * Internal call to abort/commit the transaction down in the execution engine * @param ts * @param commit */ private void finishTransaction(AbstractTransaction ts, Status status) { assert(ts != null) : "Unexpected null transaction handle at partition " + this.partitionId; assert(ts.isInitialized()) : String.format("Trying to commit uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to commit %s twice at partition %d", ts, this.partitionId); // This can be null if they haven't submitted anything boolean commit = (status == Status.OK); long undoToken = (commit ? ts.getLastUndoToken(this.partitionId) : ts.getFirstUndoToken(this.partitionId)); // Only commit/abort this transaction if: // (2) We have the last undo token used by this transaction // (3) The transaction was executed with undo buffers // (4) The transaction actually submitted work to the EE // (5) The transaction modified data at this partition if (ts.needsFinish(this.partitionId) && undoToken != HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { if (trace.val) LOG.trace(String.format("%s - Invoking EE to finish work for txn [%s / speculative=%s]", ts, status, ts.isSpeculative())); this.finishWorkEE(ts, undoToken, commit); } // We always need to do the following things regardless if we hit up the EE or not if (commit) this.lastCommittedTxnId = ts.getTransactionId(); if (trace.val) LOG.trace(String.format("%s - Telling queue manager that txn is finished at partition %d", ts, this.partitionId)); this.queueManager.lockQueueFinished(ts, status, this.partitionId); if (debug.val) LOG.debug(String.format("%s - Successfully %sed transaction at partition %d", ts, (commit ? "committ" : "abort"), this.partitionId)); ts.markFinished(this.partitionId); } /** * The real method that actually reaches down into the EE and commits/undos the changes * for the given token. * Unless you know what you're doing, you probably want to be calling finishTransaction() * instead of calling this directly. 
* @param ts * @param undoToken * @param commit */ private void finishWorkEE(AbstractTransaction ts, long undoToken, boolean commit) { assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to commit %s twice at partition %d", ts, this.partitionId); // If the txn is completely read-only and they didn't use undo-logging, then // there is nothing that we need to do, except to check to make sure we aren't // trying to abort this txn if (undoToken == HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN) { // SANITY CHECK: Make sure that they're not trying to undo a transaction that // modified the database but did not use undo logging if (ts.isExecReadOnly(this.partitionId) == false && commit == false) { String msg = String.format("TRYING TO ABORT TRANSACTION ON PARTITION %d WITHOUT UNDO LOGGING [undoToken=%d]", this.partitionId, undoToken); LOG.fatal(msg + "\n" + ts.debug()); this.crash(new ServerFaultException(msg, ts.getTransactionId())); } if (debug.val) LOG.debug(String.format("%s - undoToken == DISABLE_UNDO_LOGGING_TOKEN", ts)); } // COMMIT / ABORT else { boolean needs_profiling = false; if (hstore_conf.site.txn_profiling && ts.isExecLocal(this.partitionId) && ((LocalTransaction)ts).profiler != null) { needs_profiling = true; ((LocalTransaction)ts).profiler.startPostEE(); } assert(this.lastCommittedUndoToken != undoToken) : String.format("Trying to %s undoToken %d for %s twice at partition %d", (commit ? "COMMIT" : "ABORT"), undoToken, ts, this.partitionId); // COMMIT! if (commit) { if (debug.val) { LOG.debug(String.format("%s - COMMITING txn on partition %d with undoToken %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", ts, this.partitionId, undoToken, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? " - " + ((LocalTransaction)ts).getSpeculationType() : ""))); if (this.specExecBlocked.isEmpty() == false && ts.isPredictSinglePartition() == false) { LOG.debug(String.format("%s - # of Speculatively Executed Txns: %d ", ts, this.specExecBlocked.size())); } } assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to commit undoToken %d for %s but it is less than the " + "last committed undoToken %d at partition %d\n" + "Last Committed Txn: %d", undoToken, ts, this.lastCommittedUndoToken, this.partitionId, this.lastCommittedTxnId); this.ee.releaseUndoToken(undoToken); this.lastCommittedUndoToken = undoToken; } // ABORT! else { // Evan says that txns will be aborted LIFO. This means the first txn that // we get in abortWork() will have a the greatest undoToken, which means that // it will automagically rollback all other outstanding txns. // I'm lazy/tired, so for now I'll just rollback everything I get, but in theory // we should be able to check whether our undoToken has already been rolled back if (debug.val) { LOG.debug(String.format("%s - ABORTING txn on partition %d with undoToken %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", ts, this.partitionId, undoToken, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? 
" - " + ((LocalTransaction)ts).getSpeculationType() : ""))); if (this.specExecBlocked.isEmpty() == false && ts.isPredictSinglePartition() == false) { LOG.debug(String.format("%s - # of Speculatively Executed Txns: %d ", ts, this.specExecBlocked.size())); } } assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to abort undoToken %d for %s but it is less than the " + "last committed undoToken %d at partition %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", undoToken, ts, this.lastCommittedUndoToken, this.partitionId, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? " - " + ((LocalTransaction)ts).getSpeculationType() : "")); this.ee.undoUndoToken(undoToken); } if (needs_profiling) ((LocalTransaction)ts).profiler.stopPostEE(); } } /** * Somebody told us that our partition needs to abort/commit the given transaction id. * This method should only be used for distributed transactions, because * it will do some extra work for speculative execution * @param ts - The transaction to finish up. * @param status - The final status of the transaction */ private void finishDistributedTransaction(final AbstractTransaction ts, final Status status) { if (debug.val) LOG.debug(String.format("%s - Processing finish request at partition %d " + "[status=%s, readOnly=%s]", ts, this.partitionId, status, ts.isExecReadOnly(this.partitionId))); if (this.currentDtxn == ts) { // 2012-11-22 -- Yes, today is Thanksgiving and I'm working on my database. // That's just grad student life I guess. Anyway, if you're reading this then // you know that this is an important part of the system. We have a dtxn that // we have been told is completely finished and now we need to either commit // or abort any changes that it may have made at this partition. The tricky thing // is that if we have speculative execution enabled, then we need to make sure // that we process any transactions that were executed while the dtxn was running // in the right order to ensure that we maintain serializability. // Here is the basic logic of what's about to happen: // // (1) If the dtxn is commiting, then we just need to commit the the last txn that // was executed (since this will have the largest undo token). // The EE will automatically commit all undo tokens less than that. // (2) If the dtxn is aborting, then we can commit any speculative txn that was // executed before the dtxn's first non-readonly undo token. // // Note that none of the speculative txns in the blocked queue will need to be // aborted at this point, because we will have rolled back their changes immediately // when they aborted, so that our dtxn doesn't read dirty data. 
if (this.specExecBlocked.isEmpty() == false) { // First thing we need to do is get the latch that will be set by any transaction // that was in the middle of being executed when we were called if (debug.val) LOG.debug(String.format("%s - Checking %d blocked speculative transactions at " + "partition %d [currentMode=%s]", ts, this.specExecBlocked.size(), this.partitionId, this.currentExecMode)); LocalTransaction spec_ts = null; ClientResponseImpl spec_cr = null; // ------------------------------- // DTXN NON-READ-ONLY ABORT // If the dtxn did not modify this partition, then everthing can commit // Otherwise, we want to commit anything that was executed before the dtxn started // ------------------------------- if (status != Status.OK && ts.isExecReadOnly(this.partitionId) == false) { // We need to get the first undo tokens for our distributed transaction long dtxnUndoToken = ts.getFirstUndoToken(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Looking for speculative txns to commit before we rollback undoToken %d", ts, dtxnUndoToken)); // Queue of speculative txns that need to be committed. final Queue<Pair<LocalTransaction, ClientResponseImpl>> txnsToCommit = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); // Queue of speculative txns that need to be aborted + restarted final Queue<Pair<LocalTransaction, ClientResponseImpl>> txnsToRestart = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); long spec_token; long max_token = HStoreConstants.NULL_UNDO_LOGGING_TOKEN; LocalTransaction max_ts = null; for (Pair<LocalTransaction, ClientResponseImpl> pair : this.specExecBlocked) { boolean shouldCommit = false; spec_ts = pair.getFirst(); spec_token = spec_ts.getFirstUndoToken(this.partitionId); if (debug.val) LOG.debug(String.format("Speculative Txn %s [undoToken=%d, %s]", spec_ts, spec_token, spec_ts.getSpeculationType())); // Speculative txns should never be executed without an undo token assert(spec_token != HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN); assert(spec_ts.isSpeculative()) : spec_ts + " isn't marked as speculative!"; // If the speculative undoToken is null, then this txn didn't execute // any queries. That means we can always commit it if (spec_token == HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { if (debug.val) LOG.debug(String.format("Speculative Txn %s has a null undoToken at partition %d", spec_ts, this.partitionId)); shouldCommit = true; } // Otherwise, look to see if this txn was speculatively executed before the // first undo token of the distributed txn. That means we know that this guy // didn't read any modifications made by the dtxn. else if (spec_token < dtxnUndoToken) { if (debug.val) LOG.debug(String.format("Speculative Txn %s has an undoToken less than the dtxn %s " + "at partition %d [%d < %d]", spec_ts, ts, this.partitionId, spec_token, dtxnUndoToken)); shouldCommit = true; } // Ok so at this point we know that our spec txn came *after* the distributed txn // started. 
So we need to use our checker to see whether there is a conflict else if (this.specExecChecker.hasConflictAfter(ts, spec_ts, this.partitionId) == false) { if (debug.val) LOG.debug(String.format("Speculative Txn %s does not conflict with dtxn %s at partition %d", spec_ts, ts, this.partitionId)); shouldCommit = true; } if (shouldCommit) { txnsToCommit.add(pair); if (spec_token != HStoreConstants.NULL_UNDO_LOGGING_TOKEN && spec_token > max_token) { max_token = spec_token; max_ts = spec_ts; } } else { txnsToRestart.add(pair); } } // FOR if (debug.val) LOG.debug(String.format("%s - Found %d speculative txns at partition %d that need to be " + "committed *before* we abort this txn", ts, txnsToCommit.size(), this.partitionId)); // (1) Commit the greatest token that we've seen. This means that // all our other txns can be safely processed without needing // to go down in the EE if (max_token != HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { assert(max_ts != null); this.finishWorkEE(max_ts, max_token, true); } // (2) Process all the txns that need to be committed Pair<LocalTransaction, ClientResponseImpl> pair = null; while ((pair = txnsToCommit.poll()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); spec_ts.markFinished(this.partitionId); try { if (debug.val) LOG.debug(String.format("%s - Releasing blocked ClientResponse for %s [status=%s]", ts, spec_ts, spec_cr.getStatus())); this.processClientResponse(spec_ts, spec_cr); } catch (Throwable ex) { String msg = "Failed to complete queued response for " + spec_ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } // FOR // (3) Abort the distributed txn this.finishTransaction(ts, status); // (4) Restart all the other txns while ((pair = txnsToRestart.poll()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); MispredictionException error = new MispredictionException(spec_ts.getTransactionId(), spec_ts.getTouchedPartitions()); spec_ts.setPendingError(error, false); spec_cr.setStatus(Status.ABORT_SPECULATIVE); this.processClientResponse(spec_ts, spec_cr); } // FOR } // ------------------------------- // DTXN READ-ONLY ABORT or DTXN COMMIT // ------------------------------- else { // **IMPORTANT** // If the dtxn needs to commit, then all we need to do is get the // last undoToken that we've generated (since we know that it had to // have been used either by our distributed txn or for one of our // speculative txns). // // If the read-only dtxn needs to abort, then there's nothing we need to // do, because it didn't make any changes. That means we can just // commit the last speculatively executed transaction // // Once we have this token, we can just make a direct call to the EE // to commit any changes that came before it. Note that we are using our // special 'finishWorkEE' method that does not require us to provide // the transaction that we're committing. long undoToken = this.lastUndoToken; if (debug.val) LOG.debug(String.format("%s - Last undoToken at partition %d => %d", ts, this.partitionId, undoToken)); // Bombs away! if (undoToken != this.lastCommittedUndoToken) { this.finishWorkEE(ts, undoToken, true); // IMPORTANT: Make sure that we remove the dtxn from the lock queue! // This is normally done in finishTransaction() but because we're trying // to be clever and invoke the EE directly, we have to make sure that // we call it ourselves. 
this.queueManager.lockQueueFinished(ts, status, this.partitionId); } // Make sure that we mark the dtxn as finished so that we don't // try to do anything with it later on. ts.markFinished(this.partitionId); // Now make sure that all of the speculative txns are processed without // committing (since we just committed any change that they could have made // up above). Pair<LocalTransaction, ClientResponseImpl> pair = null; while ((pair = this.specExecBlocked.pollFirst()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); spec_ts.markFinished(this.partitionId); try { if (trace.val) LOG.trace(String.format("%s - Releasing blocked ClientResponse for %s [status=%s]", ts, spec_ts, spec_cr.getStatus())); this.processClientResponse(spec_ts, spec_cr); } catch (Throwable ex) { String msg = "Failed to complete queued response for " + spec_ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } // WHILE } this.specExecBlocked.clear(); this.specExecModified = false; if (trace.val) LOG.trace(String.format("Finished processing all queued speculative txns for dtxn %s", ts)); } // ------------------------------- // NO SPECULATIVE TXNS // ------------------------------- else { // There are no speculative txns waiting for this dtxn, // so we can just commit it right away if (debug.val) LOG.debug(String.format("%s - No speculative txns at partition %d. Just %s txn by itself", ts, this.partitionId, (status == Status.OK ? "commiting" : "aborting"))); this.finishTransaction(ts, status); } // Clear our cached query results that are specific for this transaction // this.queryCache.purgeTransaction(ts.getTransactionId()); // TODO: Remove anything in our queue for this txn // if (ts.hasQueuedWork(this.partitionId)) { // } // Check whether this is the response that the speculatively executed txns have been waiting for // We could have turned off speculative execution mode beforehand if (debug.val) LOG.debug(String.format("%s - Attempting to unmark as the current DTXN at partition %d and " + "setting execution mode to %s", ts, this.partitionId, ExecutionMode.COMMIT_ALL)); try { // Resetting the current_dtxn variable has to come *before* we change the execution mode this.resetCurrentDtxn(); this.setExecutionMode(ts, ExecutionMode.COMMIT_ALL); // Release blocked transactions this.releaseBlockedTransactions(ts); } catch (Throwable ex) { String msg = String.format("Failed to finish %s at partition %d", ts, this.partitionId); throw new ServerFaultException(msg, ex, ts.getTransactionId()); } if (hstore_conf.site.exec_profiling) { this.profiler.sp3_local_time.stopIfStarted(); this.profiler.sp3_remote_time.stopIfStarted(); } } // We were told told to finish a dtxn that is not the current one // at this partition. That's ok as long as it's aborting and not trying // to commit. else { assert(status != Status.OK) : String.format("Trying to commit %s at partition %d but the current dtxn is %s", ts, this.partitionId, this.currentDtxn); this.queueManager.lockQueueFinished(ts, status, this.partitionId); } // ------------------------------- // FINISH CALLBACKS // ------------------------------- // MapReduceTransaction if (ts instanceof MapReduceTransaction) { PartitionCountingCallback<AbstractTransaction> callback = ((MapReduceTransaction)ts).getCleanupCallback(); // We don't want to invoke this callback at the basePartition's site // because we don't want the parent txn to actually get deleted. 
if (this.partitionId == ts.getBasePartition()) { if (debug.val) LOG.debug(String.format("%s - Notifying %s that the txn is finished at partition %d", ts, callback.getClass().getSimpleName(), this.partitionId)); callback.run(this.partitionId); } } else { PartitionCountingCallback<AbstractTransaction> callback = ts.getFinishCallback(); if (debug.val) LOG.debug(String.format("%s - Notifying %s that the txn is finished at partition %d", ts, callback.getClass().getSimpleName(), this.partitionId)); callback.run(this.partitionId); } } private void blockTransaction(InternalTxnMessage work) { if (debug.val) LOG.debug(String.format("%s - Adding %s work to blocked queue", work.getTransaction(), work.getClass().getSimpleName())); this.currentBlockedTxns.add(work); } private void blockTransaction(LocalTransaction ts) { this.blockTransaction(new StartTxnMessage(ts)); } /** * Release all the transactions that are currently in this partition's blocked queue * into the work queue. * @param ts */ private void releaseBlockedTransactions(AbstractTransaction ts) { if (this.currentBlockedTxns.isEmpty() == false) { if (debug.val) LOG.debug(String.format("Attempting to release %d blocked transactions at partition %d because of %s", this.currentBlockedTxns.size(), this.partitionId, ts)); this.work_queue.addAll(this.currentBlockedTxns); int released = this.currentBlockedTxns.size(); this.currentBlockedTxns.clear(); if (debug.val) LOG.debug(String.format("Released %d blocked transactions at partition %d because of %s", released, this.partitionId, ts)); } assert(this.currentBlockedTxns.isEmpty()); } // --------------------------------------------------------------- // SNAPSHOT METHODS // --------------------------------------------------------------- /** * Do snapshot work exclusively until there is no more. Also blocks * until the syncing and closing of snapshot data targets has completed. */ public void initiateSnapshots(Deque<SnapshotTableTask> tasks) { m_snapshotter.initiateSnapshots(ee, tasks); } public Collection<Exception> completeSnapshotWork() throws InterruptedException { return m_snapshotter.completeSnapshotWork(ee); } // --------------------------------------------------------------- // SHUTDOWN METHODS // --------------------------------------------------------------- /** * Cause this PartitionExecutor to make the entire HStore cluster shutdown * This won't return! */ public synchronized void crash(Throwable ex) { String msg = String.format("PartitionExecutor for Partition #%d is crashing", this.partitionId); if (ex == null) LOG.warn(msg); else LOG.warn(msg, ex); assert(this.hstore_coordinator != null); this.hstore_coordinator.shutdownClusterBlocking(ex); } @Override public boolean isShuttingDown() { return (this.hstore_site.isShuttingDown()); // shutdown_state == State.PREPARE_SHUTDOWN || this.shutdown_state == State.SHUTDOWN); } @Override public void prepareShutdown(boolean error) { this.shutdown_state = ShutdownState.PREPARE_SHUTDOWN; } /** * Somebody from the outside wants us to shutdown */ public synchronized void shutdown() { if (this.shutdown_state == ShutdownState.SHUTDOWN) { if (debug.val) LOG.debug(String.format("Partition #%d told to shutdown again. 
Ignoring...", this.partitionId)); return; } this.shutdown_state = ShutdownState.SHUTDOWN; if (debug.val) LOG.debug(String.format("Shutting down PartitionExecutor for Partition #%d", this.partitionId)); // Clear the queue this.work_queue.clear(); // Knock out this ma if (this.m_snapshotter != null) this.m_snapshotter.shutdown(); // Make sure we shutdown our threadpool // this.thread_pool.shutdownNow(); if (this.self != null) this.self.interrupt(); if (this.shutdown_latch != null) { try { this.shutdown_latch.acquire(); } catch (InterruptedException ex) { // Ignore } catch (Exception ex) { LOG.fatal("Unexpected error while shutting down", ex); } } } // ---------------------------------------------------------------------------- // DEBUG METHODS // ---------------------------------------------------------------------------- @Override public String toString() { return String.format("%s{%s}", this.getClass().getSimpleName(), HStoreThreadManager.formatPartitionName(siteId, partitionId)); } public class Debug implements DebugContext { public VoltProcedure getVoltProcedure(String procName) { Procedure proc = catalogContext.procedures.getIgnoreCase(procName); return (PartitionExecutor.this.getVoltProcedure(proc.getId())); } public SpecExecScheduler getSpecExecScheduler() { return (PartitionExecutor.this.specExecScheduler); } public AbstractConflictChecker getSpecExecConflictChecker() { return (PartitionExecutor.this.specExecChecker); } public Collection<BatchPlanner> getBatchPlanners() { return (PartitionExecutor.this.batchPlanners.values()); } public PartitionExecutorProfiler getProfiler() { return (PartitionExecutor.this.profiler); } public Thread getExecutionThread() { return (PartitionExecutor.this.self); } public Queue<InternalMessage> getWorkQueue() { return (PartitionExecutor.this.work_queue); } public void setExecutionMode(AbstractTransaction ts, ExecutionMode newMode) { PartitionExecutor.this.setExecutionMode(ts, newMode); } public ExecutionMode getExecutionMode() { return (PartitionExecutor.this.currentExecMode); } public Long getLastExecutedTxnId() { return (PartitionExecutor.this.lastExecutedTxnId); } public Long getLastCommittedTxnId() { return (PartitionExecutor.this.lastCommittedTxnId); } public long getLastCommittedIndoToken() { return (PartitionExecutor.this.lastCommittedUndoToken); } /** * Get the VoltProcedure handle of the current running txn. This could be null. 
* <B>FOR TESTING ONLY</B> */ public VoltProcedure getCurrentVoltProcedure() { return (PartitionExecutor.this.currentVoltProc); } /** * Get the txnId of the current distributed transaction at this partition * <B>FOR TESTING ONLY</B> */ public AbstractTransaction getCurrentDtxn() { return (PartitionExecutor.this.currentDtxn); } /** * Get the txnId of the current distributed transaction at this partition * <B>FOR TESTING ONLY</B> */ public Long getCurrentDtxnId() { Long ret = null; // This is a race condition, so we'll just ignore any errors if (PartitionExecutor.this.currentDtxn != null) { try { ret = PartitionExecutor.this.currentDtxn.getTransactionId(); } catch (NullPointerException ex) { // IGNORE } } return (ret); } public Long getCurrentTxnId() { return (PartitionExecutor.this.currentTxnId); } public int getBlockedWorkCount() { return (PartitionExecutor.this.currentBlockedTxns.size()); } /** * Return the number of spec exec txns have completed but are waiting * for the distributed txn to finish at this partition */ public int getBlockedSpecExecCount() { return (PartitionExecutor.this.specExecBlocked.size()); } public int getWorkQueueSize() { return (PartitionExecutor.this.work_queue.size()); } public void updateMemory() { PartitionExecutor.this.updateMemoryStats(EstTime.currentTimeMillis()); } /** * Replace the ConflictChecker. This should only be used for testing * @param checker */ protected void setConflictChecker(AbstractConflictChecker checker) { LOG.warn(String.format("Replacing original checker %s with %s at partition %d", specExecChecker.getClass().getSimpleName(), checker.getClass().getSimpleName(), partitionId)); specExecChecker = checker; specExecScheduler.getDebugContext().setConflictChecker(checker); } } private Debug cachedDebugContext; public Debug getDebugContext() { if (this.cachedDebugContext == null) { // We don't care if we're thread-safe here... this.cachedDebugContext = new Debug(); } return this.cachedDebugContext; } }
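// Usage sketch (test/debug code only, not part of this class): a harness that already
// holds a PartitionExecutor can inspect its state through the Debug context defined above.
// "executor" is a placeholder for however the test obtains the PartitionExecutor
// (e.g. from the HStoreSite); it is not defined in this file.
//
//   PartitionExecutor.Debug dbg = executor.getDebugContext();
//   ExecutionMode mode          = dbg.getExecutionMode();
//   int blockedWork             = dbg.getBlockedWorkCount();
//   int blockedSpecExec         = dbg.getBlockedSpecExecCount();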
false
true
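// getFragmentInputs() below resolves the input dependencies for a single WorkFragment:
// transactions whose control code runs in this HStoreSite (LocalTransaction) pull the
// intermediate tables out of the appropriate DependencyTracker (this partition's tracker
// or the base partition's), while remote transactions carry them as "attached" inputs
// on the AbstractTransaction handle.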
private void getFragmentInputs(AbstractTransaction ts, int input_dep_id, Map<Integer, List<VoltTable>> inputs) { if (input_dep_id == HStoreConstants.NULL_DEPENDENCY_ID) return; if (trace.val) LOG.trace(String.format("%s - Attempting to retrieve input dependencies for DependencyId #%d", ts, input_dep_id)); // If the Transaction is on the same HStoreSite, then all the // input dependencies will be internal and can be retrieved locally if (ts instanceof LocalTransaction) { DependencyTracker txnTracker = null; if (ts.getBasePartition() != this.partitionId) { txnTracker = hstore_site.getDependencyTracker(ts.getBasePartition()); } else { txnTracker = this.depTracker; } List<VoltTable> deps = txnTracker.getInternalDependency((LocalTransaction)ts, input_dep_id); assert(deps != null); assert(inputs.containsKey(input_dep_id) == false); inputs.put(input_dep_id, deps); if (trace.val) LOG.trace(String.format("%s - Retrieved %d INTERNAL VoltTables for DependencyId #%d", ts, deps.size(), input_dep_id, (trace.val ? "\n" + deps : ""))); } // Otherwise they will be "attached" inputs to the RemoteTransaction handle // We should really try to merge these two concepts into a single function call else if (ts.getAttachedInputDependencies().containsKey(input_dep_id)) { List<VoltTable> deps = ts.getAttachedInputDependencies().get(input_dep_id); List<VoltTable> pDeps = null; // We have to copy the tables if we have debugging enabled if (trace.val) { // this.firstPartition == false) { pDeps = new ArrayList<VoltTable>(); for (VoltTable vt : deps) { ByteBuffer buffer = vt.getTableDataReference(); byte arr[] = new byte[vt.getUnderlyingBufferSize()]; buffer.get(arr, 0, arr.length); pDeps.add(new VoltTable(ByteBuffer.wrap(arr), true)); } } else { pDeps = deps; } inputs.put(input_dep_id, pDeps); if (trace.val) LOG.trace(String.format("%s - Retrieved %d ATTACHED VoltTables for DependencyId #%d in %s", ts, deps.size(), input_dep_id)); } } /** * Set the given AbstractTransaction handle as the current distributed txn * that is running at this partition. Note that this will check to make sure * that no other txn is marked as the currentDtxn. * @param ts */ private void setCurrentDtxn(AbstractTransaction ts) { // There can never be another current dtxn still unfinished at this partition! 
assert(this.currentBlockedTxns.isEmpty()) : String.format("Concurrent multi-partition transactions at partition %d: " + "Orig[%s] <=> New[%s] / BlockedQueue:%d", this.partitionId, this.currentDtxn, ts, this.currentBlockedTxns.size()); assert(this.currentDtxn == null) : String.format("Concurrent multi-partition transactions at partition %d: " + "Orig[%s] <=> New[%s] / BlockedQueue:%d", this.partitionId, this.currentDtxn, ts, this.currentBlockedTxns.size()); // Check whether we should check for speculative txns to execute whenever this // dtxn is idle at this partition this.currentDtxn = ts; if (hstore_conf.site.specexec_enable && ts.isSysProc() == false && this.specExecScheduler.isDisabled() == false) { this.specExecIgnoreCurrent = this.specExecChecker.shouldIgnoreTransaction(ts); } else { this.specExecIgnoreCurrent = true; } if (debug.val) { LOG.debug(String.format("Set %s as the current DTXN for partition %d [specExecIgnore=%s, previous=%s]", ts, this.partitionId, this.specExecIgnoreCurrent, this.lastDtxnDebug)); this.lastDtxnDebug = this.currentDtxn.toString(); } if (hstore_conf.site.exec_profiling && ts.getBasePartition() != this.partitionId) { profiler.sp2_time.start(); } } /** * Reset the current dtxn for this partition */ private void resetCurrentDtxn() { assert(this.currentDtxn != null) : "Trying to reset the currentDtxn when it is already null"; if (debug.val) LOG.debug(String.format("Resetting current DTXN for partition %d to null [previous=%s]", this.partitionId, this.lastDtxnDebug)); this.currentDtxn = null; } /** * Store a new prefetch result for a transaction * @param txnId * @param fragmentId * @param partitionId * @param params * @param result */ public void addPrefetchResult(LocalTransaction ts, int stmtCounter, int fragmentId, int partitionId, int paramsHash, VoltTable result) { if (debug.val) LOG.debug(String.format("%s - Adding prefetch result for %s with %d rows from partition %d " + "[stmtCounter=%d / paramsHash=%d]", ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragmentId).fullName(), result.getRowCount(), partitionId, stmtCounter, paramsHash)); this.depTracker.addPrefetchResult(ts, stmtCounter, fragmentId, partitionId, paramsHash, result); } // --------------------------------------------------------------- // PartitionExecutor API // --------------------------------------------------------------- /** * Queue a new transaction initialization at this partition. This will cause the * transaction to get added to this partition's lock queue. This PartitionExecutor does * not have to be this txn's base partition/ * @param ts */ public void queueSetPartitionLock(AbstractTransaction ts) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; SetDistributedTxnMessage work = ts.getSetDistributedTxnMessage(); boolean success = this.work_queue.offer(work); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to front of partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); } /** * New work from the coordinator that this local site needs to execute (non-blocking) * This method will simply chuck the task into the work queue. * We should not be sent an InitiateTaskMessage here! 
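* The fragment is wrapped in a WorkFragmentMessage, offered to the work queue, and
* (when speculative execution is enabled) the SpecExecScheduler's current search is
* interrupted so that it can take the new work into account.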
* @param ts * @param task */ public void queueWork(AbstractTransaction ts, WorkFragment fragment) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; WorkFragmentMessage work = ts.getWorkFragmentMessage(fragment); boolean success = this.work_queue.offer(work); // , true); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); ts.markQueuedWork(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); } /** * Add a new work message to our utility queue * @param work */ public void queueUtilityWork(InternalMessage work) { if (debug.val) LOG.debug(String.format("Added utility work %s to partition %d", work.getClass().getSimpleName(), this.partitionId)); this.work_queue.offer(work); } /** * Put the prepare request for the transaction into the queue * @param task * @param status The final status of the transaction */ public void queuePrepare(AbstractTransaction ts) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; PrepareTxnMessage work = ts.getPrepareTxnMessage(); boolean success = this.work_queue.offer(work); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); // if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(); } /** * Put the finish request for the transaction into the queue * @param task * @param status The final status of the transaction */ public void queueFinish(AbstractTransaction ts, Status status) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; FinishTxnMessage work = ts.getFinishTxnMessage(status); boolean success = this.work_queue.offer(work); // , true); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); // if (success) this.specExecScheduler.haltSearch(); } /** * Queue a new transaction invocation request at this partition * @param serializedRequest * @param catalog_proc * @param procParams * @param clientCallback * @return */ public boolean queueNewTransaction(ByteBuffer serializedRequest, long initiateTime, Procedure catalog_proc, ParameterSet procParams, RpcCallback<ClientResponseImpl> clientCallback) { boolean sysproc = catalog_proc.getSystemproc(); if (this.currentExecMode == ExecutionMode.DISABLED_REJECT && sysproc == false) return (false); InitializeRequestMessage work = new InitializeRequestMessage(serializedRequest, initiateTime, catalog_proc, procParams, clientCallback); if (debug.val) LOG.debug(String.format("Queuing %s for '%s' request on partition %d " + "[currentDtxn=%s, queueSize=%d, mode=%s]", work.getClass().getSimpleName(), catalog_proc.getName(), this.partitionId, this.currentDtxn, this.work_queue.size(), this.currentExecMode)); return (this.work_queue.offer(work)); } /** * Queue a new transaction invocation request at this partition * @param ts * @param task * @param callback */ public boolean 
queueStartTransaction(LocalTransaction ts) { assert(ts != null) : "Unexpected null transaction handle!"; boolean singlePartitioned = ts.isPredictSinglePartition(); boolean force = (singlePartitioned == false) || ts.isMapReduce() || ts.isSysProc(); // UPDATED 2012-07-12 // We used to have a bunch of checks to determine whether we needed to // put the new request in the blocked queue or not. This required us to // acquire the exec_lock to do the check and then another lock to actually put // the request into the work_queue. Now we'll just throw it right in // the queue (checking for throttling of course) and let the main // thread sort out the mess of whether the txn should get blocked or not if (this.currentExecMode == ExecutionMode.DISABLED_REJECT) { if (debug.val) LOG.warn(String.format("%s - Not queuing txn at partition %d because current mode is %s", ts, this.partitionId, this.currentExecMode)); return (false); } StartTxnMessage work = ts.getStartTxnMessage(); if (debug.val) LOG.debug(String.format("Queuing %s for '%s' request on partition %d " + "[currentDtxn=%s, queueSize=%d, mode=%s]", work.getClass().getSimpleName(), ts.getProcedure().getName(), this.partitionId, this.currentDtxn, this.work_queue.size(), this.currentExecMode)); boolean success = this.work_queue.offer(work); // , force); if (debug.val && force && success == false) { String msg = String.format("Failed to add %s even though force flag was true!", ts); throw new ServerFaultException(msg, ts.getTransactionId()); } if (success && hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); return (success); } // --------------------------------------------------------------- // WORK QUEUE PROCESSING METHODS // --------------------------------------------------------------- /** * Process a WorkResult and update the internal state of the LocalTransaction accordingly * Note that this will always be invoked by a thread other than the main execution thread * for this PartitionExecutor. That means if something comes back that's bad, we need a way * to alert the other thread so that it can act on it.
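* (In the current implementation the "alert" is simply a pending error stored on the
* LocalTransaction, which the blocked VoltProcedure picks up when it wakes back up.)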
* @param ts * @param result */ private void processWorkResult(LocalTransaction ts, WorkResult result) { boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); if (debug.val) LOG.debug(String.format("Processing WorkResult for %s on partition %d [srcPartition=%d, deps=%d]", ts, this.partitionId, result.getPartitionId(), result.getDepDataCount())); // If the Fragment failed to execute, then we need to abort the Transaction // Note that we have to do this before we add the responses to the TransactionState so that // we can be sure that the VoltProcedure knows about the problem when it wakes the stored // procedure back up if (result.getStatus() != Status.OK) { if (trace.val) LOG.trace(String.format("Received non-success response %s from partition %d for %s", result.getStatus(), result.getPartitionId(), ts)); SerializableException error = null; if (needs_profiling) ts.profiler.startDeserialization(); try { ByteBuffer buffer = result.getError().asReadOnlyByteBuffer(); error = SerializableException.deserializeFromBuffer(buffer); } catch (Exception ex) { String msg = String.format("Failed to deserialize SerializableException from partition %d " + "for %s [bytes=%d]", result.getPartitionId(), ts, result.getError().size()); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ts.profiler.stopDeserialization(); } // At this point there is no need to even deserialize the rest of the message because // we know that we're going to have to abort the transaction if (error == null) { LOG.warn(ts + " - Unexpected null SerializableException\n" + result); } else { if (debug.val) LOG.error(String.format("%s - Got error from partition %d in %s", ts, result.getPartitionId(), result.getClass().getSimpleName()), error); ts.setPendingError(error, true); } return; } if (needs_profiling) ts.profiler.startDeserialization(); for (int i = 0, cnt = result.getDepDataCount(); i < cnt; i++) { if (trace.val) LOG.trace(String.format("Storing intermediate results from partition %d for %s", result.getPartitionId(), ts)); int depId = result.getDepId(i); ByteString bs = result.getDepData(i); VoltTable vt = null; if (bs.isEmpty() == false) { FastDeserializer fd = new FastDeserializer(bs.asReadOnlyByteBuffer()); try { vt = fd.readObject(VoltTable.class); } catch (Exception ex) { throw new ServerFaultException("Failed to deserialize VoltTable from partition " + result.getPartitionId() + " for " + ts, ex); } } this.depTracker.addResult(ts, result.getPartitionId(), depId, vt); } // FOR (dependencies) if (needs_profiling) ts.profiler.stopDeserialization(); } /** * Execute a new transaction at this partition. * This will invoke the run() method define in the VoltProcedure for this txn and * then process the ClientResponse. Only the PartitionExecutor itself should be calling * this directly, since it's the only thing that knows what's going on with the world... * @param ts */ private void executeTransaction(LocalTransaction ts) { assert(ts.isInitialized()) : String.format("Trying to execute uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedReleased(this.partitionId)) : String.format("Transaction %s was not marked released at partition %d before being executed", ts, this.partitionId); if (trace.val) LOG.debug(String.format("%s - Attempting to start transaction on partition %d", ts, this.partitionId)); // If this is a MapReduceTransaction handle, we actually want to get the // inner LocalTransaction handle for this partition. 
The MapReduceTransaction // is just a placeholder if (ts instanceof MapReduceTransaction) { MapReduceTransaction mr_ts = (MapReduceTransaction)ts; ts = mr_ts.getLocalTransaction(this.partitionId); assert(ts != null) : "Unexpected null LocalTransaction handle from " + mr_ts; } ExecutionMode before_mode = this.currentExecMode; boolean predict_singlePartition = ts.isPredictSinglePartition(); // ------------------------------- // DISTRIBUTED TXN // ------------------------------- if (predict_singlePartition == false) { // If there is already a dtxn running, then we need to throw this // mofo back into the blocked txn queue // TODO: If our dtxn is on the same site as us, then at this point we know that // it is done executing the control code and is sending around 2PC messages // to commit/abort. That means that we could assume that all of the other // remote partitions are going to agree on the same outcome and we can start // speculatively executing this dtxn. After all, if we're at this point in // the PartitionExecutor then we know that we got this partition's locks // from the TransactionQueueManager. if (this.currentDtxn != null && this.currentDtxn.equals(ts) == false) { assert(this.currentDtxn.equals(ts) == false) : String.format("New DTXN %s != Current DTXN %s", ts, this.currentDtxn); // If this is a local txn, then we can finagle things a bit. if (this.currentDtxn.isExecLocal(this.partitionId)) { // It would be safe for us to speculative execute this DTXN right here // if the currentDtxn has aborted... but we can never be in this state. assert(this.currentDtxn.isAborted() == false) : // Sanity Check String.format("We want to execute %s on partition %d but aborted %s is still hanging around\n", ts, this.partitionId, this.currentDtxn, this.work_queue); // So that means we know that it committed, which doesn't necessarily mean // that it will still commit, but we'll be able to abort, rollback, and requeue // if that happens. // TODO: Right now our current dtxn marker is a single value. We may want to // switch it to a FIFO queue so that we can multiple guys hanging around. // For now we will just do the default thing and block this txn this.blockTransaction(ts); return; } // If it's not local, then we just have to block it right away else { this.blockTransaction(ts); return; } } // If there is no other DTXN right now, then we're it! else if (this.currentDtxn == null) { // || this.currentDtxn.equals(ts) == false) { this.setCurrentDtxn(ts); } // 2011-11-14: We don't want to set the execution mode here, because we know that we // can check whether we were read-only after the txn finishes this.setExecutionMode(this.currentDtxn, ExecutionMode.COMMIT_NONE); if (debug.val) LOG.debug(String.format("Marking %s as current DTXN on Partition %d [isLocal=%s, execMode=%s]", ts, this.partitionId, true, this.currentExecMode)); } // ------------------------------- // SINGLE-PARTITION TXN // ------------------------------- else { // If this is a single-partition transaction, then we need to check whether we are // being executed under speculative execution mode. We have to check this here // because it may be the case that we queued a bunch of transactions when speculative // execution was enabled, but now the transaction that was ahead of this one is finished, // so now we're just executing them regularly if (this.currentDtxn != null) { // HACK: If we are currently under DISABLED mode when we get this, then we just // need to block the transaction and return back to the queue. 
This is easier than // having to set all sorts of crazy locks if (this.currentExecMode == ExecutionMode.DISABLED || hstore_conf.site.specexec_enable == false) { if (debug.val) LOG.debug(String.format("%s - Blocking single-partition %s until dtxn finishes [mode=%s]", this.currentDtxn, ts, this.currentExecMode)); this.blockTransaction(ts); return; } assert(ts.getSpeculationType() != null); if (debug.val) LOG.debug(String.format("Speculatively executing %s while waiting for dtxn %s [%s]", ts, this.currentDtxn, ts.getSpeculationType())); assert(ts.isSpeculative()) : ts + " was not marked as being speculative!"; } } // If we reach this point, we know that we're about to execute our homeboy here... if (hstore_conf.site.txn_profiling && ts.profiler != null) { ts.profiler.startExec(); } if (hstore_conf.site.exec_profiling) this.profiler.numTransactions++; // Make sure the dependency tracker knows about us if (ts.hasDependencyTracker()) this.depTracker.addTransaction(ts); // Grab a new ExecutionState for this txn ExecutionState execState = this.initExecutionState(); ts.setExecutionState(execState); VoltProcedure volt_proc = this.getVoltProcedure(ts.getProcedure().getId()); assert(volt_proc != null) : "No VoltProcedure for " + ts; if (debug.val) { LOG.debug(String.format("%s - Starting execution of txn on partition %d " + "[txnMode=%s, mode=%s]", ts, this.partitionId, before_mode, this.currentExecMode)); if (trace.val) LOG.trace(String.format("Current Transaction at partition #%d\n%s", this.partitionId, ts.debug())); } if (hstore_conf.site.txn_counters) TransactionCounter.EXECUTED.inc(ts.getProcedure()); ClientResponseImpl cresponse = null; VoltProcedure previous = this.currentVoltProc; try { this.currentVoltProc = volt_proc; cresponse = volt_proc.call(ts, ts.getProcedureParameters().toArray()); // Blocking... // VoltProcedure.call() should handle any exceptions thrown by the transaction // If we get anything out here then that's bad news } catch (Throwable ex) { if (this.isShuttingDown() == false) { SQLStmt last[] = volt_proc.voltLastQueriesExecuted(); LOG.fatal("Unexpected error while executing " + ts, ex); if (last.length > 0) { LOG.fatal(String.format("Last Queries Executed [%d]: %s", last.length, Arrays.toString(last))); } LOG.fatal("LocalTransactionState Dump:\n" + ts.debug()); this.crash(ex); } } finally { this.currentVoltProc = previous; ts.resetExecutionState(); execState.finish(); this.execStates.add(execState); this.finishVoltProcedure(volt_proc); if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPost(); // if (cresponse.getStatus() == Status.ABORT_UNEXPECTED) { // cresponse.getException().printStackTrace(); // } } // If this is a MapReduce job, then we can just ignore the ClientResponse // and return immediately. The VoltMapReduceProcedure is responsible for storing // the result at the proper location. 
if (ts.isMapReduce()) { return; } else if (cresponse == null) { assert(this.isShuttingDown()) : String.format("No ClientResponse for %s???", ts); return; } // ------------------------------- // PROCESS RESPONSE AND FIGURE OUT NEXT STEP // ------------------------------- Status status = cresponse.getStatus(); if (debug.val) { LOG.debug(String.format("%s - Finished execution of transaction control code " + "[status=%s, beforeMode=%s, currentMode=%s]", ts, status, before_mode, this.currentExecMode)); if (ts.hasPendingError()) { LOG.debug(String.format("%s - Txn finished with pending error: %s", ts, ts.getPendingErrorMessage())); } } // We assume that most transactions are not speculatively executed and are successful // Therefore we don't want to grab the exec_mode lock here. if (predict_singlePartition == false || this.canProcessClientResponseNow(ts, status, before_mode)) { this.processClientResponse(ts, cresponse); } // Otherwise always queue our response, since we know that whatever thread is out there // is waiting for us to finish before it drains the queued responses else { // If the transaction aborted, then we can't execute any transaction that touch the tables // that this guy touches. But since we can't just undo this transaction without undoing // everything that came before it, we'll just disable executing all transactions until the // current distributed transaction commits if (status != Status.OK && ts.isExecReadOnly(this.partitionId) == false) { this.setExecutionMode(ts, ExecutionMode.DISABLED); int blocked = this.work_queue.drainTo(this.currentBlockedTxns); if (debug.val) { if (trace.val && blocked > 0) LOG.trace(String.format("Blocking %d transactions at partition %d because ExecutionMode is now %s", blocked, this.partitionId, this.currentExecMode)); LOG.debug(String.format("Disabling execution on partition %d because speculative %s aborted", this.partitionId, ts)); } } if (trace.val) LOG.trace(String.format("%s - Queuing ClientResponse [status=%s, origMode=%s, newMode=%s, dtxn=%s]", ts, cresponse.getStatus(), before_mode, this.currentExecMode, this.currentDtxn)); this.blockClientResponse(ts, cresponse); } } /** * Determines whether a finished transaction that executed locally can have their ClientResponse processed immediately * or if it needs to wait for the response from the outstanding multi-partition transaction for this partition * (1) This is the multi-partition transaction that everyone is waiting for * (2) The transaction was not executed under speculative execution mode * (3) The transaction does not need to wait for the multi-partition transaction to finish first * @param ts * @param status * @param before_mode * @return */ private boolean canProcessClientResponseNow(LocalTransaction ts, Status status, ExecutionMode before_mode) { if (debug.val) LOG.debug(String.format("%s - Checking whether to process %s response now at partition %d " + "[singlePartition=%s, readOnly=%s, specExecModified=%s, before=%s, current=%s]", ts, status, this.partitionId, ts.isPredictSinglePartition(), ts.isExecReadOnly(this.partitionId), this.specExecModified, before_mode, this.currentExecMode)); // Commit All if (this.currentExecMode == ExecutionMode.COMMIT_ALL) { return (true); } // SPECIAL CASE // Any user-aborted, speculative single-partition transaction should be processed immediately. 
else if (status == Status.ABORT_USER && ts.isSpeculative()) { return (true); } // // SPECIAL CASE // // If this txn threw a user abort, and the current outstanding dtxn is read-only // // then it's safe for us to rollback // else if (status == Status.ABORT_USER && // this.currentDtxn != null && // this.currentDtxn.isExecReadOnly(this.partitionId)) { // return (true); // } // SPECIAL CASE // Anything mispredicted should be processed right away else if (status == Status.ABORT_MISPREDICT) { return (true); } // Process successful txns based on the mode that it was executed under else if (status == Status.OK) { switch (before_mode) { case COMMIT_ALL: return (true); case COMMIT_READONLY: // Read-only speculative txns can be committed right now // TODO: Right now we're going to use the specExecModified flag to disable // sending out any results from spec execed txns that may have read from // a modified database. We should switch to a bitmap of table ids so that we // can be more selective. // return (false); return (this.specExecModified == false && ts.isExecReadOnly(this.partitionId)); case COMMIT_NONE: { // If this txn does not conflict with the current dtxn, then we should be able // to let it commit but we can't because of the way our undo tokens work return (false); } default: throw new ServerFaultException("Unexpected execution mode: " + before_mode, ts.getTransactionId()); } // SWITCH } // // If the transaction aborted and it was read-only thus far, then we want to process it immediately // else if (status != Status.OK && ts.isExecReadOnly(this.partitionId)) { // return (true); // } assert(this.currentExecMode != ExecutionMode.COMMIT_ALL) : String.format("Queuing ClientResponse for %s when in non-speculative mode [mode=%s, status=%s]", ts, this.currentExecMode, status); return (false); } /** * Process a WorkFragment for a transaction and execute it in this partition's underlying EE. * @param ts * @param fragment * @param allParameters The array of all the ParameterSets for the current SQLStmt batch.
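* Note: fragments whose ignore flag is set were already shipped to this partition for
* prefetching; they are filtered out below before anything is handed to the EE.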
*/ private void processWorkFragment(AbstractTransaction ts, WorkFragment fragment, ParameterSet allParameters[]) { assert(this.partitionId == fragment.getPartitionId()) : String.format("Tried to execute WorkFragment %s for %s at partition %d but it was suppose " + "to be executed on partition %d", fragment.getFragmentIdList(), ts, this.partitionId, fragment.getPartitionId()); assert(ts.isMarkedPrepared(this.partitionId) == false) : String.format("Tried to execute WorkFragment %s for %s at partition %d after it was marked 2PC:PREPARE", fragment.getFragmentIdList(), ts, this.partitionId); // A txn is "local" if the Java is executing at the same partition as this one boolean is_basepartition = (ts.getBasePartition() == this.partitionId); boolean is_remote = (ts instanceof LocalTransaction == false); boolean is_prefetch = fragment.getPrefetch(); boolean is_readonly = fragment.getReadOnly(); if (debug.val) LOG.debug(String.format("%s - Executing %s [isBasePartition=%s, isRemote=%s, isPrefetch=%s, isReadOnly=%s, fragments=%s]", ts, fragment.getClass().getSimpleName(), is_basepartition, is_remote, is_prefetch, is_readonly, fragment.getFragmentIdCount())); // If this WorkFragment isn't being executed at this txn's base partition, then // we need to start a new execution round if (is_basepartition == false) { long undoToken = this.calculateNextUndoToken(ts, is_readonly); ts.initRound(this.partitionId, undoToken); ts.startRound(this.partitionId); } DependencySet result = null; Status status = Status.OK; SerializableException error = null; // Check how many fragments are not marked as ignored // If the fragment is marked as ignore then it means that it was already // sent to this partition for prefetching. We need to make sure that we remove // it from the list of fragmentIds that we need to execute. int fragmentCount = fragment.getFragmentIdCount(); for (int i = 0; i < fragmentCount; i++) { if (fragment.getStmtIgnore(i)) { fragmentCount--; } } // FOR final ParameterSet parameters[] = tmp_fragmentParams.getParameterSet(fragmentCount); assert(parameters.length == fragmentCount); // Construct data given to the EE to execute this work fragment this.tmp_EEdependencies.clear(); long fragmentIds[] = tmp_fragmentIds.getArray(fragmentCount); int fragmentOffsets[] = tmp_fragmentOffsets.getArray(fragmentCount); int outputDepIds[] = tmp_outputDepIds.getArray(fragmentCount); int inputDepIds[] = tmp_inputDepIds.getArray(fragmentCount); int offset = 0; for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) { if (fragment.getStmtIgnore(i) == false) { fragmentIds[offset] = fragment.getFragmentId(i); fragmentOffsets[offset] = i; outputDepIds[offset] = fragment.getOutputDepId(i); inputDepIds[offset] = fragment.getInputDepId(i); parameters[offset] = allParameters[fragment.getParamIndex(i)]; this.getFragmentInputs(ts, inputDepIds[offset], this.tmp_EEdependencies); if (trace.val && ts.isSysProc() == false && is_basepartition == false) LOG.trace(String.format("%s - Offset:%d FragmentId:%d OutputDep:%d/%d InputDep:%d/%d", ts, offset, fragmentIds[offset], outputDepIds[offset], fragment.getOutputDepId(i), inputDepIds[offset], fragment.getInputDepId(i))); offset++; } } // FOR assert(offset == fragmentCount); try { result = this.executeFragmentIds(ts, ts.getLastUndoToken(this.partitionId), fragmentIds, parameters, outputDepIds, inputDepIds, this.tmp_EEdependencies); } catch (EvictedTupleAccessException ex) { // XXX: What do we do if this is not a single-partition txn? 
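// (As written, the evicted-tuple access is treated like any other abort: the status and
// exception are recorded here and returned to the caller, which is responsible for
// deciding whether and how to restart the transaction.)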
status = Status.ABORT_EVICTEDACCESS; error = ex; } catch (ConstraintFailureException ex) { status = Status.ABORT_UNEXPECTED; error = ex; } catch (SQLException ex) { status = Status.ABORT_UNEXPECTED; error = ex; } catch (EEException ex) { // this.crash(ex); status = Status.ABORT_UNEXPECTED; error = ex; } catch (Throwable ex) { status = Status.ABORT_UNEXPECTED; if (ex instanceof SerializableException) { error = (SerializableException)ex; } else { error = new SerializableException(ex); } } finally { if (error != null) { // error.printStackTrace(); LOG.warn(String.format("%s - Unexpected %s on partition %d", ts, error.getClass().getSimpleName(), this.partitionId), error); // (debug.val ? error : null)); } // Success, but without any results??? if (result == null && status == Status.OK) { String msg = String.format("The WorkFragment %s executed successfully on Partition %d but " + "result is null for %s", fragment.getFragmentIdList(), this.partitionId, ts); Exception ex = new Exception(msg); if (debug.val) LOG.warn(ex); status = Status.ABORT_UNEXPECTED; error = new SerializableException(ex); } } // For single-partition INSERT/UPDATE/DELETE queries, we don't directly // execute the SendPlanNode in order to get back the number of tuples that // were modified. So we have to rely on the output dependency ids set in the task assert(status != Status.OK || (status == Status.OK && result.size() == fragmentIds.length)) : "Got back " + result.size() + " results but was expecting " + fragmentIds.length; // Make sure that we mark the round as finished before we start sending results if (is_basepartition == false) { ts.finishRound(this.partitionId); } // ------------------------------- // PREFETCH QUERIES // ------------------------------- if (is_prefetch) { // Regardless of whether this txn is running at the same HStoreSite as this PartitionExecutor, // we always need to put the result inside of the local query cache // This is so that we can identify if we get request for a query that we have already executed // We'll only do this if it succeeded. If it failed, then we won't do anything and will // just wait until they come back to execute the query again before // we tell them that something went wrong. It's ghetto, but it's just easier this way... if (status == Status.OK) { // We're going to store the result in the base partition cache if they're // on the same HStoreSite as us if (is_remote == false) { PartitionExecutor other = this.hstore_site.getPartitionExecutor(ts.getBasePartition()); for (int i = 0, cnt = result.size(); i < cnt; i++) { if (trace.val) LOG.trace(String.format("%s - Storing %s prefetch result [params=%s]", ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragment.getFragmentId(fragmentOffsets[i])).fullName(), parameters[i])); other.addPrefetchResult((LocalTransaction)ts, fragment.getStmtCounter(fragmentOffsets[i]), fragment.getFragmentId(fragmentOffsets[i]), this.partitionId, parameters[i].hashCode(), result.dependencies[i]); } // FOR } } // Now if it's a remote transaction, we need to use the coordinator to send // them our result. Note that we want to send a single message per partition. Unlike // with the TransactionWorkRequests, we don't need to wait until all of the partitions // that are prefetching for this txn at our local HStoreSite to finish. 
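// (Each TransactionPrefetchResult built below carries the fragment ids, statement counters,
// and parameter hashes along with the WorkResult, so the base partition can match the
// cached result against the query when it is eventually submitted for real.)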
if (is_remote) { WorkResult wr = this.buildWorkResult(ts, result, status, error); TransactionPrefetchResult.Builder builder = TransactionPrefetchResult.newBuilder() .setTransactionId(ts.getTransactionId().longValue()) .setSourcePartition(this.partitionId) .setResult(wr) .setStatus(status) .addAllFragmentId(fragment.getFragmentIdList()) .addAllStmtCounter(fragment.getStmtCounterList()); for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) { builder.addParamHash(parameters[i].hashCode()); } if (debug.val) LOG.debug(String.format("%s - Sending back %s to partition %d [numResults=%s, status=%s]", ts, wr.getClass().getSimpleName(), ts.getBasePartition(), result.size(), status)); hstore_coordinator.transactionPrefetchResult((RemoteTransaction)ts, builder.build()); } } // ------------------------------- // LOCAL TRANSACTION // ------------------------------- else if (is_remote == false) { LocalTransaction local_ts = (LocalTransaction)ts; // If the transaction is local, store the result directly in the local TransactionState if (status == Status.OK) { if (trace.val) LOG.trace(String.format("%s - Storing %d dependency results locally for successful work fragment", ts, result.size())); assert(result.size() == outputDepIds.length); DependencyTracker otherTracker = this.hstore_site.getDependencyTracker(ts.getBasePartition()); for (int i = 0; i < outputDepIds.length; i++) { if (trace.val) LOG.trace(String.format("%s - Storing DependencyId #%d [numRows=%d]\n%s", ts, outputDepIds[i], result.dependencies[i].getRowCount(), result.dependencies[i])); try { otherTracker.addResult(local_ts, this.partitionId, outputDepIds[i], result.dependencies[i]); } catch (Throwable ex) { // ex.printStackTrace(); String msg = String.format("Failed to stored Dependency #%d for %s [idx=%d, fragmentId=%d]", outputDepIds[i], ts, i, fragmentIds[i]); LOG.error(String.format("%s - WorkFragment:%d\nExpectedIds:%s\nOutputDepIds: %s\nResultDepIds: %s\n%s", msg, fragment.hashCode(), fragment.getOutputDepIdList(), Arrays.toString(outputDepIds), Arrays.toString(result.depIds), fragment)); throw new ServerFaultException(msg, ex); } } // FOR } else { local_ts.setPendingError(error, true); } } // ------------------------------- // REMOTE TRANSACTION // ------------------------------- else { if (trace.val) LOG.trace(String.format("%s - Constructing WorkResult with %d bytes from partition %d to send " + "back to initial partition %d [status=%s]", ts, (result != null ? result.size() : null), this.partitionId, ts.getBasePartition(), status)); RpcCallback<WorkResult> callback = ((RemoteTransaction)ts).getWorkCallback(); if (callback == null) { LOG.fatal("Unable to send FragmentResponseMessage for " + ts); LOG.fatal("Orignal WorkFragment:\n" + fragment); LOG.fatal(ts.toString()); throw new ServerFaultException("No RPC callback to HStoreSite for " + ts, ts.getTransactionId()); } WorkResult response = this.buildWorkResult((RemoteTransaction)ts, result, status, error); assert(response != null); callback.run(response); } // Check whether this is the last query that we're going to get // from this transaction. 
If it is, then we can go ahead and prepare the txn if (is_basepartition == false && fragment.getLastFragment()) { if (debug.val) LOG.debug(String.format("%s - Invoking early 2PC:PREPARE at partition %d", ts, this.partitionId)); this.queuePrepare(ts); } } /** * Executes a WorkFragment on behalf of some remote site and returns the * resulting DependencySet * @param fragment * @return * @throws Exception */ private DependencySet executeFragmentIds(AbstractTransaction ts, long undoToken, long fragmentIds[], ParameterSet parameters[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) throws Exception { if (fragmentIds.length == 0) { LOG.warn(String.format("Got a fragment batch for %s that does not have any fragments?", ts)); return (null); } // *********************************** DEBUG *********************************** if (trace.val) { LOG.trace(String.format("%s - Getting ready to kick %d fragments to partition %d EE [undoToken=%d]", ts, fragmentIds.length, this.partitionId, (undoToken != HStoreConstants.NULL_UNDO_LOGGING_TOKEN ? undoToken : "null"))); // if (trace.val) { // LOG.trace("WorkFragmentIds: " + Arrays.toString(fragmentIds)); // Map<String, Object> m = new LinkedHashMap<String, Object>(); // for (int i = 0; i < parameters.length; i++) { // m.put("Parameter[" + i + "]", parameters[i]); // } // FOR // LOG.trace("Parameters:\n" + StringUtil.formatMaps(m)); // } } // *********************************** DEBUG *********************************** DependencySet result = null; // ------------------------------- // SYSPROC FRAGMENTS // ------------------------------- if (ts.isSysProc()) { result = this.executeSysProcFragments(ts, undoToken, fragmentIds.length, fragmentIds, parameters, output_depIds, input_depIds, input_deps); // ------------------------------- // REGULAR FRAGMENTS // ------------------------------- } else { result = this.executePlanFragments(ts, undoToken, fragmentIds.length, fragmentIds, parameters, output_depIds, input_depIds, input_deps); if (result == null) { LOG.warn(String.format("Output DependencySet for %s in %s is null?", Arrays.toString(fragmentIds), ts)); } } return (result); } /** * Execute a BatchPlan directly on this PartitionExecutor without having to covert it * to WorkFragments first. 
This is a big speed improvement over having to queue things up * @param ts * @param plan * @return */ private VoltTable[] executeLocalPlan(LocalTransaction ts, BatchPlanner.BatchPlan plan, ParameterSet parameterSets[]) { // Start the new execution round long undoToken = this.calculateNextUndoToken(ts, plan.isReadOnly()); ts.initFirstRound(undoToken, plan.getBatchSize()); int fragmentCount = plan.getFragmentCount(); long fragmentIds[] = plan.getFragmentIds(); int output_depIds[] = plan.getOutputDependencyIds(); int input_depIds[] = plan.getInputDependencyIds(); // Mark that we touched the local partition once for each query in the batch // ts.getTouchedPartitions().put(this.partitionId, plan.getBatchSize()); // Only notify other partitions that we're done with them if we're not // a single-partition transaction if (hstore_conf.site.specexec_enable && ts.isPredictSinglePartition() == false) { //FIXME //PartitionSet new_done = ts.calculateDonePartitions(this.thresholds); //if (new_done != null && new_done.isEmpty() == false) { // LocalPrepareCallback callback = ts.getPrepareCallback(); // assert(callback.isInitialized()); // this.hstore_coordinator.transactionPrepare(ts, callback, new_done); //} } if (trace.val) LOG.trace(String.format("Txn #%d - BATCHPLAN:\n" + " fragmentIds: %s\n" + " fragmentCount: %s\n" + " output_depIds: %s\n" + " input_depIds: %s", ts.getTransactionId(), Arrays.toString(plan.getFragmentIds()), plan.getFragmentCount(), Arrays.toString(plan.getOutputDependencyIds()), Arrays.toString(plan.getInputDependencyIds()))); // NOTE: There are no dependencies that we need to pass in because the entire // batch is local to this partition. DependencySet result = null; try { result = this.executePlanFragments(ts, undoToken, fragmentCount, fragmentIds, parameterSets, output_depIds, input_depIds, null); } finally { ts.fastFinishRound(this.partitionId); } // assert(result != null) : "Unexpected null DependencySet result for " + ts; if (trace.val) LOG.trace("Output:\n" + result); return (result != null ?
result.dependencies : null); } /** * Execute the given fragment tasks on this site's underlying EE * @param ts * @param undoToken * @param batchSize * @param fragmentIds * @param parameterSets * @param output_depIds * @param input_depIds * @return */ private DependencySet executeSysProcFragments(AbstractTransaction ts, long undoToken, int batchSize, long fragmentIds[], ParameterSet parameters[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) { assert(fragmentIds.length == 1); assert(fragmentIds.length == parameters.length) : String.format("%s - Fragments:%d / Parameters:%d", ts, fragmentIds.length, parameters.length); VoltSystemProcedure volt_proc = this.m_registeredSysProcPlanFragments.get(fragmentIds[0]); if (volt_proc == null) { String msg = "No sysproc handle exists for FragmentID #" + fragmentIds[0] + " :: " + this.m_registeredSysProcPlanFragments; throw new ServerFaultException(msg, ts.getTransactionId()); } // HACK: We have to set the TransactionState for sysprocs manually volt_proc.setTransactionState(ts); ts.markExecNotReadOnly(this.partitionId); DependencySet result = null; try { result = volt_proc.executePlanFragment(ts.getTransactionId(), this.tmp_EEdependencies, (int)fragmentIds[0], parameters[0], this.m_systemProcedureContext); } catch (Throwable ex) { String msg = "Unexpected error when executing system procedure"; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } if (debug.val) LOG.debug(String.format("%s - Finished executing sysproc fragment for %s (#%d)%s", ts, m_registeredSysProcPlanFragments.get(fragmentIds[0]).getClass().getSimpleName(), fragmentIds[0], (trace.val ? "\n" + result : ""))); return (result); } /** * Execute the given fragment tasks on this site's underlying EE * @param ts * @param undoToken * @param batchSize * @param fragmentIds * @param parameterSets * @param output_depIds * @param input_depIds * @return */ private DependencySet executePlanFragments(AbstractTransaction ts, long undoToken, int batchSize, long fragmentIds[], ParameterSet parameterSets[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) { assert(this.ee != null) : "The EE object is null. This is bad!"; Long txn_id = ts.getTransactionId(); //LOG.info("in executePlanFragments()"); // *********************************** DEBUG *********************************** if (debug.val) { StringBuilder sb = new StringBuilder(); sb.append(String.format("%s - Executing %d fragments [lastTxnId=%d, undoToken=%d]", ts, batchSize, this.lastCommittedTxnId, undoToken)); // if (trace.val) { Map<String, Object> m = new LinkedHashMap<String, Object>(); m.put("Fragments", Arrays.toString(fragmentIds)); Map<Integer, Object> inner = new LinkedHashMap<Integer, Object>(); for (int i = 0; i < batchSize; i++) inner.put(i, parameterSets[i].toString()); m.put("Parameters", inner); if (batchSize > 0 && input_depIds[0] != HStoreConstants.NULL_DEPENDENCY_ID) { inner = new LinkedHashMap<Integer, Object>(); for (int i = 0; i < batchSize; i++) { List<VoltTable> deps = input_deps.get(input_depIds[i]); inner.put(input_depIds[i], (deps != null ? StringUtil.join("\n", deps) : "???")); } // FOR m.put("Input Dependencies", inner); } m.put("Output Dependencies", Arrays.toString(output_depIds)); sb.append("\n" + StringUtil.formatMaps(m)); // } LOG.debug(sb.toString().trim()); } // *********************************** DEBUG *********************************** // pass attached dependencies to the EE (for non-sysproc work). 
if (input_deps != null && input_deps.isEmpty() == false) { if (debug.val) LOG.debug(String.format("%s - Stashing %d InputDependencies at partition %d", ts, input_deps.size(), this.partitionId)); this.ee.stashWorkUnitDependencies(input_deps); } // Java-based Table Read-Write Sets boolean readonly = true; boolean speculative = ts.isSpeculative(); boolean singlePartition = ts.isPredictSinglePartition(); int tableIds[] = null; for (int i = 0; i < batchSize; i++) { boolean fragReadOnly = PlanFragmentIdGenerator.isPlanFragmentReadOnly(fragmentIds[i]); // We don't need to maintain read/write sets for non-speculative txns if (speculative || singlePartition == false) { if (fragReadOnly) { tableIds = catalogContext.getReadTableIds(Long.valueOf(fragmentIds[i])); if (tableIds != null) ts.markTableIdsRead(this.partitionId, tableIds); } else { tableIds = catalogContext.getWriteTableIds(Long.valueOf(fragmentIds[i])); if (tableIds != null) ts.markTableIdsWritten(this.partitionId, tableIds); } } readonly = readonly && fragReadOnly; } // Enable read/write set tracking if (hstore_conf.site.exec_readwrite_tracking && ts.hasExecutedWork(this.partitionId) == false) { if (trace.val) LOG.trace(String.format("%s - Enabling read/write set tracking in EE at partition %d", ts, this.partitionId)); this.ee.trackingEnable(txn_id); } // Check whether the txn has only exeuted read-only queries up to this point if (ts.isExecReadOnly(this.partitionId)) { if (readonly == false) { if (trace.val) LOG.trace(String.format("%s - Marking txn as not read-only %s", ts, Arrays.toString(fragmentIds))); ts.markExecNotReadOnly(this.partitionId); } // We can do this here because the only way that we're not read-only is if // we actually modify data at this partition ts.markExecutedWork(this.partitionId); } DependencySet result = null; boolean needs_profiling = false; if (ts.isExecLocal(this.partitionId)) { if (hstore_conf.site.txn_profiling && ((LocalTransaction)ts).profiler != null) { needs_profiling = true; ((LocalTransaction)ts).profiler.startExecEE(); } } Throwable error = null; try { assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to execute work using undoToken %d for %s but " + "it is less than the last committed undoToken %d at partition %d", undoToken, ts, this.lastCommittedUndoToken, this.partitionId); if (trace.val) LOG.trace(String.format("%s - Executing fragments %s at partition %d [undoToken=%d]", ts, Arrays.toString(fragmentIds), this.partitionId, undoToken)); result = this.ee.executeQueryPlanFragmentsAndGetDependencySet( fragmentIds, batchSize, input_depIds, output_depIds, parameterSets, batchSize, txn_id.longValue(), this.lastCommittedTxnId.longValue(), undoToken); } catch (AssertionError ex) { LOG.error("Fatal error when processing " + ts + "\n" + ts.debug()); error = ex; throw ex; } catch (EvictedTupleAccessException ex) { if (debug.val) LOG.warn("Caught EvictedTupleAccessException."); error = ex; throw ex; } catch (SerializableException ex) { if (debug.val) LOG.error(String.format("%s - Unexpected error in the ExecutionEngine on partition %d", ts, this.partitionId), ex); error = ex; throw ex; } catch (Throwable ex) { error = ex; String msg = String.format("%s - Failed to execute PlanFragments: %s", ts, Arrays.toString(fragmentIds)); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ((LocalTransaction)ts).profiler.stopExecEE(); if (error == null && result == null) { LOG.warn(String.format("%s - Finished executing fragments but got back null results [fragmentIds=%s]", ts, 
Arrays.toString(fragmentIds))); } } // *********************************** DEBUG *********************************** if (debug.val) { if (result != null) { LOG.debug(String.format("%s - Finished executing fragments and got back %d results", ts, result.depIds.length)); } else { LOG.warn(String.format("%s - Finished executing fragments but got back null results? That seems bad...", ts)); } } // *********************************** DEBUG *********************************** return (result); } /** * Load a VoltTable directly into the EE at this partition. * <B>NOTE:</B> This should only be invoked by a system stored procedure. * @param txn_id * @param clusterName * @param databaseName * @param tableName * @param data * @param allowELT * @throws VoltAbortException */ public void loadTable(AbstractTransaction ts, String clusterName, String databaseName, String tableName, VoltTable data, int allowELT) throws VoltAbortException { Table table = this.catalogContext.database.getTables().getIgnoreCase(tableName); if (table == null) { throw new VoltAbortException("Table '" + tableName + "' does not exist in database " + clusterName + "." + databaseName); } if (debug.val) LOG.debug(String.format("Loading %d row(s) into %s [txnId=%d]", data.getRowCount(), table.getName(), ts.getTransactionId())); ts.markExecutedWork(this.partitionId); this.ee.loadTable(table.getRelativeIndex(), data, ts.getTransactionId(), this.lastCommittedTxnId.longValue(), ts.getLastUndoToken(this.partitionId), allowELT != 0); } /** * Load a VoltTable directly into the EE at this partition. * <B>NOTE:</B> This should only be used for testing * @param txnId * @param table * @param data * @param allowELT * @throws VoltAbortException */ protected void loadTable(Long txnId, Table table, VoltTable data, boolean allowELT) throws VoltAbortException { if (debug.val) LOG.debug(String.format("Loading %d row(s) into %s [txnId=%d]", data.getRowCount(), table.getName(), txnId)); this.ee.loadTable(table.getRelativeIndex(), data, txnId.longValue(), this.lastCommittedTxnId.longValue(), HStoreConstants.NULL_UNDO_LOGGING_TOKEN, allowELT); } /** * Execute a SQLStmt batch at this partition. This is the main entry point from * VoltProcedure for where we will execute a SQLStmt batch from a txn. * @param ts The txn handle that is executing this query batch * @param batchSize The number of SQLStmts that the txn queued up using voltQueueSQL() * @param batchStmts The SQLStmts that the txn is trying to execute * @param batchParams The input parameters for the SQLStmts * @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @param forceSinglePartition Whether to force the BatchPlanner to only generate a single-partition plan * @return */ public VoltTable[] executeSQLStmtBatch(LocalTransaction ts, int batchSize, SQLStmt batchStmts[], ParameterSet batchParams[], boolean finalTask, boolean forceSinglePartition) { boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); if (needs_profiling) { ts.profiler.addBatch(batchSize); ts.profiler.stopExecJava(); ts.profiler.startExecPlanning(); } // HACK: This is needed to handle updates on replicated tables properly // when there is only one partition in the cluster. if (catalogContext.numberOfPartitions == 1) { this.depTracker.addTransaction(ts); } if (hstore_conf.site.exec_deferrable_queries) { // TODO: Loop through batchStmts and check whether their corresponding Statement // is marked as deferrable. 
If so, then remove them from batchStmts and batchParams // (sliding everyone over by one in the arrays). Queue up the deferred query. // Be sure to decrement batchSize after you finish processing this. // EXAMPLE: batchStmts[0].getStatement().getDeferrable() } // Calculate the hash code for this batch to see whether we already have a planner final Integer batchHashCode = VoltProcedure.getBatchHashCode(batchStmts, batchSize); BatchPlanner planner = this.batchPlanners.get(batchHashCode); if (planner == null) { // Assume fast case planner = new BatchPlanner(batchStmts, batchSize, ts.getProcedure(), this.p_estimator, forceSinglePartition); this.batchPlanners.put(batchHashCode, planner); } assert(planner != null); // At this point we have to calculate exactly what we need to do on each partition // for this batch. So somehow right now we need to fire this off to either our // local executor or to Evan's magical distributed transaction manager BatchPlanner.BatchPlan plan = planner.plan(ts.getTransactionId(), this.partitionId, ts.getPredictTouchedPartitions(), ts.getTouchedPartitions(), batchParams); assert(plan != null); if (trace.val) { LOG.trace(ts + " - Touched Partitions: " + ts.getTouchedPartitions().values()); LOG.trace(ts + " - Next BatchPlan:\n" + plan.toString()); } if (needs_profiling) ts.profiler.stopExecPlanning(); // Tell the TransactionEstimator that we're about to execute these mofos EstimatorState t_state = ts.getEstimatorState(); if (this.localTxnEstimator != null && t_state != null && t_state.isUpdatesEnabled()) { if (needs_profiling) ts.profiler.startExecEstimation(); try { this.localTxnEstimator.executeQueries(t_state, planner.getStatements(), plan.getStatementPartitions()); } finally { if (needs_profiling) ts.profiler.stopExecEstimation(); } } else if (t_state != null && t_state.shouldAllowUpdates()) { LOG.warn("Skipping estimator updates for " + ts); } // Check whether our plan caused a misprediction // Doing it this way allows us to update the TransactionEstimator before we abort the txn if (plan.getMisprediction() != null) { MispredictionException ex = plan.getMisprediction(); ts.setPendingError(ex, false); assert(ex.getPartitions().isEmpty() == false) : "Unexpected empty PartitionSet for mispredicted txn " + ts; // Print Misprediction Debug if (hstore_conf.site.exec_mispredict_crash) { // Use a lock so that we only dump out the first txn that fails synchronized (PartitionExecutor.class) { LOG.warn("\n" + EstimatorUtil.mispredictDebug(ts, planner, batchStmts, batchParams)); LOG.fatal(String.format("Crashing because site.exec_mispredict_crash is true [txn=%s]", ts)); this.crash(ex); } // SYNCH } else if (debug.val) { if (trace.val) LOG.warn("\n" + EstimatorUtil.mispredictDebug(ts, planner, batchStmts, batchParams)); LOG.debug(ts + " - Aborting and restarting mispredicted txn."); } throw ex; } // Keep track of the number of times that we've executed each query for this transaction int stmtCounters[] = this.tmp_stmtCounters.getArray(batchSize); for (int i = 0; i < batchSize; i++) { stmtCounters[i] = ts.updateStatementCounter(batchStmts[i].getStatement()); } // FOR if (ts.hasPrefetchQueries()) { PartitionSet stmtPartitions[] = plan.getStatementPartitions(); PrefetchState prefetchState = ts.getPrefetchState(); assert(prefetchState != null); QueryTracker queryTracker = prefetchState.getExecQueryTracker(); for (int i = 0; i < batchSize; i++) { // We always have to update the query tracker regardless of whether // the query was prefetched or not.
This is so that we can ensure // that we execute the queries in the right order. Statement stmt = batchStmts[i].getStatement(); stmtCounters[i] = queryTracker.addQuery(stmt, stmtPartitions[i], batchParams[i]); } // FOR // FIXME PrefetchQueryUtil.checkSQLStmtBatch(this, ts, plan, batchSize, batchStmts, batchParams); } // PREFETCH VoltTable results[] = null; // FAST-PATH: Single-partition + Local // If the BatchPlan only has WorkFragments that are for this partition, then // we can use the fast-path executeLocalPlan() method if (plan.isSingledPartitionedAndLocal()) { if (trace.val) LOG.trace(String.format("%s - Sending %s directly to the ExecutionEngine at partition %d", ts, plan.getClass().getSimpleName(), this.partitionId)); // If this the finalTask flag is set to true, and we're only executing queries at this // partition, then we need to notify the other partitions that we're done with them. if (hstore_conf.site.exec_early_prepare && finalTask == true && ts.isPredictSinglePartition() == false && ts.isSysProc() == false && ts.allowEarlyPrepare() == true) { tmp_fragmentsPerPartition.clearValues(); tmp_fragmentsPerPartition.put(this.partitionId, batchSize); DonePartitionsNotification notify = this.computeDonePartitions(ts, null, tmp_fragmentsPerPartition, finalTask); if (notify.hasSitesToNotify()) this.notifyDonePartitions(ts, notify); } // Execute the queries right away. results = this.executeLocalPlan(ts, plan, batchParams); } // DISTRIBUTED EXECUTION // Otherwise, we need to generate WorkFragments and then send the messages out // to our remote partitions using the HStoreCoordinator else { ExecutionState execState = ts.getExecutionState(); execState.tmp_partitionFragments.clear(); plan.getWorkFragmentsBuilders(ts.getTransactionId(), stmtCounters, execState.tmp_partitionFragments); if (debug.val) LOG.debug(String.format("%s - Using dispatchWorkFragments to execute %d %ss", ts, execState.tmp_partitionFragments.size(), WorkFragment.class.getSimpleName())); if (needs_profiling) { int remote_cnt = 0; PartitionSet stmtPartitions[] = plan.getStatementPartitions(); for (int i = 0; i < batchSize; i++) { if (stmtPartitions[i].get() != ts.getBasePartition()) remote_cnt++; if (trace.val) LOG.trace(String.format("%s - [%02d] stmt:%s / partitions:%s", ts, i, batchStmts[i].getStatement().getName(), stmtPartitions[i])); } // FOR if (trace.val) LOG.trace(String.format("%s - Remote Queries Count = %d", ts, remote_cnt)); ts.profiler.addRemoteQuery(remote_cnt); } // Block until we get all of our responses. 
results = this.dispatchWorkFragments(ts, batchSize, batchParams, execState.tmp_partitionFragments, finalTask); } if (debug.val && results == null) LOG.warn("Got back a null results array for " + ts + "\n" + plan.toString()); if (needs_profiling) ts.profiler.startExecJava(); return (results); } /** * * @param fresponse */ protected WorkResult buildWorkResult(AbstractTransaction ts, DependencySet result, Status status, SerializableException error) { WorkResult.Builder builder = WorkResult.newBuilder(); // Partition Id builder.setPartitionId(this.partitionId); // Status builder.setStatus(status); // SerializableException if (error != null) { int size = error.getSerializedSize(); BBContainer bc = this.buffer_pool.acquire(size); try { error.serializeToBuffer(bc.b); } catch (IOException ex) { String msg = "Failed to serialize error for " + ts; throw new ServerFaultException(msg, ex); } bc.b.rewind(); builder.setError(ByteString.copyFrom(bc.b)); bc.discard(); } // Push dependencies back to the remote partition that needs it if (status == Status.OK) { for (int i = 0, cnt = result.size(); i < cnt; i++) { builder.addDepId(result.depIds[i]); this.fs.clear(); try { result.dependencies[i].writeExternal(this.fs); ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b); builder.addDepData(bs); } catch (Exception ex) { throw new ServerFaultException(String.format("Failed to serialize output dependency %d for %s", result.depIds[i], ts), ex); } if (trace.val) LOG.trace(String.format("%s - Serialized Output Dependency %d\n%s", ts, result.depIds[i], result.dependencies[i])); } // FOR this.fs.getBBContainer().discard(); } return (builder.build()); } /** * This method is invoked when the PartitionExecutor wants to execute work at a remote HStoreSite. * The doneNotificationsPerSite is an array where each offset (based on SiteId) may contain * a PartitionSet of the partitions that this txn is finished with at the remote node and will * not be executing any work in the current batch. * @param ts * @param fragmentBuilders * @param parameterSets * @param doneNotificationsPerSite */ private void requestWork(LocalTransaction ts, Collection<WorkFragment.Builder> fragmentBuilders, List<ByteString> parameterSets, DonePartitionsNotification notify) { assert(fragmentBuilders.isEmpty() == false); assert(ts != null); Long txn_id = ts.getTransactionId(); if (trace.val) LOG.trace(String.format("%s - Wrapping %d %s into a %s", ts, fragmentBuilders.size(), WorkFragment.class.getSimpleName(), TransactionWorkRequest.class.getSimpleName())); // If our transaction was originally designated as a single-partitioned, then we need to make // sure that we don't touch any partition other than our local one. If we do, then we need abort // it and restart it as multi-partitioned boolean need_restart = false; boolean predict_singlepartition = ts.isPredictSinglePartition(); PartitionSet done_partitions = ts.getDonePartitions(); Estimate t_estimate = ts.getLastEstimate(); // Now we can go back through and start running all of the WorkFragments that were not blocked // waiting for an input dependency. 
Note that we pack all the fragments into a single // CoordinatorFragment rather than sending each WorkFragment in its own message for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { assert(this.depTracker.isBlocked(ts, fragmentBuilder) == false); final int target_partition = fragmentBuilder.getPartitionId(); final int target_site = catalogContext.getSiteIdForPartitionId(target_partition); final PartitionSet doneNotifications = (notify != null ? notify.getNotifications(target_site) : null); // Make sure that this isn't a single-partition txn trying to access a remote partition if (predict_singlepartition && target_partition != this.partitionId) { if (debug.val) LOG.debug(String.format("%s - Txn on partition %d is suppose to be " + "single-partitioned, but it wants to execute a fragment on partition %d", ts, this.partitionId, target_partition)); need_restart = true; break; } // Make sure that this txn isn't trying to access a partition that we said we were // done with earlier else if (done_partitions.contains(target_partition)) { if (debug.val) LOG.warn(String.format("%s on partition %d was marked as done on partition %d " + "but now it wants to go back for more!", ts, this.partitionId, target_partition)); need_restart = true; break; } // Make sure we at least have something to do! else if (fragmentBuilder.getFragmentIdCount() == 0) { LOG.warn(String.format("%s - Trying to send a WorkFragment request with 0 fragments", ts)); continue; } // Add in the specexec query estimate at this partition if needed if (hstore_conf.site.specexec_enable && t_estimate != null && t_estimate.hasQueryEstimate(target_partition)) { List<CountedStatement> queryEst = t_estimate.getQueryEstimate(target_partition); // if (debug.val) if (target_partition == 0) if (debug.val) LOG.debug(String.format("%s - Sending remote query estimate to partition %d " + "containing %d queries\n%s", ts, target_partition, queryEst.size(), StringUtil.join("\n", queryEst))); assert(queryEst.isEmpty() == false); QueryEstimate.Builder estBuilder = QueryEstimate.newBuilder(); for (CountedStatement countedStmt : queryEst) { estBuilder.addStmtIds(countedStmt.statement.getId()); estBuilder.addStmtCounters(countedStmt.counter); } // FOR fragmentBuilder.setFutureStatements(estBuilder); } // Get the TransactionWorkRequest.Builder for the remote HStoreSite // We will use this store our serialized input dependencies TransactionWorkRequestBuilder requestBuilder = tmp_transactionRequestBuilders[target_site]; if (requestBuilder == null) { requestBuilder = tmp_transactionRequestBuilders[target_site] = new TransactionWorkRequestBuilder(); } TransactionWorkRequest.Builder builder = requestBuilder.getBuilder(ts, doneNotifications); // Also keep track of what Statements they are executing so that we know // we need to send over the wire to them. 
requestBuilder.addParamIndexes(fragmentBuilder.getParamIndexList()); // Input Dependencies if (fragmentBuilder.getNeedsInput()) { if (debug.val) LOG.debug(String.format("%s - Retrieving input dependencies at partition %d", ts, this.partitionId)); tmp_removeDependenciesMap.clear(); for (int i = 0, cnt = fragmentBuilder.getInputDepIdCount(); i < cnt; i++) { this.getFragmentInputs(ts, fragmentBuilder.getInputDepId(i), tmp_removeDependenciesMap); } // FOR for (Entry<Integer, List<VoltTable>> e : tmp_removeDependenciesMap.entrySet()) { if (requestBuilder.hasInputDependencyId(e.getKey())) continue; if (debug.val) LOG.debug(String.format("%s - Attaching %d input dependencies to be sent to %s", ts, e.getValue().size(), HStoreThreadManager.formatSiteName(target_site))); for (VoltTable vt : e.getValue()) { this.fs.clear(); try { this.fs.writeObject(vt); builder.addAttachedDepId(e.getKey().intValue()); builder.addAttachedData(ByteString.copyFrom(this.fs.getBBContainer().b)); } catch (Exception ex) { String msg = String.format("Failed to serialize input dependency %d for %s", e.getKey(), ts); throw new ServerFaultException(msg, ts.getTransactionId()); } if (debug.val) LOG.debug(String.format("%s - Storing %d rows for InputDependency %d to send " + "to partition %d [bytes=%d]", ts, vt.getRowCount(), e.getKey(), fragmentBuilder.getPartitionId(), CollectionUtil.last(builder.getAttachedDataList()).size())); } // FOR requestBuilder.addInputDependencyId(e.getKey()); } // FOR this.fs.getBBContainer().discard(); } builder.addFragments(fragmentBuilder); } // FOR (tasks) // Bad mojo! We need to throw a MispredictionException so that the VoltProcedure // will catch it and we can propagate the error message all the way back to the HStoreSite if (need_restart) { if (trace.val) LOG.trace(String.format("Aborting %s because it was mispredicted", ts)); // This is kind of screwy because we don't actually want to send the touched partitions // histogram because VoltProcedure will just do it for us... throw new MispredictionException(txn_id, null); } // Stick on the ParameterSets that each site needs into the TransactionWorkRequest for (int target_site = 0; target_site < tmp_transactionRequestBuilders.length; target_site++) { TransactionWorkRequestBuilder builder = tmp_transactionRequestBuilders[target_site]; if (builder == null || builder.isDirty() == false) { continue; } assert(builder != null); builder.addParameterSets(parameterSets); // Bombs away! this.hstore_coordinator.transactionWork(ts, target_site, builder.build(), this.request_work_callback); if (debug.val) LOG.debug(String.format("%s - Sent Work request to remote site %s", ts, HStoreThreadManager.formatSiteName(target_site))); } // FOR } /** * Figure out what partitions this transaction is done with. This will only return * a PartitionSet of what partitions we think we're done with. * For each partition that we idenitfy that the txn is done with, we will check to see * whether the txn is going to execute a query at its site in this batch. If it's not, * then we will notify that HStoreSite through the HStoreCoordinator. * If the partition that it doesn't need anymore is local (i.e., it's at the same * HStoreSite that we're at right now), then we'll just pass them a quick message * to let them know that they can prepare the txn. * @param ts * @param estimate * @param fragmentsPerPartition A histogram of the number of PlanFragments the * txn will execute in this batch at each partition. 
* @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @return A notification object that can be used to notify partitions that this txn is done with them. */ private DonePartitionsNotification computeDonePartitions(final LocalTransaction ts, final Estimate estimate, final FastIntHistogram fragmentsPerPartition, final boolean finalTask) { final PartitionSet touchedPartitions = ts.getPredictTouchedPartitions(); final PartitionSet donePartitions = ts.getDonePartitions(); // Compute the partitions that the txn will be finished with after this batch PartitionSet estDonePartitions = null; // If the finalTask flag is set to true, then the new done partitions // are every partition that this txn has locked if (finalTask) { estDonePartitions = touchedPartitions; } // Otherwise, we'll rely on the transaction's current estimate to figure it out. else { if (estimate == null || estimate.isValid() == false) { if (debug.val) LOG.debug(String.format("%s - Unable to compute new done partitions because there " + "is no valid estimate for the txn", ts)); return (null); } estDonePartitions = estimate.getDonePartitions(this.thresholds); if (estDonePartitions == null || estDonePartitions.isEmpty()) { if (debug.val) LOG.debug(String.format("%s - There are no new done partitions identified by %s", ts, estimate.getClass().getSimpleName())); return (null); } } assert(estDonePartitions != null) : "Null done partitions for " + ts; assert(estDonePartitions.isEmpty() == false) : "Empty done partitions for " + ts; if (debug.val) LOG.debug(String.format("%s - New estimated done partitions %s%s", ts, estDonePartitions, (trace.val ? "\n"+estimate : ""))); // Note that we can actually be done with ourselves, if this txn is only going to execute queries // at remote partitions. But we can't actually execute anything because this partition's only // execution thread is going to be blocked. So we always do this so that we're not sending a // useless message estDonePartitions.remove(this.partitionId); // Make sure that we only tell partitions that we actually touched, otherwise they will // be stuck waiting for a finish request that will never come! DonePartitionsNotification notify = new DonePartitionsNotification(); for (int partition : estDonePartitions.values()) { // Only mark the txn done at this partition if the Estimate says we were done // with it after executing this batch and it's a partition that we've locked. if (donePartitions.contains(partition) || touchedPartitions.contains(partition) == false) continue; if (trace.val) LOG.trace(String.format("%s - Marking partition %d as done for txn", ts, partition)); notify.donePartitions.add(partition); if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.markEarly2PCPartition(partition); // Check whether we're executing a query at this partition in this batch. // If we're not, then we need to check whether we can piggyback the "done" message // in another WorkFragment going to that partition or whether we have to // send a separate TransactionPrepareRequest if (fragmentsPerPartition.get(partition, 0) == 0) { // We need to let them know that the party is over!
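// A partition hosted at this local HStoreSite can be told directly by handing the prepare request to its
// PartitionExecutor; for a partition at a remote site we either piggyback the notification on a WorkFragment
// that is already headed to that site or fall back to a separate TransactionPrepareRequest.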
if (hstore_site.isLocalPartition(partition)) { // if (debug.val) LOG.info(String.format("%s - Notifying local partition %d that txn is finished it", ts, partition)); hstore_site.getPartitionExecutor(partition).queuePrepare(ts); } // Check whether we can piggyback on another WorkFragment that is going to // the same site else { Site remoteSite = catalogContext.getSiteForPartition(partition); boolean found = false; for (Partition remotePartition : remoteSite.getPartitions().values()) { if (fragmentsPerPartition.get(remotePartition.getId(), 0) != 0) { found = true; break; } } // FOR notify.addSiteNotification(remoteSite, partition, (found == false)); } } } // FOR return (notify); } /** * Send asynchronous notification messages to any remote site to tell them that we * are done with partitions that they have. * @param ts * @param notify */ private void notifyDonePartitions(LocalTransaction ts, DonePartitionsNotification notify) { // BLAST OUT NOTIFICATIONS! for (int remoteSiteId : notify._sitesToNotify) { assert(notify.notificationsPerSite[remoteSiteId] != null); if (debug.val) LOG.info(String.format("%s - Notifying %s that txn is finished with partitions %s", ts, HStoreThreadManager.formatSiteName(remoteSiteId), notify.notificationsPerSite[remoteSiteId])); hstore_coordinator.transactionPrepare(ts, ts.getPrepareCallback(), notify.notificationsPerSite[remoteSiteId]); // Make sure that we remove the PartitionSet for this site so that we don't // try to send the notifications again. notify.notificationsPerSite[remoteSiteId] = null; } // FOR } /** * Execute the given tasks and then block the current thread waiting for the list of dependency_ids to come * back from whatever it was we were suppose to do... * This is the slowest way to execute a bunch of WorkFragments and therefore should only be invoked * for batches that need to access non-local partitions * @param ts The txn handle that is executing this query batch * @param batchSize The number of SQLStmts that the txn queued up using voltQueueSQL() * @param batchParams The input parameters for the SQLStmts * @param allFragmentBuilders * @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @return */ public VoltTable[] dispatchWorkFragments(final LocalTransaction ts, final int batchSize, final ParameterSet batchParams[], final Collection<WorkFragment.Builder> allFragmentBuilders, boolean finalTask) { assert(allFragmentBuilders.isEmpty() == false) : "Unexpected empty WorkFragment list for " + ts; final boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); // *********************************** DEBUG *********************************** if (debug.val) { LOG.debug(String.format("%s - Preparing to dispatch %d messages and wait for the results [needsProfiling=%s]", ts, allFragmentBuilders.size(), needs_profiling)); if (trace.val) { StringBuilder sb = new StringBuilder(); sb.append(ts + " - WorkFragments:\n"); for (WorkFragment.Builder fragment : allFragmentBuilders) { sb.append(StringBoxUtil.box(fragment.toString()) + "\n"); } // FOR sb.append(ts + " - ParameterSets:\n"); for (ParameterSet ps : batchParams) { sb.append(ps + "\n"); } // FOR LOG.trace(sb); } } // *********************************** DEBUG *********************************** // OPTIONAL: Check to make sure that this request is valid // (1) At least one of the WorkFragments needs to be executed on a remote partition // (2) All of the PlanFragments ids in the WorkFragments match this txn's Procedure if 
(hstore_conf.site.exec_validate_work && ts.isSysProc() == false) { LOG.warn(String.format("%s - Checking whether all of the WorkFragments are valid", ts)); boolean has_remote = false; for (WorkFragment.Builder frag : allFragmentBuilders) { if (frag.getPartitionId() != this.partitionId) { has_remote = true; } for (int frag_id : frag.getFragmentIdList()) { PlanFragment catalog_frag = CatalogUtil.getPlanFragment(catalogContext.database, frag_id); Statement catalog_stmt = catalog_frag.getParent(); assert(catalog_stmt != null); Procedure catalog_proc = catalog_stmt.getParent(); if (catalog_proc.equals(ts.getProcedure()) == false) { LOG.warn(ts.debug() + "\n" + allFragmentBuilders + "\n---- INVALID ----\n" + frag); String msg = String.format("%s - Unexpected %s", ts, catalog_frag.fullName()); throw new ServerFaultException(msg, ts.getTransactionId()); } } } // FOR if (has_remote == false) { LOG.warn(ts.debug() + "\n" + allFragmentBuilders); String msg = ts + "Trying to execute all local single-partition queries using the slow-path!"; throw new ServerFaultException(msg, ts.getTransactionId()); } } boolean first = true; boolean serializedParams = false; CountDownLatch latch = null; boolean all_local = true; boolean is_localSite; boolean is_localPartition; boolean is_localReadOnly = true; int num_localPartition = 0; int num_localSite = 0; int num_remote = 0; int num_skipped = 0; int total = 0; Collection<WorkFragment.Builder> fragmentBuilders = allFragmentBuilders; // Make sure our txn is in our DependencyTracker if (trace.val) LOG.trace(String.format("%s - Added transaction to %s", ts, this.depTracker.getClass().getSimpleName())); this.depTracker.addTransaction(ts); // Count the number of fragments that we're going to send to each partition and // figure out whether the txn will always be read-only at this partition tmp_fragmentsPerPartition.clearValues(); for (WorkFragment.Builder fragmentBuilder : allFragmentBuilders) { int partition = fragmentBuilder.getPartitionId(); tmp_fragmentsPerPartition.put(partition); if (this.partitionId == partition && fragmentBuilder.getReadOnly() == false) { is_localReadOnly = false; } } // FOR long undoToken = this.calculateNextUndoToken(ts, is_localReadOnly); ts.initFirstRound(undoToken, batchSize); final boolean predict_singlePartition = ts.isPredictSinglePartition(); // Calculate whether we are finished with partitions now final Estimate lastEstimate = ts.getLastEstimate(); DonePartitionsNotification notify = null; if (hstore_conf.site.exec_early_prepare && ts.isSysProc() == false && ts.allowEarlyPrepare()) { notify = this.computeDonePartitions(ts, lastEstimate, tmp_fragmentsPerPartition, finalTask); if (notify.hasSitesToNotify()) this.notifyDonePartitions(ts, notify); } // Attach the ParameterSets to our transaction handle so that anybody on this HStoreSite // can access them directly without needing to deserialize them from the WorkFragments ts.attachParameterSets(batchParams); // Now if we have some work sent out to other partitions, we need to wait until they come back // In the first part, we wait until all of our blocked WorkFragments become unblocked final BlockingDeque<Collection<WorkFragment.Builder>> queue = this.depTracker.getUnblockedWorkFragmentsQueue(ts); // Run through this loop if: // (1) We have no pending errors // (2) This is our first time in the loop (first == true) // (3) If we know that there are still messages being blocked // (4) If we know that there are still unblocked messages that we need to process // (5) The latch for this round is 
still greater than zero while (ts.hasPendingError() == false && (first == true || this.depTracker.stillHasWorkFragments(ts) || (latch != null && latch.getCount() > 0))) { if (trace.val) LOG.trace(String.format("%s - %s loop [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, ClassUtil.getCurrentMethodName(), first, this.depTracker.stillHasWorkFragments(ts), queue.size(), latch)); // If this is the not first time through the loop, then poll the queue // to get our list of fragments if (first == false) { all_local = true; is_localSite = false; is_localPartition = false; num_localPartition = 0; num_localSite = 0; num_remote = 0; num_skipped = 0; total = 0; if (trace.val) LOG.trace(String.format("%s - Waiting for unblocked tasks on partition %d", ts, this.partitionId)); fragmentBuilders = queue.poll(); // NON-BLOCKING // If we didn't get back a list of fragments here, then we will spin through // and invoke utilityWork() to try to do something useful until what we need shows up if (needs_profiling) ts.profiler.startExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.start(); try { while (fragmentBuilders == null) { // If there is more work that we could do, then we'll just poll the queue // without waiting so that we can go back and execute it again if we have // more time. if (this.utilityWork()) { fragmentBuilders = queue.poll(); } // Otherwise we will wait a little so that we don't spin the CPU else { fragmentBuilders = queue.poll(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS); } } // WHILE } catch (InterruptedException ex) { if (this.hstore_site.isShuttingDown() == false) { LOG.error(String.format("%s - We were interrupted while waiting for blocked tasks", ts), ex); } return (null); } finally { if (needs_profiling) ts.profiler.stopExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.stopIfStarted(); } } assert(fragmentBuilders != null); // If the list to fragments unblock is empty, then we // know that we have dispatched all of the WorkFragments for the // transaction's current SQLStmt batch. That means we can just wait // until all the results return to us. if (fragmentBuilders.isEmpty()) { if (trace.val) LOG.trace(String.format("%s - Got an empty list of WorkFragments at partition %d. " + "Blocking until dependencies arrive", ts, this.partitionId)); break; } this.tmp_localWorkFragmentBuilders.clear(); if (predict_singlePartition == false) { this.tmp_remoteFragmentBuilders.clear(); this.tmp_localSiteFragmentBuilders.clear(); } // ------------------------------- // FAST PATH: Assume everything is local // ------------------------------- if (predict_singlePartition) { for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { if (first == false || this.depTracker.addWorkFragment(ts, fragmentBuilder, batchParams)) { this.tmp_localWorkFragmentBuilders.add(fragmentBuilder); total++; num_localPartition++; } } // FOR // We have to tell the transaction handle to start the round before we send off the // WorkFragments for execution, since they might start executing locally! 
if (first) { ts.startRound(this.partitionId); latch = this.depTracker.getDependencyLatch(ts); } // Execute all of our WorkFragments quickly at our local ExecutionEngine for (WorkFragment.Builder fragmentBuilder : this.tmp_localWorkFragmentBuilders) { if (debug.val) LOG.debug(String.format("%s - Got unblocked %s to execute locally", ts, fragmentBuilder.getClass().getSimpleName())); assert(fragmentBuilder.getPartitionId() == this.partitionId) : String.format("Trying to process %s for %s on partition %d but it should have been " + "sent to partition %d [singlePartition=%s]\n%s", fragmentBuilder.getClass().getSimpleName(), ts, this.partitionId, fragmentBuilder.getPartitionId(), predict_singlePartition, fragmentBuilder); WorkFragment fragment = fragmentBuilder.build(); this.processWorkFragment(ts, fragment, batchParams); } // FOR } // ------------------------------- // SLOW PATH: Mixed local and remote messages // ------------------------------- else { // Look at each task and figure out whether it needs to be executed at a remote // HStoreSite or whether we can execute it at one of our local PartitionExecutors. for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { int partition = fragmentBuilder.getPartitionId(); is_localSite = hstore_site.isLocalPartition(partition); is_localPartition = (partition == this.partitionId); all_local = all_local && is_localPartition; // If this is the last WorkFragment that we're going to send to this partition for // this batch, then we will want to check whether we know that this is the last // time this txn will ever need to go to that txn. If so, then we'll want to if (notify != null && notify.donePartitions.contains(partition) && tmp_fragmentsPerPartition.dec(partition) == 0) { if (debug.val) LOG.debug(String.format("%s - Setting last fragment flag in %s for partition %d", ts, WorkFragment.class.getSimpleName(), partition)); fragmentBuilder.setLastFragment(true); } if (first == false || this.depTracker.addWorkFragment(ts, fragmentBuilder, batchParams)) { total++; // At this point we know that all the WorkFragment has been registered // in the LocalTransaction, so then it's safe for us to look to see // whether we already have a prefetched result that we need // if (prefetch && is_localPartition == false) { // boolean skip_queue = true; // for (int i = 0, cnt = fragmentBuilder.getFragmentIdCount(); i < cnt; i++) { // int fragId = fragmentBuilder.getFragmentId(i); // int paramIdx = fragmentBuilder.getParamIndex(i); // // VoltTable vt = this.queryCache.getResult(ts.getTransactionId(), // fragId, // partition, // parameters[paramIdx]); // if (vt != null) { // if (trace.val) // LOG.trace(String.format("%s - Storing cached result from partition %d for fragment %d", // ts, partition, fragId)); // this.depTracker.addResult(ts, partition, fragmentBuilder.getOutputDepId(i), vt); // } else { // skip_queue = false; // } // } // FOR // // If we were able to get cached results for all of the fragmentIds in // // this WorkFragment, then there is no need for us to send the message // // So we'll just skip queuing it up! How nice! 
// if (skip_queue) { // if (debug.val) // LOG.debug(String.format("%s - Using prefetch result for all fragments from partition %d", // ts, partition)); // num_skipped++; // continue; // } // } // Otherwise add it to our list of WorkFragments that we want to // queue up right now if (is_localPartition) { is_localReadOnly = (is_localReadOnly && fragmentBuilder.getReadOnly()); this.tmp_localWorkFragmentBuilders.add(fragmentBuilder); num_localPartition++; } else if (is_localSite) { this.tmp_localSiteFragmentBuilders.add(fragmentBuilder); num_localSite++; } else { this.tmp_remoteFragmentBuilders.add(fragmentBuilder); num_remote++; } } } // FOR assert(total == (num_remote + num_localSite + num_localPartition + num_skipped)) : String.format("Total:%d / Remote:%d / LocalSite:%d / LocalPartition:%d / Skipped:%d", total, num_remote, num_localSite, num_localPartition, num_skipped); // We have to tell the txn to start the round before we send off the // WorkFragments for execution, since they might start executing locally! if (first) { ts.startRound(this.partitionId); latch = this.depTracker.getDependencyLatch(ts); } // Now request the fragments that aren't local // We want to push these out as soon as possible if (num_remote > 0) { // We only need to serialize the ParameterSets once if (serializedParams == false) { if (needs_profiling) ts.profiler.startSerialization(); tmp_serializedParams.clear(); for (int i = 0; i < batchParams.length; i++) { if (batchParams[i] == null) { tmp_serializedParams.add(ByteString.EMPTY); } else { this.fs.clear(); try { batchParams[i].writeExternal(this.fs); ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b); tmp_serializedParams.add(bs); } catch (Exception ex) { String msg = "Failed to serialize ParameterSet " + i + " for " + ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } } // FOR if (needs_profiling) ts.profiler.stopSerialization(); } if (trace.val) LOG.trace(String.format("%s - Requesting %d %s to be executed on remote partitions " + "[doneNotifications=%s]", ts, num_remote, WorkFragment.class.getSimpleName(), notify!=null)); this.requestWork(ts, tmp_remoteFragmentBuilders, tmp_serializedParams, notify); if (needs_profiling) ts.profiler.markRemoteQuery(); } // Then dispatch the tasks that are needed at the same HStoreSite but // at a different partition than this one if (num_localSite > 0) { if (trace.val) LOG.trace(String.format("%s - Executing %d WorkFragments on local site's partitions", ts, num_localSite)); for (WorkFragment.Builder builder : this.tmp_localSiteFragmentBuilders) { PartitionExecutor other = hstore_site.getPartitionExecutor(builder.getPartitionId()); other.queueWork(ts, builder.build()); } // FOR if (needs_profiling) ts.profiler.markRemoteQuery(); } // Then execute all of the tasks that need to access the partitions at this HStoreSite // We'll dispatch the remote-partition-local-site fragments first because they're going // to need to get queued up at the other PartitionExecutors if (num_localPartition > 0) { if (trace.val) LOG.trace(String.format("%s - Executing %d WorkFragments on local partition", ts, num_localPartition)); for (WorkFragment.Builder fragmentBuilder : this.tmp_localWorkFragmentBuilders) { this.processWorkFragment(ts, fragmentBuilder.build(), batchParams); } // FOR } } if (trace.val) LOG.trace(String.format("%s - Dispatched %d WorkFragments " + "[remoteSite=%d, localSite=%d, localPartition=%d]", ts, total, num_remote, num_localSite, num_localPartition)); first = false; } // WHILE
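// At this point every WorkFragment in the batch has been dispatched, either to our local EE, to another
// PartitionExecutor at this site, or to a remote HStoreSite. We can release the serialization buffer and
// then block on the DependencyTracker's latch until all of the output dependencies arrive or we time out.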
this.fs.getBBContainer().discard(); if (trace.val) LOG.trace(String.format("%s - BREAK OUT [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first, this.depTracker.stillHasWorkFragments(ts), latch)); // assert(ts.stillHasWorkFragments() == false) : // String.format("Trying to block %s before all of its WorkFragments have been dispatched!\n%s\n%s", // ts, // StringUtil.join("** ", "\n", tempDebug), // this.getVoltProcedure(ts.getProcedureName()).getLastBatchPlan()); // Now that we know all of our WorkFragments have been dispatched, we can then // wait for all of the results to come back in. if (latch == null) latch = this.depTracker.getDependencyLatch(ts); assert(latch != null) : String.format("Unexpected null dependency latch for " + ts); if (latch.getCount() > 0) { if (debug.val) { LOG.debug(String.format("%s - All blocked messages dispatched. Waiting for %d dependencies", ts, latch.getCount())); if (trace.val) LOG.trace(ts.toString()); } boolean timeout = false; long startTime = EstTime.currentTimeMillis(); if (needs_profiling) ts.profiler.startExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.start(); try { while (latch.getCount() > 0 && ts.hasPendingError() == false) { if (this.utilityWork() == false) { timeout = latch.await(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS); if (timeout == false) break; } if ((EstTime.currentTimeMillis() - startTime) > hstore_conf.site.exec_response_timeout) { timeout = true; break; } } // WHILE } catch (InterruptedException ex) { if (this.hstore_site.isShuttingDown() == false) { LOG.error(String.format("%s - We were interrupted while waiting for results", ts), ex); } timeout = true; } catch (Throwable ex) { String msg = String.format("Fatal error for %s while waiting for results", ts); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ts.profiler.stopExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.stopIfStarted(); } if (timeout && this.isShuttingDown() == false) { LOG.warn(String.format("Still waiting for responses for %s after %d ms [latch=%d]\n%s", ts, hstore_conf.site.exec_response_timeout, latch.getCount(), ts.debug())); LOG.warn("Procedure Parameters:\n" + ts.getProcedureParameters()); hstore_conf.site.exec_profiling = true; LOG.warn(hstore_site.statusSnapshot()); String msg = "The query responses for " + ts + " never arrived!"; throw new ServerFaultException(msg, ts.getTransactionId()); } } // Update done partitions if (notify != null && notify.donePartitions.isEmpty() == false) { if (debug.val) LOG.debug(String.format("%s - Marking new done partitions %s", ts, notify.donePartitions)); ts.getDonePartitions().addAll(notify.donePartitions); } // IMPORTANT: Check whether the fragments failed somewhere and we got a response with an error // We will rethrow this so that it pops the stack all the way back to VoltProcedure.call() // where we can generate a message to the client if (ts.hasPendingError()) { if (debug.val) LOG.warn(String.format("%s was hit with a %s", ts, ts.getPendingError().getClass().getSimpleName())); throw ts.getPendingError(); } // IMPORTANT: Don't try to check whether we got back the right number of tables because the batch // may have hit an error and we didn't execute all of them. 
VoltTable results[] = null; try { results = this.depTracker.getResults(ts); } catch (AssertionError ex) { LOG.error("Failed to get final results for batch\n" + ts.debug()); throw ex; } ts.finishRound(this.partitionId); if (debug.val) { if (trace.val) LOG.trace(ts + " is now running and looking for love in all the wrong places..."); LOG.debug(String.format("%s - Returning back %d tables to VoltProcedure", ts, results.length)); } return (results); } // --------------------------------------------------------------- // COMMIT + ABORT METHODS // --------------------------------------------------------------- /** * Queue a speculatively executed transaction to send its ClientResponseImpl message */ private void blockClientResponse(LocalTransaction ts, ClientResponseImpl cresponse) { assert(ts.isPredictSinglePartition() == true) : String.format("Specutatively executed multi-partition %s [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); assert(ts.isSpeculative() == true) : String.format("Blocking ClientResponse for non-specutative %s [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); assert(cresponse.getStatus() != Status.ABORT_MISPREDICT) : String.format("Trying to block ClientResponse for mispredicted %s [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); assert(this.currentExecMode != ExecutionMode.COMMIT_ALL) : String.format("Blocking ClientResponse for %s when in non-specutative mode [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); this.specExecBlocked.push(Pair.of(ts, cresponse)); this.specExecModified = this.specExecModified && ts.isExecReadOnly(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Blocking %s ClientResponse [partitions=%s, blockQueue=%d]", ts, cresponse.getStatus(), ts.getTouchedPartitions().values(), this.specExecBlocked.size())); } /** * For the given transaction's ClientResponse, figure out whether we can send it back to the client * right now or whether we need to initiate two-phase commit. * @param ts * @param cresponse */ protected void processClientResponse(LocalTransaction ts, ClientResponseImpl cresponse) { // IMPORTANT: If we executed this locally and only touched our partition, then we need to commit/abort right here // 2010-11-14: The reason why we can do this is because we will just ignore the commit // message when it shows from the Dtxn.Coordinator. We should probably double check with Evan on this... Status status = cresponse.getStatus(); if (debug.val) { LOG.debug(String.format("%s - Processing ClientResponse at partition %d " + "[status=%s, singlePartition=%s, local=%s, clientHandle=%d]", ts, this.partitionId, status, ts.isPredictSinglePartition(), ts.isExecLocal(this.partitionId), cresponse.getClientHandle())); if (trace.val) { LOG.trace(ts + " Touched Partitions: " + ts.getTouchedPartitions().values()); if (ts.isPredictSinglePartition() == false) LOG.trace(ts + " Done Partitions: " + ts.getDonePartitions()); } } // ------------------------------- // ALL: Transactions that need to be internally restarted // ------------------------------- if (status == Status.ABORT_MISPREDICT || status == Status.ABORT_SPECULATIVE || status == Status.ABORT_EVICTEDACCESS) { // If the txn was mispredicted, then we will pass the information over to the // HStoreSite so that it can re-execute the transaction. We want to do this // first so that the txn gets re-executed as soon as possible... 
if (debug.val) LOG.debug(String.format("%s - Restarting because transaction was hit with %s", ts, (ts.getPendingError() != null ? ts.getPendingError().getClass().getSimpleName() : ""))); // We don't want to delete the transaction here because whoever is going to requeue it for // us will need to know what partitions that the transaction touched when it executed before if (ts.isPredictSinglePartition()) { this.finishTransaction(ts, status); this.hstore_site.transactionRequeue(ts, status); } // Send a message all the partitions involved that the party is over // and that they need to abort the transaction. We don't actually care when we get the // results back because we'll start working on new txns right away. // Note that when we call transactionFinish() right here this thread will then go on // to invoke HStoreSite.transactionFinish() for us. That means when it returns we will // have successfully aborted the txn at least at all of the local partitions at this site. else { if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostFinish(); LocalFinishCallback finish_callback = ts.getFinishCallback(); finish_callback.init(ts, status); finish_callback.markForRequeue(); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); this.hstore_coordinator.transactionFinish(ts, status, finish_callback); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } } // ------------------------------- // ALL: Single-Partition Transactions // ------------------------------- else if (ts.isPredictSinglePartition()) { // Commit or abort the transaction only if we haven't done it already // This can happen when we commit speculative txns out of order if (ts.isMarkedFinished(this.partitionId) == false) { this.finishTransaction(ts, status); } // We have to mark it as loggable to prevent the response // from getting sent back to the client if (hstore_conf.site.commandlog_enable) ts.markLogEnabled(); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); this.hstore_site.responseSend(ts, cresponse); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); this.hstore_site.queueDeleteTransaction(ts.getTransactionId(), status); } // ------------------------------- // COMMIT: Distributed Transaction // ------------------------------- else if (status == Status.OK) { // We need to set the new ExecutionMode before we invoke transactionPrepare // because the LocalTransaction handle might get cleaned up immediately ExecutionMode newMode = null; if (hstore_conf.site.specexec_enable) { newMode = (ts.isExecReadOnly(this.partitionId) ? 
ExecutionMode.COMMIT_READONLY : ExecutionMode.COMMIT_NONE); } else { newMode = ExecutionMode.DISABLED; } this.setExecutionMode(ts, newMode); // We have to send a prepare message to all of our remote HStoreSites // We want to make sure that we don't go back to ones that we've already told PartitionSet donePartitions = ts.getDonePartitions(); PartitionSet notifyPartitions = new PartitionSet(); for (int partition : ts.getPredictTouchedPartitions().values()) { if (donePartitions.contains(partition) == false) { notifyPartitions.add(partition); } } // FOR if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostPrepare(); ts.setClientResponse(cresponse); if (hstore_conf.site.exec_profiling) { this.profiler.network_time.start(); this.profiler.sp3_local_time.start(); } LocalPrepareCallback callback = ts.getPrepareCallback(); callback.init(ts, notifyPartitions); this.hstore_coordinator.transactionPrepare(ts, callback, notifyPartitions); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } // ------------------------------- // ABORT: Distributed Transaction // ------------------------------- else { // Send back the result to the client right now, since there's no way // that we're magically going to be able to recover this and get them a result // This has to come before the network messages above because this will clean-up the // LocalTransaction state information this.hstore_site.responseSend(ts, cresponse); // Send a message all the partitions involved that the party is over // and that they need to abort the transaction. We don't actually care when we get the // results back because we'll start working on new txns right away. // Note that when we call transactionFinish() right here this thread will then go on // to invoke HStoreSite.transactionFinish() for us. That means when it returns we will // have successfully aborted the txn at least at all of the local partitions at this site. if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostFinish(); LocalFinishCallback callback = ts.getFinishCallback(); callback.init(ts, status); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); try { this.hstore_coordinator.transactionFinish(ts, status, callback); } finally { if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } } } /** * Enable speculative execution mode for this partition. The given transaction is * the one that we will need to wait to finish before we can release the ClientResponses * for any speculatively executed transactions. 
* @param ts the distributed transaction that is being prepared at this partition * @return Status.OK if the txn can be committed at this partition, otherwise Status.ABORT_RESTART */ private Status prepareTransaction(AbstractTransaction ts) { assert(ts != null) : "Unexpected null transaction handle at partition " + this.partitionId; assert(ts.isInitialized()) : String.format("Trying to prepare uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to prepare %s again after it was already finished at partition %d", ts, this.partitionId); Status status = Status.OK; // Skip if we've already invoked prepare for this txn at this partition if (ts.isMarkedPrepared(this.partitionId) == false) { if (debug.val) LOG.debug(String.format("%s - Preparing to commit txn at partition %d [specBlocked=%d]", ts, this.partitionId, this.specExecBlocked.size())); ExecutionMode newMode = ExecutionMode.COMMIT_NONE; if (hstore_conf.site.exec_profiling && this.partitionId != ts.getBasePartition() && ts.needsFinish(this.partitionId)) { profiler.sp3_remote_time.start(); } if (hstore_conf.site.specexec_enable) { // Check to see if there were any conflicts with the dtxn and any of its speculative // txns at this partition. If there were, then we know that we can't commit the txn here. LocalTransaction spec_ts; for (Pair<LocalTransaction, ClientResponseImpl> pair : this.specExecBlocked) { spec_ts = pair.getFirst(); if (debug.val) LOG.debug(String.format("%s - Checking for conflicts with speculative %s at partition %d [%s]", ts, spec_ts, this.partitionId, this.specExecChecker.getClass().getSimpleName())); if (this.specExecChecker.hasConflictAfter(ts, spec_ts, this.partitionId)) { if (debug.val) LOG.debug(String.format("%s - Conflict found with speculative txn %s at partition %d", ts, spec_ts, this.partitionId)); status = Status.ABORT_RESTART; break; } } // FOR // Check whether the txn that we're waiting for is read-only. // If it is, then that means all read-only transactions can commit right away if (status == Status.OK && ts.isExecReadOnly(this.partitionId)) { if (debug.val) LOG.debug(String.format("%s - Txn is read-only at partition %d [readOnly=%s]", ts, this.partitionId, ts.isExecReadOnly(this.partitionId))); newMode = ExecutionMode.COMMIT_READONLY; } } if (this.currentDtxn != null) this.setExecutionMode(ts, newMode); } // It's ok if they try to prepare the txn twice. That might just mean that they never // got the acknowledgement back in time if they tried to send an early commit message.
else if (debug.val) { LOG.debug(String.format("%s - Already marked 2PC:PREPARE at partition %d", ts, this.partitionId)); } // IMPORTANT // When we do an early 2PC-PREPARE, we won't have this callback ready // because we don't know what callback to use to send the acknowledgements // back over the network PartitionCountingCallback<AbstractTransaction> callback = ts.getPrepareCallback(); if (status == Status.OK) { if (callback.isInitialized()) { try { callback.run(this.partitionId); } catch (Throwable ex) { LOG.warn("Unexpected error for " + ts, ex); } } // But we will always mark ourselves as prepared at this partition ts.markPrepared(this.partitionId); } else { if (debug.val) LOG.debug(String.format("%s - Aborting txn from partition %d [%s]", ts, this.partitionId, status)); callback.abort(this.partitionId, status); } return (status); } /** * Internal call to abort/commit the transaction down in the execution engine * @param ts * @param commit */ private void finishTransaction(AbstractTransaction ts, Status status) { assert(ts != null) : "Unexpected null transaction handle at partition " + this.partitionId; assert(ts.isInitialized()) : String.format("Trying to commit uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to commit %s twice at partition %d", ts, this.partitionId); // This can be null if they haven't submitted anything boolean commit = (status == Status.OK); long undoToken = (commit ? ts.getLastUndoToken(this.partitionId) : ts.getFirstUndoToken(this.partitionId)); // Only commit/abort this transaction if: // (2) We have the last undo token used by this transaction // (3) The transaction was executed with undo buffers // (4) The transaction actually submitted work to the EE // (5) The transaction modified data at this partition if (ts.needsFinish(this.partitionId) && undoToken != HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { if (trace.val) LOG.trace(String.format("%s - Invoking EE to finish work for txn [%s / speculative=%s]", ts, status, ts.isSpeculative())); this.finishWorkEE(ts, undoToken, commit); } // We always need to do the following things regardless if we hit up the EE or not if (commit) this.lastCommittedTxnId = ts.getTransactionId(); if (trace.val) LOG.trace(String.format("%s - Telling queue manager that txn is finished at partition %d", ts, this.partitionId)); this.queueManager.lockQueueFinished(ts, status, this.partitionId); if (debug.val) LOG.debug(String.format("%s - Successfully %sed transaction at partition %d", ts, (commit ? "committ" : "abort"), this.partitionId)); ts.markFinished(this.partitionId); } /** * The real method that actually reaches down into the EE and commits/undos the changes * for the given token. * Unless you know what you're doing, you probably want to be calling finishTransaction() * instead of calling this directly. 
* @param ts * @param undoToken * @param commit */ private void finishWorkEE(AbstractTransaction ts, long undoToken, boolean commit) { assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to commit %s twice at partition %d", ts, this.partitionId); // If the txn is completely read-only and they didn't use undo-logging, then // there is nothing that we need to do, except to check to make sure we aren't // trying to abort this txn if (undoToken == HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN) { // SANITY CHECK: Make sure that they're not trying to undo a transaction that // modified the database but did not use undo logging if (ts.isExecReadOnly(this.partitionId) == false && commit == false) { String msg = String.format("TRYING TO ABORT TRANSACTION ON PARTITION %d WITHOUT UNDO LOGGING [undoToken=%d]", this.partitionId, undoToken); LOG.fatal(msg + "\n" + ts.debug()); this.crash(new ServerFaultException(msg, ts.getTransactionId())); } if (debug.val) LOG.debug(String.format("%s - undoToken == DISABLE_UNDO_LOGGING_TOKEN", ts)); } // COMMIT / ABORT else { boolean needs_profiling = false; if (hstore_conf.site.txn_profiling && ts.isExecLocal(this.partitionId) && ((LocalTransaction)ts).profiler != null) { needs_profiling = true; ((LocalTransaction)ts).profiler.startPostEE(); } assert(this.lastCommittedUndoToken != undoToken) : String.format("Trying to %s undoToken %d for %s twice at partition %d", (commit ? "COMMIT" : "ABORT"), undoToken, ts, this.partitionId); // COMMIT! if (commit) { if (debug.val) { LOG.debug(String.format("%s - COMMITING txn on partition %d with undoToken %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", ts, this.partitionId, undoToken, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? " - " + ((LocalTransaction)ts).getSpeculationType() : ""))); if (this.specExecBlocked.isEmpty() == false && ts.isPredictSinglePartition() == false) { LOG.debug(String.format("%s - # of Speculatively Executed Txns: %d ", ts, this.specExecBlocked.size())); } } assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to commit undoToken %d for %s but it is less than the " + "last committed undoToken %d at partition %d\n" + "Last Committed Txn: %d", undoToken, ts, this.lastCommittedUndoToken, this.partitionId, this.lastCommittedTxnId); this.ee.releaseUndoToken(undoToken); this.lastCommittedUndoToken = undoToken; } // ABORT! else { // Evan says that txns will be aborted LIFO. This means the first txn that // we get in abortWork() will have a the greatest undoToken, which means that // it will automagically rollback all other outstanding txns. // I'm lazy/tired, so for now I'll just rollback everything I get, but in theory // we should be able to check whether our undoToken has already been rolled back if (debug.val) { LOG.debug(String.format("%s - ABORTING txn on partition %d with undoToken %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", ts, this.partitionId, undoToken, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? 
" - " + ((LocalTransaction)ts).getSpeculationType() : ""))); if (this.specExecBlocked.isEmpty() == false && ts.isPredictSinglePartition() == false) { LOG.debug(String.format("%s - # of Speculatively Executed Txns: %d ", ts, this.specExecBlocked.size())); } } assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to abort undoToken %d for %s but it is less than the " + "last committed undoToken %d at partition %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", undoToken, ts, this.lastCommittedUndoToken, this.partitionId, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? " - " + ((LocalTransaction)ts).getSpeculationType() : "")); this.ee.undoUndoToken(undoToken); } if (needs_profiling) ((LocalTransaction)ts).profiler.stopPostEE(); } } /** * Somebody told us that our partition needs to abort/commit the given transaction id. * This method should only be used for distributed transactions, because * it will do some extra work for speculative execution * @param ts - The transaction to finish up. * @param status - The final status of the transaction */ private void finishDistributedTransaction(final AbstractTransaction ts, final Status status) { if (debug.val) LOG.debug(String.format("%s - Processing finish request at partition %d " + "[status=%s, readOnly=%s]", ts, this.partitionId, status, ts.isExecReadOnly(this.partitionId))); if (this.currentDtxn == ts) { // 2012-11-22 -- Yes, today is Thanksgiving and I'm working on my database. // That's just grad student life I guess. Anyway, if you're reading this then // you know that this is an important part of the system. We have a dtxn that // we have been told is completely finished and now we need to either commit // or abort any changes that it may have made at this partition. The tricky thing // is that if we have speculative execution enabled, then we need to make sure // that we process any transactions that were executed while the dtxn was running // in the right order to ensure that we maintain serializability. // Here is the basic logic of what's about to happen: // // (1) If the dtxn is commiting, then we just need to commit the the last txn that // was executed (since this will have the largest undo token). // The EE will automatically commit all undo tokens less than that. // (2) If the dtxn is aborting, then we can commit any speculative txn that was // executed before the dtxn's first non-readonly undo token. // // Note that none of the speculative txns in the blocked queue will need to be // aborted at this point, because we will have rolled back their changes immediately // when they aborted, so that our dtxn doesn't read dirty data. 
if (this.specExecBlocked.isEmpty() == false) { // First thing we need to do is get the latch that will be set by any transaction // that was in the middle of being executed when we were called if (debug.val) LOG.debug(String.format("%s - Checking %d blocked speculative transactions at " + "partition %d [currentMode=%s]", ts, this.specExecBlocked.size(), this.partitionId, this.currentExecMode)); LocalTransaction spec_ts = null; ClientResponseImpl spec_cr = null; // ------------------------------- // DTXN NON-READ-ONLY ABORT // If the dtxn did not modify this partition, then everthing can commit // Otherwise, we want to commit anything that was executed before the dtxn started // ------------------------------- if (status != Status.OK && ts.isExecReadOnly(this.partitionId) == false) { // We need to get the first undo tokens for our distributed transaction long dtxnUndoToken = ts.getFirstUndoToken(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Looking for speculative txns to commit before we rollback undoToken %d", ts, dtxnUndoToken)); // Queue of speculative txns that need to be committed. final Queue<Pair<LocalTransaction, ClientResponseImpl>> txnsToCommit = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); // Queue of speculative txns that need to be aborted + restarted final Queue<Pair<LocalTransaction, ClientResponseImpl>> txnsToRestart = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); long spec_token; long max_token = HStoreConstants.NULL_UNDO_LOGGING_TOKEN; LocalTransaction max_ts = null; for (Pair<LocalTransaction, ClientResponseImpl> pair : this.specExecBlocked) { boolean shouldCommit = false; spec_ts = pair.getFirst(); spec_token = spec_ts.getFirstUndoToken(this.partitionId); if (debug.val) LOG.debug(String.format("Speculative Txn %s [undoToken=%d, %s]", spec_ts, spec_token, spec_ts.getSpeculationType())); // Speculative txns should never be executed without an undo token assert(spec_token != HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN); assert(spec_ts.isSpeculative()) : spec_ts + " isn't marked as speculative!"; // If the speculative undoToken is null, then this txn didn't execute // any queries. That means we can always commit it if (spec_token == HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { if (debug.val) LOG.debug(String.format("Speculative Txn %s has a null undoToken at partition %d", spec_ts, this.partitionId)); shouldCommit = true; } // Otherwise, look to see if this txn was speculatively executed before the // first undo token of the distributed txn. That means we know that this guy // didn't read any modifications made by the dtxn. else if (spec_token < dtxnUndoToken) { if (debug.val) LOG.debug(String.format("Speculative Txn %s has an undoToken less than the dtxn %s " + "at partition %d [%d < %d]", spec_ts, ts, this.partitionId, spec_token, dtxnUndoToken)); shouldCommit = true; } // Ok so at this point we know that our spec txn came *after* the distributed txn // started. 
So we need to use our checker to see whether there is a conflict else if (this.specExecChecker.hasConflictAfter(ts, spec_ts, this.partitionId) == false) { if (debug.val) LOG.debug(String.format("Speculative Txn %s does not conflict with dtxn %s at partition %d", spec_ts, ts, this.partitionId)); shouldCommit = true; } if (shouldCommit) { txnsToCommit.add(pair); if (spec_token != HStoreConstants.NULL_UNDO_LOGGING_TOKEN && spec_token > max_token) { max_token = spec_token; max_ts = spec_ts; } } else { txnsToRestart.add(pair); } } // FOR if (debug.val) LOG.debug(String.format("%s - Found %d speculative txns at partition %d that need to be " + "committed *before* we abort this txn", ts, txnsToCommit.size(), this.partitionId)); // (1) Commit the greatest token that we've seen. This means that // all our other txns can be safely processed without needing // to go down in the EE if (max_token != HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { assert(max_ts != null); this.finishWorkEE(max_ts, max_token, true); } // (2) Process all the txns that need to be committed Pair<LocalTransaction, ClientResponseImpl> pair = null; while ((pair = txnsToCommit.poll()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); spec_ts.markFinished(this.partitionId); try { if (debug.val) LOG.debug(String.format("%s - Releasing blocked ClientResponse for %s [status=%s]", ts, spec_ts, spec_cr.getStatus())); this.processClientResponse(spec_ts, spec_cr); } catch (Throwable ex) { String msg = "Failed to complete queued response for " + spec_ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } // FOR // (3) Abort the distributed txn this.finishTransaction(ts, status); // (4) Restart all the other txns while ((pair = txnsToRestart.poll()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); MispredictionException error = new MispredictionException(spec_ts.getTransactionId(), spec_ts.getTouchedPartitions()); spec_ts.setPendingError(error, false); spec_cr.setStatus(Status.ABORT_SPECULATIVE); this.processClientResponse(spec_ts, spec_cr); } // FOR } // ------------------------------- // DTXN READ-ONLY ABORT or DTXN COMMIT // ------------------------------- else { // **IMPORTANT** // If the dtxn needs to commit, then all we need to do is get the // last undoToken that we've generated (since we know that it had to // have been used either by our distributed txn or for one of our // speculative txns). // // If the read-only dtxn needs to abort, then there's nothing we need to // do, because it didn't make any changes. That means we can just // commit the last speculatively executed transaction // // Once we have this token, we can just make a direct call to the EE // to commit any changes that came before it. Note that we are using our // special 'finishWorkEE' method that does not require us to provide // the transaction that we're committing. long undoToken = this.lastUndoToken; if (debug.val) LOG.debug(String.format("%s - Last undoToken at partition %d => %d", ts, this.partitionId, undoToken)); // Bombs away! if (undoToken != this.lastCommittedUndoToken) { this.finishWorkEE(ts, undoToken, true); // IMPORTANT: Make sure that we remove the dtxn from the lock queue! // This is normally done in finishTransaction() but because we're trying // to be clever and invoke the EE directly, we have to make sure that // we call it ourselves. 
this.queueManager.lockQueueFinished(ts, status, this.partitionId); } // Make sure that we mark the dtxn as finished so that we don't // try to do anything with it later on. ts.markFinished(this.partitionId); // Now make sure that all of the speculative txns are processed without // committing (since we just committed any change that they could have made // up above). Pair<LocalTransaction, ClientResponseImpl> pair = null; while ((pair = this.specExecBlocked.pollFirst()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); spec_ts.markFinished(this.partitionId); try { if (trace.val) LOG.trace(String.format("%s - Releasing blocked ClientResponse for %s [status=%s]", ts, spec_ts, spec_cr.getStatus())); this.processClientResponse(spec_ts, spec_cr); } catch (Throwable ex) { String msg = "Failed to complete queued response for " + spec_ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } // WHILE } this.specExecBlocked.clear(); this.specExecModified = false; if (trace.val) LOG.trace(String.format("Finished processing all queued speculative txns for dtxn %s", ts)); } // ------------------------------- // NO SPECULATIVE TXNS // ------------------------------- else { // There are no speculative txns waiting for this dtxn, // so we can just commit it right away if (debug.val) LOG.debug(String.format("%s - No speculative txns at partition %d. Just %s txn by itself", ts, this.partitionId, (status == Status.OK ? "commiting" : "aborting"))); this.finishTransaction(ts, status); } // Clear our cached query results that are specific for this transaction // this.queryCache.purgeTransaction(ts.getTransactionId()); // TODO: Remove anything in our queue for this txn // if (ts.hasQueuedWork(this.partitionId)) { // } // Check whether this is the response that the speculatively executed txns have been waiting for // We could have turned off speculative execution mode beforehand if (debug.val) LOG.debug(String.format("%s - Attempting to unmark as the current DTXN at partition %d and " + "setting execution mode to %s", ts, this.partitionId, ExecutionMode.COMMIT_ALL)); try { // Resetting the current_dtxn variable has to come *before* we change the execution mode this.resetCurrentDtxn(); this.setExecutionMode(ts, ExecutionMode.COMMIT_ALL); // Release blocked transactions this.releaseBlockedTransactions(ts); } catch (Throwable ex) { String msg = String.format("Failed to finish %s at partition %d", ts, this.partitionId); throw new ServerFaultException(msg, ex, ts.getTransactionId()); } if (hstore_conf.site.exec_profiling) { this.profiler.sp3_local_time.stopIfStarted(); this.profiler.sp3_remote_time.stopIfStarted(); } } // We were told told to finish a dtxn that is not the current one // at this partition. That's ok as long as it's aborting and not trying // to commit. else { assert(status != Status.OK) : String.format("Trying to commit %s at partition %d but the current dtxn is %s", ts, this.partitionId, this.currentDtxn); this.queueManager.lockQueueFinished(ts, status, this.partitionId); } // ------------------------------- // FINISH CALLBACKS // ------------------------------- // MapReduceTransaction if (ts instanceof MapReduceTransaction) { PartitionCountingCallback<AbstractTransaction> callback = ((MapReduceTransaction)ts).getCleanupCallback(); // We don't want to invoke this callback at the basePartition's site // because we don't want the parent txn to actually get deleted. 
if (this.partitionId == ts.getBasePartition()) { if (debug.val) LOG.debug(String.format("%s - Notifying %s that the txn is finished at partition %d", ts, callback.getClass().getSimpleName(), this.partitionId)); callback.run(this.partitionId); } } else { PartitionCountingCallback<AbstractTransaction> callback = ts.getFinishCallback(); if (debug.val) LOG.debug(String.format("%s - Notifying %s that the txn is finished at partition %d", ts, callback.getClass().getSimpleName(), this.partitionId)); callback.run(this.partitionId); } } private void blockTransaction(InternalTxnMessage work) { if (debug.val) LOG.debug(String.format("%s - Adding %s work to blocked queue", work.getTransaction(), work.getClass().getSimpleName())); this.currentBlockedTxns.add(work); } private void blockTransaction(LocalTransaction ts) { this.blockTransaction(new StartTxnMessage(ts)); } /** * Release all the transactions that are currently in this partition's blocked queue * into the work queue. * @param ts */ private void releaseBlockedTransactions(AbstractTransaction ts) { if (this.currentBlockedTxns.isEmpty() == false) { if (debug.val) LOG.debug(String.format("Attempting to release %d blocked transactions at partition %d because of %s", this.currentBlockedTxns.size(), this.partitionId, ts)); this.work_queue.addAll(this.currentBlockedTxns); int released = this.currentBlockedTxns.size(); this.currentBlockedTxns.clear(); if (debug.val) LOG.debug(String.format("Released %d blocked transactions at partition %d because of %s", released, this.partitionId, ts)); } assert(this.currentBlockedTxns.isEmpty()); } // --------------------------------------------------------------- // SNAPSHOT METHODS // --------------------------------------------------------------- /** * Do snapshot work exclusively until there is no more. Also blocks * until the syncing and closing of snapshot data targets has completed. */ public void initiateSnapshots(Deque<SnapshotTableTask> tasks) { m_snapshotter.initiateSnapshots(ee, tasks); } public Collection<Exception> completeSnapshotWork() throws InterruptedException { return m_snapshotter.completeSnapshotWork(ee); } // --------------------------------------------------------------- // SHUTDOWN METHODS // --------------------------------------------------------------- /** * Cause this PartitionExecutor to make the entire HStore cluster shutdown * This won't return! */ public synchronized void crash(Throwable ex) { String msg = String.format("PartitionExecutor for Partition #%d is crashing", this.partitionId); if (ex == null) LOG.warn(msg); else LOG.warn(msg, ex); assert(this.hstore_coordinator != null); this.hstore_coordinator.shutdownClusterBlocking(ex); } @Override public boolean isShuttingDown() { return (this.hstore_site.isShuttingDown()); // shutdown_state == State.PREPARE_SHUTDOWN || this.shutdown_state == State.SHUTDOWN); } @Override public void prepareShutdown(boolean error) { this.shutdown_state = ShutdownState.PREPARE_SHUTDOWN; } /** * Somebody from the outside wants us to shutdown */ public synchronized void shutdown() { if (this.shutdown_state == ShutdownState.SHUTDOWN) { if (debug.val) LOG.debug(String.format("Partition #%d told to shutdown again. 
Ignoring...", this.partitionId)); return; } this.shutdown_state = ShutdownState.SHUTDOWN; if (debug.val) LOG.debug(String.format("Shutting down PartitionExecutor for Partition #%d", this.partitionId)); // Clear the queue this.work_queue.clear(); // Knock out this ma if (this.m_snapshotter != null) this.m_snapshotter.shutdown(); // Make sure we shutdown our threadpool // this.thread_pool.shutdownNow(); if (this.self != null) this.self.interrupt(); if (this.shutdown_latch != null) { try { this.shutdown_latch.acquire(); } catch (InterruptedException ex) { // Ignore } catch (Exception ex) { LOG.fatal("Unexpected error while shutting down", ex); } } } // ---------------------------------------------------------------------------- // DEBUG METHODS // ---------------------------------------------------------------------------- @Override public String toString() { return String.format("%s{%s}", this.getClass().getSimpleName(), HStoreThreadManager.formatPartitionName(siteId, partitionId)); } public class Debug implements DebugContext { public VoltProcedure getVoltProcedure(String procName) { Procedure proc = catalogContext.procedures.getIgnoreCase(procName); return (PartitionExecutor.this.getVoltProcedure(proc.getId())); } public SpecExecScheduler getSpecExecScheduler() { return (PartitionExecutor.this.specExecScheduler); } public AbstractConflictChecker getSpecExecConflictChecker() { return (PartitionExecutor.this.specExecChecker); } public Collection<BatchPlanner> getBatchPlanners() { return (PartitionExecutor.this.batchPlanners.values()); } public PartitionExecutorProfiler getProfiler() { return (PartitionExecutor.this.profiler); } public Thread getExecutionThread() { return (PartitionExecutor.this.self); } public Queue<InternalMessage> getWorkQueue() { return (PartitionExecutor.this.work_queue); } public void setExecutionMode(AbstractTransaction ts, ExecutionMode newMode) { PartitionExecutor.this.setExecutionMode(ts, newMode); } public ExecutionMode getExecutionMode() { return (PartitionExecutor.this.currentExecMode); } public Long getLastExecutedTxnId() { return (PartitionExecutor.this.lastExecutedTxnId); } public Long getLastCommittedTxnId() { return (PartitionExecutor.this.lastCommittedTxnId); } public long getLastCommittedIndoToken() { return (PartitionExecutor.this.lastCommittedUndoToken); } /** * Get the VoltProcedure handle of the current running txn. This could be null. 
* <B>FOR TESTING ONLY</B> */ public VoltProcedure getCurrentVoltProcedure() { return (PartitionExecutor.this.currentVoltProc); } /** * Get the txnId of the current distributed transaction at this partition * <B>FOR TESTING ONLY</B> */ public AbstractTransaction getCurrentDtxn() { return (PartitionExecutor.this.currentDtxn); } /** * Get the txnId of the current distributed transaction at this partition * <B>FOR TESTING ONLY</B> */ public Long getCurrentDtxnId() { Long ret = null; // This is a race condition, so we'll just ignore any errors if (PartitionExecutor.this.currentDtxn != null) { try { ret = PartitionExecutor.this.currentDtxn.getTransactionId(); } catch (NullPointerException ex) { // IGNORE } } return (ret); } public Long getCurrentTxnId() { return (PartitionExecutor.this.currentTxnId); } public int getBlockedWorkCount() { return (PartitionExecutor.this.currentBlockedTxns.size()); } /** * Return the number of spec exec txns have completed but are waiting * for the distributed txn to finish at this partition */ public int getBlockedSpecExecCount() { return (PartitionExecutor.this.specExecBlocked.size()); } public int getWorkQueueSize() { return (PartitionExecutor.this.work_queue.size()); } public void updateMemory() { PartitionExecutor.this.updateMemoryStats(EstTime.currentTimeMillis()); } /** * Replace the ConflictChecker. This should only be used for testing * @param checker */ protected void setConflictChecker(AbstractConflictChecker checker) { LOG.warn(String.format("Replacing original checker %s with %s at partition %d", specExecChecker.getClass().getSimpleName(), checker.getClass().getSimpleName(), partitionId)); specExecChecker = checker; specExecScheduler.getDebugContext().setConflictChecker(checker); } } private Debug cachedDebugContext; public Debug getDebugContext() { if (this.cachedDebugContext == null) { // We don't care if we're thread-safe here... this.cachedDebugContext = new Debug(); } return this.cachedDebugContext; } }
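    /**
     * Collect the input dependency tables that a WorkFragment needs before it can be executed.
     * If the transaction's control code is running at this HStoreSite, the tables are pulled from
     * the appropriate DependencyTracker; otherwise they are expected to have been shipped over as
     * "attached" input dependencies on the transaction handle.
     * @param ts The transaction that the WorkFragment belongs to.
     * @param input_dep_id The dependency id to look up (NULL_DEPENDENCY_ID is a no-op).
     * @param inputs Output map that the retrieved VoltTables are added to, keyed by dependency id.
     */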
    private void getFragmentInputs(AbstractTransaction ts,
                                   int input_dep_id,
                                   Map<Integer, List<VoltTable>> inputs) {
        if (input_dep_id == HStoreConstants.NULL_DEPENDENCY_ID) return;

        if (trace.val)
            LOG.trace(String.format("%s - Attempting to retrieve input dependencies for DependencyId #%d",
                      ts, input_dep_id));

        // If the Transaction is on the same HStoreSite, then all the
        // input dependencies will be internal and can be retrieved locally
        if (ts instanceof LocalTransaction) {
            DependencyTracker txnTracker = null;
            if (ts.getBasePartition() != this.partitionId) {
                txnTracker = hstore_site.getDependencyTracker(ts.getBasePartition());
            } else {
                txnTracker = this.depTracker;
            }
            List<VoltTable> deps = txnTracker.getInternalDependency((LocalTransaction)ts, input_dep_id);
            assert(deps != null);
            assert(inputs.containsKey(input_dep_id) == false);
            inputs.put(input_dep_id, deps);
            if (trace.val)
                LOG.trace(String.format("%s - Retrieved %d INTERNAL VoltTables for DependencyId #%d%s",
                          ts, deps.size(), input_dep_id,
                          (trace.val ? "\n" + deps : "")));
        }
        // Otherwise they will be "attached" inputs to the RemoteTransaction handle
        // We should really try to merge these two concepts into a single function call
        else if (ts.getAttachedInputDependencies().containsKey(input_dep_id)) {
            List<VoltTable> deps = ts.getAttachedInputDependencies().get(input_dep_id);
            List<VoltTable> pDeps = null;
            // We have to copy the tables if we have debugging enabled
            if (trace.val) { // this.firstPartition == false) {
                pDeps = new ArrayList<VoltTable>();
                for (VoltTable vt : deps) {
                    ByteBuffer buffer = vt.getTableDataReference();
                    byte arr[] = new byte[vt.getUnderlyingBufferSize()];
                    buffer.get(arr, 0, arr.length);
                    pDeps.add(new VoltTable(ByteBuffer.wrap(arr), true));
                }
            } else {
                pDeps = deps;
            }
            inputs.put(input_dep_id, pDeps);
            if (trace.val)
                LOG.trace(String.format("%s - Retrieved %d ATTACHED VoltTables for DependencyId #%d",
                          ts, deps.size(), input_dep_id));
        }
    }

    /**
     * Set the given AbstractTransaction handle as the current distributed txn
     * that is running at this partition. Note that this will check to make sure
     * that no other txn is marked as the currentDtxn.
     * @param ts
     */
    private void setCurrentDtxn(AbstractTransaction ts) {
        // There can never be another current dtxn still unfinished at this partition!
assert(this.currentBlockedTxns.isEmpty()) : String.format("Concurrent multi-partition transactions at partition %d: " + "Orig[%s] <=> New[%s] / BlockedQueue:%d", this.partitionId, this.currentDtxn, ts, this.currentBlockedTxns.size()); assert(this.currentDtxn == null) : String.format("Concurrent multi-partition transactions at partition %d: " + "Orig[%s] <=> New[%s] / BlockedQueue:%d", this.partitionId, this.currentDtxn, ts, this.currentBlockedTxns.size()); // Check whether we should check for speculative txns to execute whenever this // dtxn is idle at this partition this.currentDtxn = ts; if (hstore_conf.site.specexec_enable && ts.isSysProc() == false && this.specExecScheduler.isDisabled() == false) { this.specExecIgnoreCurrent = this.specExecChecker.shouldIgnoreTransaction(ts); } else { this.specExecIgnoreCurrent = true; } if (debug.val) { LOG.debug(String.format("Set %s as the current DTXN for partition %d [specExecIgnore=%s, previous=%s]", ts, this.partitionId, this.specExecIgnoreCurrent, this.lastDtxnDebug)); this.lastDtxnDebug = this.currentDtxn.toString(); } if (hstore_conf.site.exec_profiling && ts.getBasePartition() != this.partitionId) { profiler.sp2_time.start(); } } /** * Reset the current dtxn for this partition */ private void resetCurrentDtxn() { assert(this.currentDtxn != null) : "Trying to reset the currentDtxn when it is already null"; if (debug.val) LOG.debug(String.format("Resetting current DTXN for partition %d to null [previous=%s]", this.partitionId, this.lastDtxnDebug)); this.currentDtxn = null; } /** * Store a new prefetch result for a transaction * @param txnId * @param fragmentId * @param partitionId * @param params * @param result */ public void addPrefetchResult(LocalTransaction ts, int stmtCounter, int fragmentId, int partitionId, int paramsHash, VoltTable result) { if (debug.val) LOG.debug(String.format("%s - Adding prefetch result for %s with %d rows from partition %d " + "[stmtCounter=%d / paramsHash=%d]", ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragmentId).fullName(), result.getRowCount(), partitionId, stmtCounter, paramsHash)); this.depTracker.addPrefetchResult(ts, stmtCounter, fragmentId, partitionId, paramsHash, result); } // --------------------------------------------------------------- // PartitionExecutor API // --------------------------------------------------------------- /** * Queue a new transaction initialization at this partition. This will cause the * transaction to get added to this partition's lock queue. This PartitionExecutor does * not have to be this txn's base partition/ * @param ts */ public void queueSetPartitionLock(AbstractTransaction ts) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; SetDistributedTxnMessage work = ts.getSetDistributedTxnMessage(); boolean success = this.work_queue.offer(work); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to front of partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); } /** * New work from the coordinator that this local site needs to execute (non-blocking) * This method will simply chuck the task into the work queue. * We should not be sent an InitiateTaskMessage here! 
* @param ts * @param task */ public void queueWork(AbstractTransaction ts, WorkFragment fragment) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; WorkFragmentMessage work = ts.getWorkFragmentMessage(fragment); boolean success = this.work_queue.offer(work); // , true); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); ts.markQueuedWork(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work); } /** * Add a new work message to our utility queue * @param work */ public void queueUtilityWork(InternalMessage work) { if (debug.val) LOG.debug(String.format("Added utility work %s to partition %d", work.getClass().getSimpleName(), this.partitionId)); this.work_queue.offer(work); } /** * Put the prepare request for the transaction into the queue * @param task * @param status The final status of the transaction */ public void queuePrepare(AbstractTransaction ts) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; PrepareTxnMessage work = ts.getPrepareTxnMessage(); boolean success = this.work_queue.offer(work); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); // if (hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(); } /** * Put the finish request for the transaction into the queue * @param task * @param status The final status of the transaction */ public void queueFinish(AbstractTransaction ts, Status status) { assert(ts.isInitialized()) : "Unexpected uninitialized transaction: " + ts; FinishTxnMessage work = ts.getFinishTxnMessage(status); boolean success = this.work_queue.offer(work); // , true); assert(success) : String.format("Failed to queue %s at partition %d for %s", work, this.partitionId, ts); if (debug.val) LOG.debug(String.format("%s - Added %s to partition %d " + "work queue [size=%d]", ts, work.getClass().getSimpleName(), this.partitionId, this.work_queue.size())); // if (success) this.specExecScheduler.haltSearch(); } /** * Queue a new transaction invocation request at this partition * @param serializedRequest * @param catalog_proc * @param procParams * @param clientCallback * @return */ public boolean queueNewTransaction(ByteBuffer serializedRequest, long initiateTime, Procedure catalog_proc, ParameterSet procParams, RpcCallback<ClientResponseImpl> clientCallback) { boolean sysproc = catalog_proc.getSystemproc(); if (this.currentExecMode == ExecutionMode.DISABLED_REJECT && sysproc == false) return (false); InitializeRequestMessage work = new InitializeRequestMessage(serializedRequest, initiateTime, catalog_proc, procParams, clientCallback); if (debug.val) LOG.debug(String.format("Queuing %s for '%s' request on partition %d " + "[currentDtxn=%s, queueSize=%d, mode=%s]", work.getClass().getSimpleName(), catalog_proc.getName(), this.partitionId, this.currentDtxn, this.work_queue.size(), this.currentExecMode)); return (this.work_queue.offer(work)); } /** * Queue a new transaction invocation request at this partition * @param ts * @param task * @param callback */ public boolean 
    queueStartTransaction(LocalTransaction ts) {
        assert(ts != null) : "Unexpected null transaction handle!";
        boolean singlePartitioned = ts.isPredictSinglePartition();
        boolean force = (singlePartitioned == false) || ts.isMapReduce() || ts.isSysProc();

        // UPDATED 2012-07-12
        // We used to have a bunch of checks to determine whether we needed to
        // put the new request in the blocked queue or not. This required us to
        // acquire the exec_lock to do the check and then another lock to actually put
        // the request into the work_queue. Now we'll just throw it right in
        // the queue (checking for throttling of course) and let the main
        // thread sort out the mess of whether the txn should get blocked or not
        if (this.currentExecMode == ExecutionMode.DISABLED_REJECT) {
            if (debug.val)
                LOG.warn(String.format("%s - Not queuing txn at partition %d because current mode is %s",
                         ts, this.partitionId, this.currentExecMode));
            return (false);
        }

        StartTxnMessage work = ts.getStartTxnMessage();
        if (debug.val)
            LOG.debug(String.format("Queuing %s for '%s' request on partition %d " +
                      "[currentDtxn=%s, queueSize=%d, mode=%s]",
                      work.getClass().getSimpleName(), ts.getProcedure().getName(), this.partitionId,
                      this.currentDtxn, this.work_queue.size(), this.currentExecMode));
        boolean success = this.work_queue.offer(work); // , force);
        if (debug.val && force && success == false) {
            String msg = String.format("Failed to add %s even though force flag was true!", ts);
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
        if (success && hstore_conf.site.specexec_enable) this.specExecScheduler.interruptSearch(work);
        return (success);
    }

    // ---------------------------------------------------------------
    // WORK QUEUE PROCESSING METHODS
    // ---------------------------------------------------------------

    /**
     * Process a WorkResult and update the internal state of the LocalTransaction accordingly.
     * Note that this will always be invoked by a thread other than the main execution thread
     * for this PartitionExecutor. That means if something comes back that's bad, we need a way
     * to alert the other thread so that it can act on it.
* @param ts * @param result */ private void processWorkResult(LocalTransaction ts, WorkResult result) { boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); if (debug.val) LOG.debug(String.format("Processing WorkResult for %s on partition %d [srcPartition=%d, deps=%d]", ts, this.partitionId, result.getPartitionId(), result.getDepDataCount())); // If the Fragment failed to execute, then we need to abort the Transaction // Note that we have to do this before we add the responses to the TransactionState so that // we can be sure that the VoltProcedure knows about the problem when it wakes the stored // procedure back up if (result.getStatus() != Status.OK) { if (trace.val) LOG.trace(String.format("Received non-success response %s from partition %d for %s", result.getStatus(), result.getPartitionId(), ts)); SerializableException error = null; if (needs_profiling) ts.profiler.startDeserialization(); try { ByteBuffer buffer = result.getError().asReadOnlyByteBuffer(); error = SerializableException.deserializeFromBuffer(buffer); } catch (Exception ex) { String msg = String.format("Failed to deserialize SerializableException from partition %d " + "for %s [bytes=%d]", result.getPartitionId(), ts, result.getError().size()); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ts.profiler.stopDeserialization(); } // At this point there is no need to even deserialize the rest of the message because // we know that we're going to have to abort the transaction if (error == null) { LOG.warn(ts + " - Unexpected null SerializableException\n" + result); } else { if (debug.val) LOG.error(String.format("%s - Got error from partition %d in %s", ts, result.getPartitionId(), result.getClass().getSimpleName()), error); ts.setPendingError(error, true); } return; } if (needs_profiling) ts.profiler.startDeserialization(); for (int i = 0, cnt = result.getDepDataCount(); i < cnt; i++) { if (trace.val) LOG.trace(String.format("Storing intermediate results from partition %d for %s", result.getPartitionId(), ts)); int depId = result.getDepId(i); ByteString bs = result.getDepData(i); VoltTable vt = null; if (bs.isEmpty() == false) { FastDeserializer fd = new FastDeserializer(bs.asReadOnlyByteBuffer()); try { vt = fd.readObject(VoltTable.class); } catch (Exception ex) { throw new ServerFaultException("Failed to deserialize VoltTable from partition " + result.getPartitionId() + " for " + ts, ex); } } this.depTracker.addResult(ts, result.getPartitionId(), depId, vt); } // FOR (dependencies) if (needs_profiling) ts.profiler.stopDeserialization(); } /** * Execute a new transaction at this partition. * This will invoke the run() method define in the VoltProcedure for this txn and * then process the ClientResponse. Only the PartitionExecutor itself should be calling * this directly, since it's the only thing that knows what's going on with the world... * @param ts */ private void executeTransaction(LocalTransaction ts) { assert(ts.isInitialized()) : String.format("Trying to execute uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedReleased(this.partitionId)) : String.format("Transaction %s was not marked released at partition %d before being executed", ts, this.partitionId); if (trace.val) LOG.debug(String.format("%s - Attempting to start transaction on partition %d", ts, this.partitionId)); // If this is a MapReduceTransaction handle, we actually want to get the // inner LocalTransaction handle for this partition. 
The MapReduceTransaction // is just a placeholder if (ts instanceof MapReduceTransaction) { MapReduceTransaction mr_ts = (MapReduceTransaction)ts; ts = mr_ts.getLocalTransaction(this.partitionId); assert(ts != null) : "Unexpected null LocalTransaction handle from " + mr_ts; } ExecutionMode before_mode = this.currentExecMode; boolean predict_singlePartition = ts.isPredictSinglePartition(); // ------------------------------- // DISTRIBUTED TXN // ------------------------------- if (predict_singlePartition == false) { // If there is already a dtxn running, then we need to throw this // mofo back into the blocked txn queue // TODO: If our dtxn is on the same site as us, then at this point we know that // it is done executing the control code and is sending around 2PC messages // to commit/abort. That means that we could assume that all of the other // remote partitions are going to agree on the same outcome and we can start // speculatively executing this dtxn. After all, if we're at this point in // the PartitionExecutor then we know that we got this partition's locks // from the TransactionQueueManager. if (this.currentDtxn != null && this.currentDtxn.equals(ts) == false) { assert(this.currentDtxn.equals(ts) == false) : String.format("New DTXN %s != Current DTXN %s", ts, this.currentDtxn); // If this is a local txn, then we can finagle things a bit. if (this.currentDtxn.isExecLocal(this.partitionId)) { // It would be safe for us to speculative execute this DTXN right here // if the currentDtxn has aborted... but we can never be in this state. assert(this.currentDtxn.isAborted() == false) : // Sanity Check String.format("We want to execute %s on partition %d but aborted %s is still hanging around\n", ts, this.partitionId, this.currentDtxn, this.work_queue); // So that means we know that it committed, which doesn't necessarily mean // that it will still commit, but we'll be able to abort, rollback, and requeue // if that happens. // TODO: Right now our current dtxn marker is a single value. We may want to // switch it to a FIFO queue so that we can multiple guys hanging around. // For now we will just do the default thing and block this txn this.blockTransaction(ts); return; } // If it's not local, then we just have to block it right away else { this.blockTransaction(ts); return; } } // If there is no other DTXN right now, then we're it! else if (this.currentDtxn == null) { // || this.currentDtxn.equals(ts) == false) { this.setCurrentDtxn(ts); } // 2011-11-14: We don't want to set the execution mode here, because we know that we // can check whether we were read-only after the txn finishes this.setExecutionMode(this.currentDtxn, ExecutionMode.COMMIT_NONE); if (debug.val) LOG.debug(String.format("Marking %s as current DTXN on Partition %d [isLocal=%s, execMode=%s]", ts, this.partitionId, true, this.currentExecMode)); } // ------------------------------- // SINGLE-PARTITION TXN // ------------------------------- else { // If this is a single-partition transaction, then we need to check whether we are // being executed under speculative execution mode. We have to check this here // because it may be the case that we queued a bunch of transactions when speculative // execution was enabled, but now the transaction that was ahead of this one is finished, // so now we're just executing them regularly if (this.currentDtxn != null) { // HACK: If we are currently under DISABLED mode when we get this, then we just // need to block the transaction and return back to the queue. 
This is easier than // having to set all sorts of crazy locks if (this.currentExecMode == ExecutionMode.DISABLED || hstore_conf.site.specexec_enable == false) { if (debug.val) LOG.debug(String.format("%s - Blocking single-partition %s until dtxn finishes [mode=%s]", this.currentDtxn, ts, this.currentExecMode)); this.blockTransaction(ts); return; } assert(ts.getSpeculationType() != null); if (debug.val) LOG.debug(String.format("Speculatively executing %s while waiting for dtxn %s [%s]", ts, this.currentDtxn, ts.getSpeculationType())); assert(ts.isSpeculative()) : ts + " was not marked as being speculative!"; } } // If we reach this point, we know that we're about to execute our homeboy here... if (hstore_conf.site.txn_profiling && ts.profiler != null) { ts.profiler.startExec(); } if (hstore_conf.site.exec_profiling) this.profiler.numTransactions++; // Make sure the dependency tracker knows about us if (ts.hasDependencyTracker()) this.depTracker.addTransaction(ts); // Grab a new ExecutionState for this txn ExecutionState execState = this.initExecutionState(); ts.setExecutionState(execState); VoltProcedure volt_proc = this.getVoltProcedure(ts.getProcedure().getId()); assert(volt_proc != null) : "No VoltProcedure for " + ts; if (debug.val) { LOG.debug(String.format("%s - Starting execution of txn on partition %d " + "[txnMode=%s, mode=%s]", ts, this.partitionId, before_mode, this.currentExecMode)); if (trace.val) LOG.trace(String.format("Current Transaction at partition #%d\n%s", this.partitionId, ts.debug())); } if (hstore_conf.site.txn_counters) TransactionCounter.EXECUTED.inc(ts.getProcedure()); ClientResponseImpl cresponse = null; VoltProcedure previous = this.currentVoltProc; try { this.currentVoltProc = volt_proc; cresponse = volt_proc.call(ts, ts.getProcedureParameters().toArray()); // Blocking... // VoltProcedure.call() should handle any exceptions thrown by the transaction // If we get anything out here then that's bad news } catch (Throwable ex) { if (this.isShuttingDown() == false) { SQLStmt last[] = volt_proc.voltLastQueriesExecuted(); LOG.fatal("Unexpected error while executing " + ts, ex); if (last.length > 0) { LOG.fatal(String.format("Last Queries Executed [%d]: %s", last.length, Arrays.toString(last))); } LOG.fatal("LocalTransactionState Dump:\n" + ts.debug()); this.crash(ex); } } finally { this.currentVoltProc = previous; ts.resetExecutionState(); execState.finish(); this.execStates.add(execState); this.finishVoltProcedure(volt_proc); if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPost(); // if (cresponse.getStatus() == Status.ABORT_UNEXPECTED) { // cresponse.getException().printStackTrace(); // } } // If this is a MapReduce job, then we can just ignore the ClientResponse // and return immediately. The VoltMapReduceProcedure is responsible for storing // the result at the proper location. 
if (ts.isMapReduce()) { return; } else if (cresponse == null) { assert(this.isShuttingDown()) : String.format("No ClientResponse for %s???", ts); return; } // ------------------------------- // PROCESS RESPONSE AND FIGURE OUT NEXT STEP // ------------------------------- Status status = cresponse.getStatus(); if (debug.val) { LOG.debug(String.format("%s - Finished execution of transaction control code " + "[status=%s, beforeMode=%s, currentMode=%s]", ts, status, before_mode, this.currentExecMode)); if (ts.hasPendingError()) { LOG.debug(String.format("%s - Txn finished with pending error: %s", ts, ts.getPendingErrorMessage())); } } // We assume that most transactions are not speculatively executed and are successful // Therefore we don't want to grab the exec_mode lock here. if (predict_singlePartition == false || this.canProcessClientResponseNow(ts, status, before_mode)) { this.processClientResponse(ts, cresponse); } // Otherwise always queue our response, since we know that whatever thread is out there // is waiting for us to finish before it drains the queued responses else { // If the transaction aborted, then we can't execute any transaction that touch the tables // that this guy touches. But since we can't just undo this transaction without undoing // everything that came before it, we'll just disable executing all transactions until the // current distributed transaction commits if (status != Status.OK && ts.isExecReadOnly(this.partitionId) == false) { this.setExecutionMode(ts, ExecutionMode.DISABLED); int blocked = this.work_queue.drainTo(this.currentBlockedTxns); if (debug.val) { if (trace.val && blocked > 0) LOG.trace(String.format("Blocking %d transactions at partition %d because ExecutionMode is now %s", blocked, this.partitionId, this.currentExecMode)); LOG.debug(String.format("Disabling execution on partition %d because speculative %s aborted", this.partitionId, ts)); } } if (trace.val) LOG.trace(String.format("%s - Queuing ClientResponse [status=%s, origMode=%s, newMode=%s, dtxn=%s]", ts, cresponse.getStatus(), before_mode, this.currentExecMode, this.currentDtxn)); this.blockClientResponse(ts, cresponse); } } /** * Determines whether a finished transaction that executed locally can have their ClientResponse processed immediately * or if it needs to wait for the response from the outstanding multi-partition transaction for this partition * (1) This is the multi-partition transaction that everyone is waiting for * (2) The transaction was not executed under speculative execution mode * (3) The transaction does not need to wait for the multi-partition transaction to finish first * @param ts * @param status * @param before_mode * @return */ private boolean canProcessClientResponseNow(LocalTransaction ts, Status status, ExecutionMode before_mode) { if (debug.val) LOG.debug(String.format("%s - Checking whether to process %s response now at partition %d " + "[singlePartition=%s, readOnly=%s, specExecModified=%s, before=%s, current=%s]", ts, status, this.partitionId, ts.isPredictSinglePartition(), ts.isExecReadOnly(this.partitionId), this.specExecModified, before_mode, this.currentExecMode)); // Commit All if (this.currentExecMode == ExecutionMode.COMMIT_ALL) { return (true); } // SPECIAL CASE // Any user-aborted, speculative single-partition transaction should be processed immediately. 
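        // (A speculatively executed txn that hits a user abort has already had its changes
        //  rolled back, so its response does not depend on the outcome of the current dtxn.)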
        else if (status == Status.ABORT_USER && ts.isSpeculative()) {
            return (true);
        }
        // // SPECIAL CASE
        // // If this txn threw a user abort, and the current outstanding dtxn is read-only
        // // then it's safe for us to rollback
        // else if (status == Status.ABORT_USER &&
        //          this.currentDtxn != null &&
        //          this.currentDtxn.isExecReadOnly(this.partitionId)) {
        //     return (true);
        // }
        // SPECIAL CASE
        // Anything mispredicted should be processed right away
        else if (status == Status.ABORT_MISPREDICT) {
            return (true);
        }
        // Process successful txns based on the mode that it was executed under
        else if (status == Status.OK) {
            switch (before_mode) {
                case COMMIT_ALL:
                    return (true);
                case COMMIT_READONLY:
                    // Read-only speculative txns can be committed right now
                    // TODO: Right now we're going to use the specExecModified flag to disable
                    // sending out any results from spec execed txns that may have read from
                    // a modified database. We should switch to a bitmap of table ids so that we
                    // can be more selective.
                    // return (false);
                    return (this.specExecModified == false && ts.isExecReadOnly(this.partitionId));
                case COMMIT_NONE: {
                    // If this txn does not conflict with the current dtxn, then we should be able
                    // to let it commit but we can't because of the way our undo tokens work
                    return (false);
                }
                default:
                    throw new ServerFaultException("Unexpected execution mode: " + before_mode, ts.getTransactionId());
            } // SWITCH
        }
        // // If the transaction aborted and it was read-only thus far, then we want to process it immediately
        // else if (status != Status.OK && ts.isExecReadOnly(this.partitionId)) {
        //     return (true);
        // }
        assert(this.currentExecMode != ExecutionMode.COMMIT_ALL) :
            String.format("Queuing ClientResponse for %s when in non-speculative mode [mode=%s, status=%s]",
                          ts, this.currentExecMode, status);
        return (false);
    }

    /**
     * Process a WorkFragment for a transaction and execute it in this partition's underlying EE.
     * @param ts
     * @param fragment
     * @param allParameters The array of all the ParameterSets for the current SQLStmt batch.
*/ private void processWorkFragment(AbstractTransaction ts, WorkFragment fragment, ParameterSet allParameters[]) { assert(this.partitionId == fragment.getPartitionId()) : String.format("Tried to execute WorkFragment %s for %s at partition %d but it was suppose " + "to be executed on partition %d", fragment.getFragmentIdList(), ts, this.partitionId, fragment.getPartitionId()); assert(ts.isMarkedPrepared(this.partitionId) == false) : String.format("Tried to execute WorkFragment %s for %s at partition %d after it was marked 2PC:PREPARE", fragment.getFragmentIdList(), ts, this.partitionId); // A txn is "local" if the Java is executing at the same partition as this one boolean is_basepartition = (ts.getBasePartition() == this.partitionId); boolean is_remote = (ts instanceof LocalTransaction == false); boolean is_prefetch = fragment.getPrefetch(); boolean is_readonly = fragment.getReadOnly(); if (debug.val) LOG.debug(String.format("%s - Executing %s [isBasePartition=%s, isRemote=%s, isPrefetch=%s, isReadOnly=%s, fragments=%s]", ts, fragment.getClass().getSimpleName(), is_basepartition, is_remote, is_prefetch, is_readonly, fragment.getFragmentIdCount())); // If this WorkFragment isn't being executed at this txn's base partition, then // we need to start a new execution round if (is_basepartition == false) { long undoToken = this.calculateNextUndoToken(ts, is_readonly); ts.initRound(this.partitionId, undoToken); ts.startRound(this.partitionId); } DependencySet result = null; Status status = Status.OK; SerializableException error = null; // Check how many fragments are not marked as ignored // If the fragment is marked as ignore then it means that it was already // sent to this partition for prefetching. We need to make sure that we remove // it from the list of fragmentIds that we need to execute. int fragmentCount = fragment.getFragmentIdCount(); for (int i = 0; i < fragmentCount; i++) { if (fragment.getStmtIgnore(i)) { fragmentCount--; } } // FOR final ParameterSet parameters[] = tmp_fragmentParams.getParameterSet(fragmentCount); assert(parameters.length == fragmentCount); // Construct data given to the EE to execute this work fragment this.tmp_EEdependencies.clear(); long fragmentIds[] = tmp_fragmentIds.getArray(fragmentCount); int fragmentOffsets[] = tmp_fragmentOffsets.getArray(fragmentCount); int outputDepIds[] = tmp_outputDepIds.getArray(fragmentCount); int inputDepIds[] = tmp_inputDepIds.getArray(fragmentCount); int offset = 0; for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) { if (fragment.getStmtIgnore(i) == false) { fragmentIds[offset] = fragment.getFragmentId(i); fragmentOffsets[offset] = i; outputDepIds[offset] = fragment.getOutputDepId(i); inputDepIds[offset] = fragment.getInputDepId(i); parameters[offset] = allParameters[fragment.getParamIndex(i)]; this.getFragmentInputs(ts, inputDepIds[offset], this.tmp_EEdependencies); if (trace.val && ts.isSysProc() == false && is_basepartition == false) LOG.trace(String.format("%s - Offset:%d FragmentId:%d OutputDep:%d/%d InputDep:%d/%d", ts, offset, fragmentIds[offset], outputDepIds[offset], fragment.getOutputDepId(i), inputDepIds[offset], fragment.getInputDepId(i))); offset++; } } // FOR assert(offset == fragmentCount); try { result = this.executeFragmentIds(ts, ts.getLastUndoToken(this.partitionId), fragmentIds, parameters, outputDepIds, inputDepIds, this.tmp_EEdependencies); } catch (EvictedTupleAccessException ex) { // XXX: What do we do if this is not a single-partition txn? 
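            // The requested tuples were evicted by the anti-caching manager; report
            // ABORT_EVICTEDACCESS so the txn can be handled upstream (e.g. restarted once
            // the evicted block has been read back in).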
status = Status.ABORT_EVICTEDACCESS; error = ex; } catch (ConstraintFailureException ex) { status = Status.ABORT_UNEXPECTED; error = ex; } catch (SQLException ex) { status = Status.ABORT_UNEXPECTED; error = ex; } catch (EEException ex) { // this.crash(ex); status = Status.ABORT_UNEXPECTED; error = ex; } catch (Throwable ex) { status = Status.ABORT_UNEXPECTED; if (ex instanceof SerializableException) { error = (SerializableException)ex; } else { error = new SerializableException(ex); } } finally { if (error != null) { // error.printStackTrace(); LOG.warn(String.format("%s - Unexpected %s on partition %d", ts, error.getClass().getSimpleName(), this.partitionId), error); // (debug.val ? error : null)); } // Success, but without any results??? if (result == null && status == Status.OK) { String msg = String.format("The WorkFragment %s executed successfully on Partition %d but " + "result is null for %s", fragment.getFragmentIdList(), this.partitionId, ts); Exception ex = new Exception(msg); if (debug.val) LOG.warn(ex); status = Status.ABORT_UNEXPECTED; error = new SerializableException(ex); } } // For single-partition INSERT/UPDATE/DELETE queries, we don't directly // execute the SendPlanNode in order to get back the number of tuples that // were modified. So we have to rely on the output dependency ids set in the task assert(status != Status.OK || (status == Status.OK && result.size() == fragmentIds.length)) : "Got back " + result.size() + " results but was expecting " + fragmentIds.length; // Make sure that we mark the round as finished before we start sending results if (is_basepartition == false) { ts.finishRound(this.partitionId); } // ------------------------------- // PREFETCH QUERIES // ------------------------------- if (is_prefetch) { // Regardless of whether this txn is running at the same HStoreSite as this PartitionExecutor, // we always need to put the result inside of the local query cache // This is so that we can identify if we get request for a query that we have already executed // We'll only do this if it succeeded. If it failed, then we won't do anything and will // just wait until they come back to execute the query again before // we tell them that something went wrong. It's ghetto, but it's just easier this way... if (status == Status.OK) { // We're going to store the result in the base partition cache if they're // on the same HStoreSite as us if (is_remote == false) { PartitionExecutor other = this.hstore_site.getPartitionExecutor(ts.getBasePartition()); for (int i = 0, cnt = result.size(); i < cnt; i++) { if (trace.val) LOG.trace(String.format("%s - Storing %s prefetch result [params=%s]", ts, CatalogUtil.getPlanFragment(catalogContext.catalog, fragment.getFragmentId(fragmentOffsets[i])).fullName(), parameters[i])); other.addPrefetchResult((LocalTransaction)ts, fragment.getStmtCounter(fragmentOffsets[i]), fragment.getFragmentId(fragmentOffsets[i]), this.partitionId, parameters[i].hashCode(), result.dependencies[i]); } // FOR } } // Now if it's a remote transaction, we need to use the coordinator to send // them our result. Note that we want to send a single message per partition. Unlike // with the TransactionWorkRequests, we don't need to wait until all of the partitions // that are prefetching for this txn at our local HStoreSite to finish. 
            if (is_remote) {
                WorkResult wr = this.buildWorkResult(ts, result, status, error);
                TransactionPrefetchResult.Builder builder = TransactionPrefetchResult.newBuilder()
                            .setTransactionId(ts.getTransactionId().longValue())
                            .setSourcePartition(this.partitionId)
                            .setResult(wr)
                            .setStatus(status)
                            .addAllFragmentId(fragment.getFragmentIdList())
                            .addAllStmtCounter(fragment.getStmtCounterList());
                for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) {
                    builder.addParamHash(parameters[i].hashCode());
                }
                if (debug.val)
                    LOG.debug(String.format("%s - Sending back %s to partition %d [numResults=%s, status=%s]",
                              ts, wr.getClass().getSimpleName(), ts.getBasePartition(), result.size(), status));
                hstore_coordinator.transactionPrefetchResult((RemoteTransaction)ts, builder.build());
            }
        }
        // -------------------------------
        // LOCAL TRANSACTION
        // -------------------------------
        else if (is_remote == false) {
            LocalTransaction local_ts = (LocalTransaction)ts;

            // If the transaction is local, store the result directly in the local TransactionState
            if (status == Status.OK) {
                if (trace.val)
                    LOG.trace(String.format("%s - Storing %d dependency results locally for successful work fragment",
                              ts, result.size()));
                assert(result.size() == outputDepIds.length);
                DependencyTracker otherTracker = this.hstore_site.getDependencyTracker(ts.getBasePartition());
                for (int i = 0; i < outputDepIds.length; i++) {
                    if (trace.val)
                        LOG.trace(String.format("%s - Storing DependencyId #%d [numRows=%d]\n%s",
                                  ts, outputDepIds[i], result.dependencies[i].getRowCount(), result.dependencies[i]));
                    try {
                        otherTracker.addResult(local_ts, this.partitionId, outputDepIds[i], result.dependencies[i]);
                    } catch (Throwable ex) {
                        // ex.printStackTrace();
                        String msg = String.format("Failed to store Dependency #%d for %s [idx=%d, fragmentId=%d]",
                                                   outputDepIds[i], ts, i, fragmentIds[i]);
                        LOG.error(String.format("%s - WorkFragment:%d\nExpectedIds:%s\nOutputDepIds: %s\nResultDepIds: %s\n%s",
                                  msg, fragment.hashCode(), fragment.getOutputDepIdList(),
                                  Arrays.toString(outputDepIds), Arrays.toString(result.depIds), fragment));
                        throw new ServerFaultException(msg, ex);
                    }
                } // FOR
            } else {
                local_ts.setPendingError(error, true);
            }
        }
        // -------------------------------
        // REMOTE TRANSACTION
        // -------------------------------
        else {
            if (trace.val)
                LOG.trace(String.format("%s - Constructing WorkResult with %d bytes from partition %d to send " +
                          "back to initial partition %d [status=%s]",
                          ts, (result != null ? result.size() : null),
                          this.partitionId, ts.getBasePartition(), status));
            RpcCallback<WorkResult> callback = ((RemoteTransaction)ts).getWorkCallback();
            if (callback == null) {
                LOG.fatal("Unable to send FragmentResponseMessage for " + ts);
                LOG.fatal("Original WorkFragment:\n" + fragment);
                LOG.fatal(ts.toString());
                throw new ServerFaultException("No RPC callback to HStoreSite for " + ts, ts.getTransactionId());
            }
            WorkResult response = this.buildWorkResult((RemoteTransaction)ts, result, status, error);
            assert(response != null);
            callback.run(response);
        }

        // Check whether this is the last query that we're going to get
        // from this transaction.
If it is, then we can go ahead and prepare the txn if (is_basepartition == false && fragment.getLastFragment()) { if (debug.val) LOG.debug(String.format("%s - Invoking early 2PC:PREPARE at partition %d", ts, this.partitionId)); this.queuePrepare(ts); } } /** * Executes a WorkFragment on behalf of some remote site and returns the * resulting DependencySet * @param fragment * @return * @throws Exception */ private DependencySet executeFragmentIds(AbstractTransaction ts, long undoToken, long fragmentIds[], ParameterSet parameters[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) throws Exception { if (fragmentIds.length == 0) { LOG.warn(String.format("Got a fragment batch for %s that does not have any fragments?", ts)); return (null); } // *********************************** DEBUG *********************************** if (trace.val) { LOG.trace(String.format("%s - Getting ready to kick %d fragments to partition %d EE [undoToken=%d]", ts, fragmentIds.length, this.partitionId, (undoToken != HStoreConstants.NULL_UNDO_LOGGING_TOKEN ? undoToken : "null"))); // if (trace.val) { // LOG.trace("WorkFragmentIds: " + Arrays.toString(fragmentIds)); // Map<String, Object> m = new LinkedHashMap<String, Object>(); // for (int i = 0; i < parameters.length; i++) { // m.put("Parameter[" + i + "]", parameters[i]); // } // FOR // LOG.trace("Parameters:\n" + StringUtil.formatMaps(m)); // } } // *********************************** DEBUG *********************************** DependencySet result = null; // ------------------------------- // SYSPROC FRAGMENTS // ------------------------------- if (ts.isSysProc()) { result = this.executeSysProcFragments(ts, undoToken, fragmentIds.length, fragmentIds, parameters, output_depIds, input_depIds, input_deps); // ------------------------------- // REGULAR FRAGMENTS // ------------------------------- } else { result = this.executePlanFragments(ts, undoToken, fragmentIds.length, fragmentIds, parameters, output_depIds, input_depIds, input_deps); if (result == null) { LOG.warn(String.format("Output DependencySet for %s in %s is null?", Arrays.toString(fragmentIds), ts)); } } return (result); } /** * Execute a BatchPlan directly on this PartitionExecutor without having to covert it * to WorkFragments first. 
This is big speed improvement over having to queue things up * @param ts * @param plan * @return */ private VoltTable[] executeLocalPlan(LocalTransaction ts, BatchPlanner.BatchPlan plan, ParameterSet parameterSets[]) { // Start the new execution round long undoToken = this.calculateNextUndoToken(ts, plan.isReadOnly()); ts.initFirstRound(undoToken, plan.getBatchSize()); int fragmentCount = plan.getFragmentCount(); long fragmentIds[] = plan.getFragmentIds(); int output_depIds[] = plan.getOutputDependencyIds(); int input_depIds[] = plan.getInputDependencyIds(); // Mark that we touched the local partition once for each query in the batch // ts.getTouchedPartitions().put(this.partitionId, plan.getBatchSize()); // Only notify other partitions that we're done with them if we're not // a single-partition transaction if (hstore_conf.site.specexec_enable && ts.isPredictSinglePartition() == false) { //FIXME //PartitionSet new_done = ts.calculateDonePartitions(this.thresholds); //if (new_done != null && new_done.isEmpty() == false) { // LocalPrepareCallback callback = ts.getPrepareCallback(); // assert(callback.isInitialized()); // this.hstore_coordinator.transactionPrepare(ts, callback, new_done); //} } if (trace.val) LOG.trace(String.format("Txn #%d - BATCHPLAN:\n" + " fragmentIds: %s\n" + " fragmentCount: %s\n" + " output_depIds: %s\n" + " input_depIds: %s", ts.getTransactionId(), Arrays.toString(plan.getFragmentIds()), plan.getFragmentCount(), Arrays.toString(plan.getOutputDependencyIds()), Arrays.toString(plan.getInputDependencyIds()))); // NOTE: There are no dependencies that we need to pass in because the entire // batch is local to this partition. DependencySet result = null; try { result = this.executePlanFragments(ts, undoToken, fragmentCount, fragmentIds, parameterSets, output_depIds, input_depIds, null); } finally { ts.fastFinishRound(this.partitionId); } // assert(result != null) : "Unexpected null DependencySet result for " + ts; if (trace.val) LOG.trace("Output:\n" + result); return (result != null ? 
result.dependencies : null); } /** * Execute the given fragment tasks on this site's underlying EE * @param ts * @param undoToken * @param batchSize * @param fragmentIds * @param parameterSets * @param output_depIds * @param input_depIds * @return */ private DependencySet executeSysProcFragments(AbstractTransaction ts, long undoToken, int batchSize, long fragmentIds[], ParameterSet parameters[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) { assert(fragmentIds.length == 1); assert(fragmentIds.length == parameters.length) : String.format("%s - Fragments:%d / Parameters:%d", ts, fragmentIds.length, parameters.length); VoltSystemProcedure volt_proc = this.m_registeredSysProcPlanFragments.get(fragmentIds[0]); if (volt_proc == null) { String msg = "No sysproc handle exists for FragmentID #" + fragmentIds[0] + " :: " + this.m_registeredSysProcPlanFragments; throw new ServerFaultException(msg, ts.getTransactionId()); } // HACK: We have to set the TransactionState for sysprocs manually volt_proc.setTransactionState(ts); ts.markExecNotReadOnly(this.partitionId); DependencySet result = null; try { result = volt_proc.executePlanFragment(ts.getTransactionId(), this.tmp_EEdependencies, (int)fragmentIds[0], parameters[0], this.m_systemProcedureContext); } catch (Throwable ex) { String msg = "Unexpected error when executing system procedure"; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } if (debug.val) LOG.debug(String.format("%s - Finished executing sysproc fragment for %s (#%d)%s", ts, m_registeredSysProcPlanFragments.get(fragmentIds[0]).getClass().getSimpleName(), fragmentIds[0], (trace.val ? "\n" + result : ""))); return (result); } /** * Execute the given fragment tasks on this site's underlying EE * @param ts * @param undoToken * @param batchSize * @param fragmentIds * @param parameterSets * @param output_depIds * @param input_depIds * @return */ private DependencySet executePlanFragments(AbstractTransaction ts, long undoToken, int batchSize, long fragmentIds[], ParameterSet parameterSets[], int output_depIds[], int input_depIds[], Map<Integer, List<VoltTable>> input_deps) { assert(this.ee != null) : "The EE object is null. This is bad!"; Long txn_id = ts.getTransactionId(); //LOG.info("in executePlanFragments()"); // *********************************** DEBUG *********************************** if (debug.val) { StringBuilder sb = new StringBuilder(); sb.append(String.format("%s - Executing %d fragments [lastTxnId=%d, undoToken=%d]", ts, batchSize, this.lastCommittedTxnId, undoToken)); // if (trace.val) { Map<String, Object> m = new LinkedHashMap<String, Object>(); m.put("Fragments", Arrays.toString(fragmentIds)); Map<Integer, Object> inner = new LinkedHashMap<Integer, Object>(); for (int i = 0; i < batchSize; i++) inner.put(i, parameterSets[i].toString()); m.put("Parameters", inner); if (batchSize > 0 && input_depIds[0] != HStoreConstants.NULL_DEPENDENCY_ID) { inner = new LinkedHashMap<Integer, Object>(); for (int i = 0; i < batchSize; i++) { List<VoltTable> deps = input_deps.get(input_depIds[i]); inner.put(input_depIds[i], (deps != null ? StringUtil.join("\n", deps) : "???")); } // FOR m.put("Input Dependencies", inner); } m.put("Output Dependencies", Arrays.toString(output_depIds)); sb.append("\n" + StringUtil.formatMaps(m)); // } LOG.debug(sb.toString().trim()); } // *********************************** DEBUG *********************************** // pass attached dependencies to the EE (for non-sysproc work). 
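// Input dependencies are the intermediate result tables that the fragments below consume; stashing them in the EE up front lets each plan fragment look up its inputs by dependency id when it runs.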
if (input_deps != null && input_deps.isEmpty() == false) { if (debug.val) LOG.debug(String.format("%s - Stashing %d InputDependencies at partition %d", ts, input_deps.size(), this.partitionId)); this.ee.stashWorkUnitDependencies(input_deps); } // Java-based Table Read-Write Sets boolean readonly = true; boolean speculative = ts.isSpeculative(); boolean singlePartition = ts.isPredictSinglePartition(); int tableIds[] = null; for (int i = 0; i < batchSize; i++) { boolean fragReadOnly = PlanFragmentIdGenerator.isPlanFragmentReadOnly(fragmentIds[i]); // We don't need to maintain read/write sets for non-speculative txns if (speculative || singlePartition == false) { if (fragReadOnly) { tableIds = catalogContext.getReadTableIds(Long.valueOf(fragmentIds[i])); if (tableIds != null) ts.markTableIdsRead(this.partitionId, tableIds); } else { tableIds = catalogContext.getWriteTableIds(Long.valueOf(fragmentIds[i])); if (tableIds != null) ts.markTableIdsWritten(this.partitionId, tableIds); } } readonly = readonly && fragReadOnly; } // Enable read/write set tracking if (hstore_conf.site.exec_readwrite_tracking && ts.hasExecutedWork(this.partitionId) == false) { if (trace.val) LOG.trace(String.format("%s - Enabling read/write set tracking in EE at partition %d", ts, this.partitionId)); this.ee.trackingEnable(txn_id); } // Check whether the txn has only exeuted read-only queries up to this point if (ts.isExecReadOnly(this.partitionId)) { if (readonly == false) { if (trace.val) LOG.trace(String.format("%s - Marking txn as not read-only %s", ts, Arrays.toString(fragmentIds))); ts.markExecNotReadOnly(this.partitionId); } // We can do this here because the only way that we're not read-only is if // we actually modify data at this partition ts.markExecutedWork(this.partitionId); } DependencySet result = null; boolean needs_profiling = false; if (ts.isExecLocal(this.partitionId)) { if (hstore_conf.site.txn_profiling && ((LocalTransaction)ts).profiler != null) { needs_profiling = true; ((LocalTransaction)ts).profiler.startExecEE(); } } Throwable error = null; try { assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to execute work using undoToken %d for %s but " + "it is less than the last committed undoToken %d at partition %d", undoToken, ts, this.lastCommittedUndoToken, this.partitionId); if (trace.val) LOG.trace(String.format("%s - Executing fragments %s at partition %d [undoToken=%d]", ts, Arrays.toString(fragmentIds), this.partitionId, undoToken)); result = this.ee.executeQueryPlanFragmentsAndGetDependencySet( fragmentIds, batchSize, input_depIds, output_depIds, parameterSets, batchSize, txn_id.longValue(), this.lastCommittedTxnId.longValue(), undoToken); } catch (AssertionError ex) { LOG.error("Fatal error when processing " + ts + "\n" + ts.debug()); error = ex; throw ex; } catch (EvictedTupleAccessException ex) { if (debug.val) LOG.warn("Caught EvictedTupleAccessException."); error = ex; throw ex; } catch (SerializableException ex) { if (debug.val) LOG.error(String.format("%s - Unexpected error in the ExecutionEngine on partition %d", ts, this.partitionId), ex); error = ex; throw ex; } catch (Throwable ex) { error = ex; String msg = String.format("%s - Failed to execute PlanFragments: %s", ts, Arrays.toString(fragmentIds)); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ((LocalTransaction)ts).profiler.stopExecEE(); if (error == null && result == null) { LOG.warn(String.format("%s - Finished executing fragments but got back null results [fragmentIds=%s]", ts, 
Arrays.toString(fragmentIds))); } } // *********************************** DEBUG *********************************** if (debug.val) { if (result != null) { LOG.debug(String.format("%s - Finished executing fragments and got back %d results", ts, result.depIds.length)); } else { LOG.warn(String.format("%s - Finished executing fragments but got back null results? That seems bad...", ts)); } } // *********************************** DEBUG *********************************** return (result); } /** * Load a VoltTable directly into the EE at this partition. * <B>NOTE:</B> This should only be invoked by a system stored procedure. * @param txn_id * @param clusterName * @param databaseName * @param tableName * @param data * @param allowELT * @throws VoltAbortException */ public void loadTable(AbstractTransaction ts, String clusterName, String databaseName, String tableName, VoltTable data, int allowELT) throws VoltAbortException { Table table = this.catalogContext.database.getTables().getIgnoreCase(tableName); if (table == null) { throw new VoltAbortException("Table '" + tableName + "' does not exist in database " + clusterName + "." + databaseName); } if (debug.val) LOG.debug(String.format("Loading %d row(s) into %s [txnId=%d]", data.getRowCount(), table.getName(), ts.getTransactionId())); ts.markExecutedWork(this.partitionId); this.ee.loadTable(table.getRelativeIndex(), data, ts.getTransactionId(), this.lastCommittedTxnId.longValue(), ts.getLastUndoToken(this.partitionId), allowELT != 0); } /** * Load a VoltTable directly into the EE at this partition. * <B>NOTE:</B> This should only be used for testing * @param txnId * @param table * @param data * @param allowELT * @throws VoltAbortException */ protected void loadTable(Long txnId, Table table, VoltTable data, boolean allowELT) throws VoltAbortException { if (debug.val) LOG.debug(String.format("Loading %d row(s) into %s [txnId=%d]", data.getRowCount(), table.getName(), txnId)); this.ee.loadTable(table.getRelativeIndex(), data, txnId.longValue(), this.lastCommittedTxnId.longValue(), HStoreConstants.NULL_UNDO_LOGGING_TOKEN, allowELT); } /** * Execute a SQLStmt batch at this partition. This is the main entry point from * VoltProcedure for where we will execute a SQLStmt batch from a txn. * @param ts The txn handle that is executing this query batch * @param batchSize The number of SQLStmts that the txn queued up using voltQueueSQL() * @param batchStmts The SQLStmts that the txn is trying to execute * @param batchParams The input parameters for the SQLStmts * @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @param forceSinglePartition Whether to force the BatchPlanner to only generate a single-partition plan * @return */ public VoltTable[] executeSQLStmtBatch(LocalTransaction ts, int batchSize, SQLStmt batchStmts[], ParameterSet batchParams[], boolean finalTask, boolean forceSinglePartition) { boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); if (needs_profiling) { ts.profiler.addBatch(batchSize); ts.profiler.stopExecJava(); ts.profiler.startExecPlanning(); } // HACK: This is needed to handle updates on replicated tables properly // when there is only one partition in the cluster. if (catalogContext.numberOfPartitions == 1) { this.depTracker.addTransaction(ts); } if (hstore_conf.site.exec_deferrable_queries) { // TODO: Loop through batchStmts and check whether their corresponding Statement // is marked as deferrable. 
If so, then remove them from batchStmts and batchParams // (sliding everyone over by one in the arrays). Queue up the deferred query. // Be sure to decrement batchSize after you have finished processing this. // EXAMPLE: batchStmts[0].getStatement().getDeferrable() } // Calculate the hash code for this batch to see whether we already have a planner final Integer batchHashCode = VoltProcedure.getBatchHashCode(batchStmts, batchSize); BatchPlanner planner = this.batchPlanners.get(batchHashCode); if (planner == null) { // Assume fast case planner = new BatchPlanner(batchStmts, batchSize, ts.getProcedure(), this.p_estimator, forceSinglePartition); this.batchPlanners.put(batchHashCode, planner); } assert(planner != null); // At this point we have to calculate exactly what we need to do on each partition // for this batch. So somehow right now we need to fire this off to either our // local executor or to Evan's magical distributed transaction manager BatchPlanner.BatchPlan plan = planner.plan(ts.getTransactionId(), this.partitionId, ts.getPredictTouchedPartitions(), ts.getTouchedPartitions(), batchParams); assert(plan != null); if (trace.val) { LOG.trace(ts + " - Touched Partitions: " + ts.getTouchedPartitions().values()); LOG.trace(ts + " - Next BatchPlan:\n" + plan.toString()); } if (needs_profiling) ts.profiler.stopExecPlanning(); // Tell the TransactionEstimator that we're about to execute these mofos EstimatorState t_state = ts.getEstimatorState(); if (this.localTxnEstimator != null && t_state != null && t_state.isUpdatesEnabled()) { if (needs_profiling) ts.profiler.startExecEstimation(); try { this.localTxnEstimator.executeQueries(t_state, planner.getStatements(), plan.getStatementPartitions()); } finally { if (needs_profiling) ts.profiler.stopExecEstimation(); } } else if (t_state != null && t_state.shouldAllowUpdates()) { LOG.warn("Skipping estimator updates for " + ts); } // Check whether our plan resulted in a misprediction // Doing it this way allows us to update the TransactionEstimator before we abort the txn if (plan.getMisprediction() != null) { MispredictionException ex = plan.getMisprediction(); ts.setPendingError(ex, false); assert(ex.getPartitions().isEmpty() == false) : "Unexpected empty PartitionSet for mispredicted txn " + ts; // Print Misprediction Debug if (hstore_conf.site.exec_mispredict_crash) { // Use a lock so that we only dump out the first txn that fails synchronized (PartitionExecutor.class) { LOG.warn("\n" + EstimatorUtil.mispredictDebug(ts, planner, batchStmts, batchParams)); LOG.fatal(String.format("Crashing because site.exec_mispredict_crash is true [txn=%s]", ts)); this.crash(ex); } // SYNCH } else if (debug.val) { if (trace.val) LOG.warn("\n" + EstimatorUtil.mispredictDebug(ts, planner, batchStmts, batchParams)); LOG.debug(ts + " - Aborting and restarting mispredicted txn."); } throw ex; } // Keep track of the number of times that we've executed each query for this transaction int stmtCounters[] = this.tmp_stmtCounters.getArray(batchSize); for (int i = 0; i < batchSize; i++) { stmtCounters[i] = ts.updateStatementCounter(batchStmts[i].getStatement()); } // FOR if (ts.hasPrefetchQueries()) { PartitionSet stmtPartitions[] = plan.getStatementPartitions(); PrefetchState prefetchState = ts.getPrefetchState(); assert(prefetchState != null); QueryTracker queryTracker = prefetchState.getExecQueryTracker(); for (int i = 0; i < batchSize; i++) { // We always have to update the query tracker regardless of whether // the query was prefetched or not.
This is so that we can ensure // that we execute the queries in the right order. Statement stmt = batchStmts[i].getStatement(); stmtCounters[i] = queryTracker.addQuery(stmt, stmtPartitions[i], batchParams[i]); } // FOR // FIXME PrefetchQueryUtil.checkSQLStmtBatch(this, ts, plan, batchSize, batchStmts, batchParams); } // PREFETCH VoltTable results[] = null; // FAST-PATH: Single-partition + Local // If the BatchPlan only has WorkFragments that are for this partition, then // we can use the fast-path executeLocalPlan() method if (plan.isSingledPartitionedAndLocal()) { if (trace.val) LOG.trace(String.format("%s - Sending %s directly to the ExecutionEngine at partition %d", ts, plan.getClass().getSimpleName(), this.partitionId)); // If this the finalTask flag is set to true, and we're only executing queries at this // partition, then we need to notify the other partitions that we're done with them. if (hstore_conf.site.exec_early_prepare && finalTask == true && ts.isPredictSinglePartition() == false && ts.isSysProc() == false && ts.allowEarlyPrepare() == true) { tmp_fragmentsPerPartition.clearValues(); tmp_fragmentsPerPartition.put(this.partitionId, batchSize); DonePartitionsNotification notify = this.computeDonePartitions(ts, null, tmp_fragmentsPerPartition, finalTask); if (notify != null && notify.hasSitesToNotify()) this.notifyDonePartitions(ts, notify); } // Execute the queries right away. results = this.executeLocalPlan(ts, plan, batchParams); } // DISTRIBUTED EXECUTION // Otherwise, we need to generate WorkFragments and then send the messages out // to our remote partitions using the HStoreCoordinator else { ExecutionState execState = ts.getExecutionState(); execState.tmp_partitionFragments.clear(); plan.getWorkFragmentsBuilders(ts.getTransactionId(), stmtCounters, execState.tmp_partitionFragments); if (debug.val) LOG.debug(String.format("%s - Using dispatchWorkFragments to execute %d %ss", ts, execState.tmp_partitionFragments.size(), WorkFragment.class.getSimpleName())); if (needs_profiling) { int remote_cnt = 0; PartitionSet stmtPartitions[] = plan.getStatementPartitions(); for (int i = 0; i < batchSize; i++) { if (stmtPartitions[i].get() != ts.getBasePartition()) remote_cnt++; if (trace.val) LOG.trace(String.format("%s - [%02d] stmt:%s / partitions:%s", ts, i, batchStmts[i].getStatement().getName(), stmtPartitions[i])); } // FOR if (trace.val) LOG.trace(String.format("%s - Remote Queries Count = %d", ts, remote_cnt)); ts.profiler.addRemoteQuery(remote_cnt); } // Block until we get all of our responses. 
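// dispatchWorkFragments() ships the WorkFragments to the partitions that need them (running the local ones on this partition's EE) and blocks this thread until every output dependency has come back or the txn hits an error.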
results = this.dispatchWorkFragments(ts, batchSize, batchParams, execState.tmp_partitionFragments, finalTask); } if (debug.val && results == null) LOG.warn("Got back a null results array for " + ts + "\n" + plan.toString()); if (needs_profiling) ts.profiler.startExecJava(); return (results); } /** * * @param fresponse */ protected WorkResult buildWorkResult(AbstractTransaction ts, DependencySet result, Status status, SerializableException error) { WorkResult.Builder builder = WorkResult.newBuilder(); // Partition Id builder.setPartitionId(this.partitionId); // Status builder.setStatus(status); // SerializableException if (error != null) { int size = error.getSerializedSize(); BBContainer bc = this.buffer_pool.acquire(size); try { error.serializeToBuffer(bc.b); } catch (IOException ex) { String msg = "Failed to serialize error for " + ts; throw new ServerFaultException(msg, ex); } bc.b.rewind(); builder.setError(ByteString.copyFrom(bc.b)); bc.discard(); } // Push dependencies back to the remote partition that needs it if (status == Status.OK) { for (int i = 0, cnt = result.size(); i < cnt; i++) { builder.addDepId(result.depIds[i]); this.fs.clear(); try { result.dependencies[i].writeExternal(this.fs); ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b); builder.addDepData(bs); } catch (Exception ex) { throw new ServerFaultException(String.format("Failed to serialize output dependency %d for %s", result.depIds[i], ts), ex); } if (trace.val) LOG.trace(String.format("%s - Serialized Output Dependency %d\n%s", ts, result.depIds[i], result.dependencies[i])); } // FOR this.fs.getBBContainer().discard(); } return (builder.build()); } /** * This method is invoked when the PartitionExecutor wants to execute work at a remote HStoreSite. * The doneNotificationsPerSite is an array where each offset (based on SiteId) may contain * a PartitionSet of the partitions that this txn is finished with at the remote node and will * not be executing any work in the current batch. * @param ts * @param fragmentBuilders * @param parameterSets * @param doneNotificationsPerSite */ private void requestWork(LocalTransaction ts, Collection<WorkFragment.Builder> fragmentBuilders, List<ByteString> parameterSets, DonePartitionsNotification notify) { assert(fragmentBuilders.isEmpty() == false); assert(ts != null); Long txn_id = ts.getTransactionId(); if (trace.val) LOG.trace(String.format("%s - Wrapping %d %s into a %s", ts, fragmentBuilders.size(), WorkFragment.class.getSimpleName(), TransactionWorkRequest.class.getSimpleName())); // If our transaction was originally designated as a single-partitioned, then we need to make // sure that we don't touch any partition other than our local one. If we do, then we need abort // it and restart it as multi-partitioned boolean need_restart = false; boolean predict_singlepartition = ts.isPredictSinglePartition(); PartitionSet done_partitions = ts.getDonePartitions(); Estimate t_estimate = ts.getLastEstimate(); // Now we can go back through and start running all of the WorkFragments that were not blocked // waiting for an input dependency. 
Note that we pack all the fragments into a single // CoordinatorFragment rather than sending each WorkFragment in its own message for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { assert(this.depTracker.isBlocked(ts, fragmentBuilder) == false); final int target_partition = fragmentBuilder.getPartitionId(); final int target_site = catalogContext.getSiteIdForPartitionId(target_partition); final PartitionSet doneNotifications = (notify != null ? notify.getNotifications(target_site) : null); // Make sure that this isn't a single-partition txn trying to access a remote partition if (predict_singlepartition && target_partition != this.partitionId) { if (debug.val) LOG.debug(String.format("%s - Txn on partition %d is supposed to be " + "single-partitioned, but it wants to execute a fragment on partition %d", ts, this.partitionId, target_partition)); need_restart = true; break; } // Make sure that this txn isn't trying to access a partition that we said we were // done with earlier else if (done_partitions.contains(target_partition)) { if (debug.val) LOG.warn(String.format("%s on partition %d was marked as done on partition %d " + "but now it wants to go back for more!", ts, this.partitionId, target_partition)); need_restart = true; break; } // Make sure we at least have something to do! else if (fragmentBuilder.getFragmentIdCount() == 0) { LOG.warn(String.format("%s - Trying to send a WorkFragment request with 0 fragments", ts)); continue; } // Add in the specexec query estimate at this partition if needed if (hstore_conf.site.specexec_enable && t_estimate != null && t_estimate.hasQueryEstimate(target_partition)) { List<CountedStatement> queryEst = t_estimate.getQueryEstimate(target_partition); // if (debug.val) if (target_partition == 0) if (debug.val) LOG.debug(String.format("%s - Sending remote query estimate to partition %d " + "containing %d queries\n%s", ts, target_partition, queryEst.size(), StringUtil.join("\n", queryEst))); assert(queryEst.isEmpty() == false); QueryEstimate.Builder estBuilder = QueryEstimate.newBuilder(); for (CountedStatement countedStmt : queryEst) { estBuilder.addStmtIds(countedStmt.statement.getId()); estBuilder.addStmtCounters(countedStmt.counter); } // FOR fragmentBuilder.setFutureStatements(estBuilder); } // Get the TransactionWorkRequest.Builder for the remote HStoreSite // We will use this to store our serialized input dependencies TransactionWorkRequestBuilder requestBuilder = tmp_transactionRequestBuilders[target_site]; if (requestBuilder == null) { requestBuilder = tmp_transactionRequestBuilders[target_site] = new TransactionWorkRequestBuilder(); } TransactionWorkRequest.Builder builder = requestBuilder.getBuilder(ts, doneNotifications); // Also keep track of what Statements they are executing so that we know // what we need to send over the wire to them.
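// Recording the ParameterSet indexes that this fragment references lets the request builder figure out which of the batch's parameters actually have to be shipped to that remote site.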
requestBuilder.addParamIndexes(fragmentBuilder.getParamIndexList()); // Input Dependencies if (fragmentBuilder.getNeedsInput()) { if (debug.val) LOG.debug(String.format("%s - Retrieving input dependencies at partition %d", ts, this.partitionId)); tmp_removeDependenciesMap.clear(); for (int i = 0, cnt = fragmentBuilder.getInputDepIdCount(); i < cnt; i++) { this.getFragmentInputs(ts, fragmentBuilder.getInputDepId(i), tmp_removeDependenciesMap); } // FOR for (Entry<Integer, List<VoltTable>> e : tmp_removeDependenciesMap.entrySet()) { if (requestBuilder.hasInputDependencyId(e.getKey())) continue; if (debug.val) LOG.debug(String.format("%s - Attaching %d input dependencies to be sent to %s", ts, e.getValue().size(), HStoreThreadManager.formatSiteName(target_site))); for (VoltTable vt : e.getValue()) { this.fs.clear(); try { this.fs.writeObject(vt); builder.addAttachedDepId(e.getKey().intValue()); builder.addAttachedData(ByteString.copyFrom(this.fs.getBBContainer().b)); } catch (Exception ex) { String msg = String.format("Failed to serialize input dependency %d for %s", e.getKey(), ts); throw new ServerFaultException(msg, ts.getTransactionId()); } if (debug.val) LOG.debug(String.format("%s - Storing %d rows for InputDependency %d to send " + "to partition %d [bytes=%d]", ts, vt.getRowCount(), e.getKey(), fragmentBuilder.getPartitionId(), CollectionUtil.last(builder.getAttachedDataList()).size())); } // FOR requestBuilder.addInputDependencyId(e.getKey()); } // FOR this.fs.getBBContainer().discard(); } builder.addFragments(fragmentBuilder); } // FOR (tasks) // Bad mojo! We need to throw a MispredictionException so that the VoltProcedure // will catch it and we can propagate the error message all the way back to the HStoreSite if (need_restart) { if (trace.val) LOG.trace(String.format("Aborting %s because it was mispredicted", ts)); // This is kind of screwy because we don't actually want to send the touched partitions // histogram because VoltProcedure will just do it for us... throw new MispredictionException(txn_id, null); } // Stick on the ParameterSets that each site needs into the TransactionWorkRequest for (int target_site = 0; target_site < tmp_transactionRequestBuilders.length; target_site++) { TransactionWorkRequestBuilder builder = tmp_transactionRequestBuilders[target_site]; if (builder == null || builder.isDirty() == false) { continue; } assert(builder != null); builder.addParameterSets(parameterSets); // Bombs away! this.hstore_coordinator.transactionWork(ts, target_site, builder.build(), this.request_work_callback); if (debug.val) LOG.debug(String.format("%s - Sent Work request to remote site %s", ts, HStoreThreadManager.formatSiteName(target_site))); } // FOR } /** * Figure out what partitions this transaction is done with. This will only return * a PartitionSet of what partitions we think we're done with. * For each partition that we idenitfy that the txn is done with, we will check to see * whether the txn is going to execute a query at its site in this batch. If it's not, * then we will notify that HStoreSite through the HStoreCoordinator. * If the partition that it doesn't need anymore is local (i.e., it's at the same * HStoreSite that we're at right now), then we'll just pass them a quick message * to let them know that they can prepare the txn. * @param ts * @param estimate * @param fragmentsPerPartition A histogram of the number of PlanFragments the * txn will execute in this batch at each partition. 
* @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @return A notification object that can be used to notify partitions that this txn is done with them. */ private DonePartitionsNotification computeDonePartitions(final LocalTransaction ts, final Estimate estimate, final FastIntHistogram fragmentsPerPartition, final boolean finalTask) { final PartitionSet touchedPartitions = ts.getPredictTouchedPartitions(); final PartitionSet donePartitions = ts.getDonePartitions(); // Compute the partitions that the txn will be finished with after this batch PartitionSet estDonePartitions = null; // If the finalTask flag is set to true, then the new done partitions // is every partition that this txn has locked if (finalTask) { estDonePartitions = touchedPartitions; } // Otherwise, we'll rely on the transaction's current estimate to figure it out. else { if (estimate == null || estimate.isValid() == false) { if (debug.val) LOG.debug(String.format("%s - Unable to compute new done partitions because there " + "is no valid estimate for the txn", ts, estimate.getClass().getSimpleName())); return (null); } estDonePartitions = estimate.getDonePartitions(this.thresholds); if (estDonePartitions == null || estDonePartitions.isEmpty()) { if (debug.val) LOG.debug(String.format("%s - There are no new done partitions identified by %s", ts, estimate.getClass().getSimpleName())); return (null); } } assert(estDonePartitions != null) : "Null done partitions for " + ts; assert(estDonePartitions.isEmpty() == false) : "Empty done partitions for " + ts; if (debug.val) LOG.debug(String.format("%s - New estimated done partitions %s%s", ts, estDonePartitions, (trace.val ? "\n"+estimate : ""))); // Note that we can actually be done with ourself, if this txn is only going to execute queries // at remote partitions. But we can't actually execute anything because this partition's only // execution thread is going to be blocked. So we always do this so that we're not sending a // useless message estDonePartitions.remove(this.partitionId); // Make sure that we only tell partitions that we actually touched, otherwise they will // be stuck waiting for a finish request that will never come! DonePartitionsNotification notify = new DonePartitionsNotification(); for (int partition : estDonePartitions.values()) { // Only mark the txn done at this partition if the Estimate says we were done // with it after executing this batch and it's a partition that we've locked. if (donePartitions.contains(partition) || touchedPartitions.contains(partition) == false) continue; if (trace.val) LOG.trace(String.format("%s - Marking partition %d as done for txn", ts, partition)); notify.donePartitions.add(partition); if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.markEarly2PCPartition(partition); // Check whether we're executing a query at this partition in this batch. // If we're not, then we need to check whether we can piggyback the "done" message // in another WorkFragment going to that partition or whether we have to // send a separate TransactionPrepareRequest if (fragmentsPerPartition.get(partition, 0) == 0) { // We need to let them know that the party is over! 
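// If the partition lives at this HStoreSite we can hand the prepare request straight to its PartitionExecutor; otherwise we check whether the notification can piggyback on a WorkFragment already headed to that remote site, or has to go out as a separate prepare message.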
if (hstore_site.isLocalPartition(partition)) { // if (debug.val) LOG.info(String.format("%s - Notifying local partition %d that txn is finished it", ts, partition)); hstore_site.getPartitionExecutor(partition).queuePrepare(ts); } // Check whether we can piggyback on another WorkFragment that is going to // the same site else { Site remoteSite = catalogContext.getSiteForPartition(partition); boolean found = false; for (Partition remotePartition : remoteSite.getPartitions().values()) { if (fragmentsPerPartition.get(remotePartition.getId(), 0) != 0) { found = true; break; } } // FOR notify.addSiteNotification(remoteSite, partition, (found == false)); } } } // FOR return (notify); } /** * Send asynchronous notification messages to any remote site to tell them that we * are done with partitions that they have. * @param ts * @param notify */ private void notifyDonePartitions(LocalTransaction ts, DonePartitionsNotification notify) { // BLAST OUT NOTIFICATIONS! for (int remoteSiteId : notify._sitesToNotify) { assert(notify.notificationsPerSite[remoteSiteId] != null); if (debug.val) LOG.info(String.format("%s - Notifying %s that txn is finished with partitions %s", ts, HStoreThreadManager.formatSiteName(remoteSiteId), notify.notificationsPerSite[remoteSiteId])); hstore_coordinator.transactionPrepare(ts, ts.getPrepareCallback(), notify.notificationsPerSite[remoteSiteId]); // Make sure that we remove the PartitionSet for this site so that we don't // try to send the notifications again. notify.notificationsPerSite[remoteSiteId] = null; } // FOR } /** * Execute the given tasks and then block the current thread waiting for the list of dependency_ids to come * back from whatever it was we were suppose to do... * This is the slowest way to execute a bunch of WorkFragments and therefore should only be invoked * for batches that need to access non-local partitions * @param ts The txn handle that is executing this query batch * @param batchSize The number of SQLStmts that the txn queued up using voltQueueSQL() * @param batchParams The input parameters for the SQLStmts * @param allFragmentBuilders * @param finalTask Whether the txn has marked this as the last batch that they will ever execute * @return */ public VoltTable[] dispatchWorkFragments(final LocalTransaction ts, final int batchSize, final ParameterSet batchParams[], final Collection<WorkFragment.Builder> allFragmentBuilders, boolean finalTask) { assert(allFragmentBuilders.isEmpty() == false) : "Unexpected empty WorkFragment list for " + ts; final boolean needs_profiling = (hstore_conf.site.txn_profiling && ts.profiler != null); // *********************************** DEBUG *********************************** if (debug.val) { LOG.debug(String.format("%s - Preparing to dispatch %d messages and wait for the results [needsProfiling=%s]", ts, allFragmentBuilders.size(), needs_profiling)); if (trace.val) { StringBuilder sb = new StringBuilder(); sb.append(ts + " - WorkFragments:\n"); for (WorkFragment.Builder fragment : allFragmentBuilders) { sb.append(StringBoxUtil.box(fragment.toString()) + "\n"); } // FOR sb.append(ts + " - ParameterSets:\n"); for (ParameterSet ps : batchParams) { sb.append(ps + "\n"); } // FOR LOG.trace(sb); } } // *********************************** DEBUG *********************************** // OPTIONAL: Check to make sure that this request is valid // (1) At least one of the WorkFragments needs to be executed on a remote partition // (2) All of the PlanFragments ids in the WorkFragments match this txn's Procedure if 
(hstore_conf.site.exec_validate_work && ts.isSysProc() == false) { LOG.warn(String.format("%s - Checking whether all of the WorkFragments are valid", ts)); boolean has_remote = false; for (WorkFragment.Builder frag : allFragmentBuilders) { if (frag.getPartitionId() != this.partitionId) { has_remote = true; } for (int frag_id : frag.getFragmentIdList()) { PlanFragment catalog_frag = CatalogUtil.getPlanFragment(catalogContext.database, frag_id); Statement catalog_stmt = catalog_frag.getParent(); assert(catalog_stmt != null); Procedure catalog_proc = catalog_stmt.getParent(); if (catalog_proc.equals(ts.getProcedure()) == false) { LOG.warn(ts.debug() + "\n" + allFragmentBuilders + "\n---- INVALID ----\n" + frag); String msg = String.format("%s - Unexpected %s", ts, catalog_frag.fullName()); throw new ServerFaultException(msg, ts.getTransactionId()); } } } // FOR if (has_remote == false) { LOG.warn(ts.debug() + "\n" + allFragmentBuilders); String msg = ts + "Trying to execute all local single-partition queries using the slow-path!"; throw new ServerFaultException(msg, ts.getTransactionId()); } } boolean first = true; boolean serializedParams = false; CountDownLatch latch = null; boolean all_local = true; boolean is_localSite; boolean is_localPartition; boolean is_localReadOnly = true; int num_localPartition = 0; int num_localSite = 0; int num_remote = 0; int num_skipped = 0; int total = 0; Collection<WorkFragment.Builder> fragmentBuilders = allFragmentBuilders; // Make sure our txn is in our DependencyTracker if (trace.val) LOG.trace(String.format("%s - Added transaction to %s", ts, this.depTracker.getClass().getSimpleName())); this.depTracker.addTransaction(ts); // Count the number of fragments that we're going to send to each partition and // figure out whether the txn will always be read-only at this partition tmp_fragmentsPerPartition.clearValues(); for (WorkFragment.Builder fragmentBuilder : allFragmentBuilders) { int partition = fragmentBuilder.getPartitionId(); tmp_fragmentsPerPartition.put(partition); if (this.partitionId == partition && fragmentBuilder.getReadOnly() == false) { is_localReadOnly = false; } } // FOR long undoToken = this.calculateNextUndoToken(ts, is_localReadOnly); ts.initFirstRound(undoToken, batchSize); final boolean predict_singlePartition = ts.isPredictSinglePartition(); // Calculate whether we are finished with partitions now final Estimate lastEstimate = ts.getLastEstimate(); DonePartitionsNotification notify = null; if (hstore_conf.site.exec_early_prepare && ts.isSysProc() == false && ts.allowEarlyPrepare()) { notify = this.computeDonePartitions(ts, lastEstimate, tmp_fragmentsPerPartition, finalTask); if (notify != null && notify.hasSitesToNotify()) this.notifyDonePartitions(ts, notify); } // Attach the ParameterSets to our transaction handle so that anybody on this HStoreSite // can access them directly without needing to deserialize them from the WorkFragments ts.attachParameterSets(batchParams); // Now if we have some work sent out to other partitions, we need to wait until they come back // In the first part, we wait until all of our blocked WorkFragments become unblocked final BlockingDeque<Collection<WorkFragment.Builder>> queue = this.depTracker.getUnblockedWorkFragmentsQueue(ts); // Run through this loop if: // (1) We have no pending errors // (2) This is our first time in the loop (first == true) // (3) If we know that there are still messages being blocked // (4) If we know that there are still unblocked messages that we need to process // (5) The latch 
for this round is still greater than zero while (ts.hasPendingError() == false && (first == true || this.depTracker.stillHasWorkFragments(ts) || (latch != null && latch.getCount() > 0))) { if (trace.val) LOG.trace(String.format("%s - %s loop [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, ClassUtil.getCurrentMethodName(), first, this.depTracker.stillHasWorkFragments(ts), queue.size(), latch)); // If this is the not first time through the loop, then poll the queue // to get our list of fragments if (first == false) { all_local = true; is_localSite = false; is_localPartition = false; num_localPartition = 0; num_localSite = 0; num_remote = 0; num_skipped = 0; total = 0; if (trace.val) LOG.trace(String.format("%s - Waiting for unblocked tasks on partition %d", ts, this.partitionId)); fragmentBuilders = queue.poll(); // NON-BLOCKING // If we didn't get back a list of fragments here, then we will spin through // and invoke utilityWork() to try to do something useful until what we need shows up if (needs_profiling) ts.profiler.startExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.start(); try { while (fragmentBuilders == null) { // If there is more work that we could do, then we'll just poll the queue // without waiting so that we can go back and execute it again if we have // more time. if (this.utilityWork()) { fragmentBuilders = queue.poll(); } // Otherwise we will wait a little so that we don't spin the CPU else { fragmentBuilders = queue.poll(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS); } } // WHILE } catch (InterruptedException ex) { if (this.hstore_site.isShuttingDown() == false) { LOG.error(String.format("%s - We were interrupted while waiting for blocked tasks", ts), ex); } return (null); } finally { if (needs_profiling) ts.profiler.stopExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.stopIfStarted(); } } assert(fragmentBuilders != null); // If the list to fragments unblock is empty, then we // know that we have dispatched all of the WorkFragments for the // transaction's current SQLStmt batch. That means we can just wait // until all the results return to us. if (fragmentBuilders.isEmpty()) { if (trace.val) LOG.trace(String.format("%s - Got an empty list of WorkFragments at partition %d. " + "Blocking until dependencies arrive", ts, this.partitionId)); break; } this.tmp_localWorkFragmentBuilders.clear(); if (predict_singlePartition == false) { this.tmp_remoteFragmentBuilders.clear(); this.tmp_localSiteFragmentBuilders.clear(); } // ------------------------------- // FAST PATH: Assume everything is local // ------------------------------- if (predict_singlePartition) { for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { if (first == false || this.depTracker.addWorkFragment(ts, fragmentBuilder, batchParams)) { this.tmp_localWorkFragmentBuilders.add(fragmentBuilder); total++; num_localPartition++; } } // FOR // We have to tell the transaction handle to start the round before we send off the // WorkFragments for execution, since they might start executing locally! 
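// On the first pass through the loop we officially start the round for this batch and grab the dependency latch that tracks how many results are still outstanding.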
if (first) { ts.startRound(this.partitionId); latch = this.depTracker.getDependencyLatch(ts); } // Execute all of our WorkFragments quickly at our local ExecutionEngine for (WorkFragment.Builder fragmentBuilder : this.tmp_localWorkFragmentBuilders) { if (debug.val) LOG.debug(String.format("%s - Got unblocked %s to execute locally", ts, fragmentBuilder.getClass().getSimpleName())); assert(fragmentBuilder.getPartitionId() == this.partitionId) : String.format("Trying to process %s for %s on partition %d but it should have been " + "sent to partition %d [singlePartition=%s]\n%s", fragmentBuilder.getClass().getSimpleName(), ts, this.partitionId, fragmentBuilder.getPartitionId(), predict_singlePartition, fragmentBuilder); WorkFragment fragment = fragmentBuilder.build(); this.processWorkFragment(ts, fragment, batchParams); } // FOR } // ------------------------------- // SLOW PATH: Mixed local and remote messages // ------------------------------- else { // Look at each task and figure out whether it needs to be executed at a remote // HStoreSite or whether we can execute it at one of our local PartitionExecutors. for (WorkFragment.Builder fragmentBuilder : fragmentBuilders) { int partition = fragmentBuilder.getPartitionId(); is_localSite = hstore_site.isLocalPartition(partition); is_localPartition = (partition == this.partitionId); all_local = all_local && is_localPartition; // If this is the last WorkFragment that we're going to send to this partition for // this batch, then we will want to check whether we know that this is the last // time this txn will ever need to go to that txn. If so, then we'll want to if (notify != null && notify.donePartitions.contains(partition) && tmp_fragmentsPerPartition.dec(partition) == 0) { if (debug.val) LOG.debug(String.format("%s - Setting last fragment flag in %s for partition %d", ts, WorkFragment.class.getSimpleName(), partition)); fragmentBuilder.setLastFragment(true); } if (first == false || this.depTracker.addWorkFragment(ts, fragmentBuilder, batchParams)) { total++; // At this point we know that all the WorkFragment has been registered // in the LocalTransaction, so then it's safe for us to look to see // whether we already have a prefetched result that we need // if (prefetch && is_localPartition == false) { // boolean skip_queue = true; // for (int i = 0, cnt = fragmentBuilder.getFragmentIdCount(); i < cnt; i++) { // int fragId = fragmentBuilder.getFragmentId(i); // int paramIdx = fragmentBuilder.getParamIndex(i); // // VoltTable vt = this.queryCache.getResult(ts.getTransactionId(), // fragId, // partition, // parameters[paramIdx]); // if (vt != null) { // if (trace.val) // LOG.trace(String.format("%s - Storing cached result from partition %d for fragment %d", // ts, partition, fragId)); // this.depTracker.addResult(ts, partition, fragmentBuilder.getOutputDepId(i), vt); // } else { // skip_queue = false; // } // } // FOR // // If we were able to get cached results for all of the fragmentIds in // // this WorkFragment, then there is no need for us to send the message // // So we'll just skip queuing it up! How nice! 
// if (skip_queue) { // if (debug.val) // LOG.debug(String.format("%s - Using prefetch result for all fragments from partition %d", // ts, partition)); // num_skipped++; // continue; // } // } // Otherwise add it to our list of WorkFragments that we want // queue up right now if (is_localPartition) { is_localReadOnly = (is_localReadOnly && fragmentBuilder.getReadOnly()); this.tmp_localWorkFragmentBuilders.add(fragmentBuilder); num_localPartition++; } else if (is_localSite) { this.tmp_localSiteFragmentBuilders.add(fragmentBuilder); num_localSite++; } else { this.tmp_remoteFragmentBuilders.add(fragmentBuilder); num_remote++; } } } // FOR assert(total == (num_remote + num_localSite + num_localPartition + num_skipped)) : String.format("Total:%d / Remote:%d / LocalSite:%d / LocalPartition:%d / Skipped:%d", total, num_remote, num_localSite, num_localPartition, num_skipped); // We have to tell the txn to start the round before we send off the // WorkFragments for execution, since they might start executing locally! if (first) { ts.startRound(this.partitionId); latch = this.depTracker.getDependencyLatch(ts); } // Now request the fragments that aren't local // We want to push these out as soon as possible if (num_remote > 0) { // We only need to serialize the ParameterSets once if (serializedParams == false) { if (needs_profiling) ts.profiler.startSerialization(); tmp_serializedParams.clear(); for (int i = 0; i < batchParams.length; i++) { if (batchParams[i] == null) { tmp_serializedParams.add(ByteString.EMPTY); } else { this.fs.clear(); try { batchParams[i].writeExternal(this.fs); ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b); tmp_serializedParams.add(bs); } catch (Exception ex) { String msg = "Failed to serialize ParameterSet " + i + " for " + ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } } // FOR if (needs_profiling) ts.profiler.stopSerialization(); } if (trace.val) LOG.trace(String.format("%s - Requesting %d %s to be executed on remote partitions " + "[doneNotifications=%s]", ts, WorkFragment.class.getSimpleName(), num_remote, notify!=null)); this.requestWork(ts, tmp_remoteFragmentBuilders, tmp_serializedParams, notify); if (needs_profiling) ts.profiler.markRemoteQuery(); } // Then dispatch the task that are needed at the same HStoreSite but // at a different partition than this one if (num_localSite > 0) { if (trace.val) LOG.trace(String.format("%s - Executing %d WorkFragments on local site's partitions", ts, num_localSite)); for (WorkFragment.Builder builder : this.tmp_localSiteFragmentBuilders) { PartitionExecutor other = hstore_site.getPartitionExecutor(builder.getPartitionId()); other.queueWork(ts, builder.build()); } // FOR if (needs_profiling) ts.profiler.markRemoteQuery(); } // Then execute all of the tasks need to access the partitions at this HStoreSite // We'll dispatch the remote-partition-local-site fragments first because they're going // to need to get queued up by at the other PartitionExecutors if (num_localPartition > 0) { if (trace.val) LOG.trace(String.format("%s - Executing %d WorkFragments on local partition", ts, num_localPartition)); for (WorkFragment.Builder fragmentBuilder : this.tmp_localWorkFragmentBuilders) { this.processWorkFragment(ts, fragmentBuilder.build(), batchParams); } // FOR } } if (trace.val) LOG.trace(String.format("%s - Dispatched %d WorkFragments " + "[remoteSite=%d, localSite=%d, localPartition=%d]", ts, total, num_remote, num_localSite, num_localPartition)); first = false; } // WHILE 
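// Every WorkFragment in this batch has now been dispatched. Release the serialization buffer and then block on the dependency latch until the remaining results arrive, the txn hits an error, or we time out.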
this.fs.getBBContainer().discard(); if (trace.val) LOG.trace(String.format("%s - BREAK OUT [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first, this.depTracker.stillHasWorkFragments(ts), latch)); // assert(ts.stillHasWorkFragments() == false) : // String.format("Trying to block %s before all of its WorkFragments have been dispatched!\n%s\n%s", // ts, // StringUtil.join("** ", "\n", tempDebug), // this.getVoltProcedure(ts.getProcedureName()).getLastBatchPlan()); // Now that we know all of our WorkFragments have been dispatched, we can then // wait for all of the results to come back in. if (latch == null) latch = this.depTracker.getDependencyLatch(ts); assert(latch != null) : String.format("Unexpected null dependency latch for " + ts); if (latch.getCount() > 0) { if (debug.val) { LOG.debug(String.format("%s - All blocked messages dispatched. Waiting for %d dependencies", ts, latch.getCount())); if (trace.val) LOG.trace(ts.toString()); } boolean timeout = false; long startTime = EstTime.currentTimeMillis(); if (needs_profiling) ts.profiler.startExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.start(); try { while (latch.getCount() > 0 && ts.hasPendingError() == false) { if (this.utilityWork() == false) { // CountDownLatch.await() returns true once the count reaches zero, so invert it: // 'timeout' is true only when this poll interval elapsed without the dependencies arriving. timeout = (latch.await(WORK_QUEUE_POLL_TIME, TimeUnit.MILLISECONDS) == false); if (timeout == false) break; } if ((EstTime.currentTimeMillis() - startTime) > hstore_conf.site.exec_response_timeout) { timeout = true; break; } } // WHILE } catch (InterruptedException ex) { if (this.hstore_site.isShuttingDown() == false) { LOG.error(String.format("%s - We were interrupted while waiting for results", ts), ex); } timeout = true; } catch (Throwable ex) { String msg = String.format("Fatal error for %s while waiting for results", ts); throw new ServerFaultException(msg, ex); } finally { if (needs_profiling) ts.profiler.stopExecDtxnWork(); if (hstore_conf.site.exec_profiling) this.profiler.sp1_time.stopIfStarted(); } if (timeout && latch.getCount() > 0 && this.isShuttingDown() == false) { LOG.warn(String.format("Still waiting for responses for %s after %d ms [latch=%d]\n%s", ts, hstore_conf.site.exec_response_timeout, latch.getCount(), ts.debug())); LOG.warn("Procedure Parameters:\n" + ts.getProcedureParameters()); hstore_conf.site.exec_profiling = true; LOG.warn(hstore_site.statusSnapshot()); String msg = "The query responses for " + ts + " never arrived!"; throw new ServerFaultException(msg, ts.getTransactionId()); } } // Update done partitions if (notify != null && notify.donePartitions.isEmpty() == false) { if (debug.val) LOG.debug(String.format("%s - Marking new done partitions %s", ts, notify.donePartitions)); ts.getDonePartitions().addAll(notify.donePartitions); } // IMPORTANT: Check whether the fragments failed somewhere and we got a response with an error // We will rethrow this so that it pops the stack all the way back to VoltProcedure.call() // where we can generate a message to the client if (ts.hasPendingError()) { if (debug.val) LOG.warn(String.format("%s was hit with a %s", ts, ts.getPendingError().getClass().getSimpleName())); throw ts.getPendingError(); } // IMPORTANT: Don't try to check whether we got back the right number of tables because the batch // may have hit an error and we didn't execute all of them.
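// Pull the final result tables for this batch out of the DependencyTracker and close out the round before handing them back to the VoltProcedure.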
VoltTable results[] = null; try { results = this.depTracker.getResults(ts); } catch (AssertionError ex) { LOG.error("Failed to get final results for batch\n" + ts.debug()); throw ex; } ts.finishRound(this.partitionId); if (debug.val) { if (trace.val) LOG.trace(ts + " is now running and looking for love in all the wrong places..."); LOG.debug(String.format("%s - Returning back %d tables to VoltProcedure", ts, results.length)); } return (results); } // --------------------------------------------------------------- // COMMIT + ABORT METHODS // --------------------------------------------------------------- /** * Queue a speculatively executed transaction to send its ClientResponseImpl message */ private void blockClientResponse(LocalTransaction ts, ClientResponseImpl cresponse) { assert(ts.isPredictSinglePartition() == true) : String.format("Speculatively executed multi-partition %s [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); assert(ts.isSpeculative() == true) : String.format("Blocking ClientResponse for non-speculative %s [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); assert(cresponse.getStatus() != Status.ABORT_MISPREDICT) : String.format("Trying to block ClientResponse for mispredicted %s [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); assert(this.currentExecMode != ExecutionMode.COMMIT_ALL) : String.format("Blocking ClientResponse for %s when in non-speculative mode [mode=%s, status=%s]", ts, this.currentExecMode, cresponse.getStatus()); this.specExecBlocked.push(Pair.of(ts, cresponse)); this.specExecModified = this.specExecModified && ts.isExecReadOnly(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Blocking %s ClientResponse [partitions=%s, blockQueue=%d]", ts, cresponse.getStatus(), ts.getTouchedPartitions().values(), this.specExecBlocked.size())); } /** * For the given transaction's ClientResponse, figure out whether we can send it back to the client * right now or whether we need to initiate two-phase commit. * @param ts * @param cresponse */ protected void processClientResponse(LocalTransaction ts, ClientResponseImpl cresponse) { // IMPORTANT: If we executed this locally and only touched our partition, then we need to commit/abort right here // 2010-11-14: The reason why we can do this is because we will just ignore the commit // message when it shows up from the Dtxn.Coordinator. We should probably double check with Evan on this... Status status = cresponse.getStatus(); if (debug.val) { LOG.debug(String.format("%s - Processing ClientResponse at partition %d " + "[status=%s, singlePartition=%s, local=%s, clientHandle=%d]", ts, this.partitionId, status, ts.isPredictSinglePartition(), ts.isExecLocal(this.partitionId), cresponse.getClientHandle())); if (trace.val) { LOG.trace(ts + " Touched Partitions: " + ts.getTouchedPartitions().values()); if (ts.isPredictSinglePartition() == false) LOG.trace(ts + " Done Partitions: " + ts.getDonePartitions()); } } // ------------------------------- // ALL: Transactions that need to be internally restarted // ------------------------------- if (status == Status.ABORT_MISPREDICT || status == Status.ABORT_SPECULATIVE || status == Status.ABORT_EVICTEDACCESS) { // If the txn was mispredicted, then we will pass the information over to the // HStoreSite so that it can re-execute the transaction. We want to do this // first so that the txn gets re-executed as soon as possible...
if (debug.val) LOG.debug(String.format("%s - Restarting because transaction was hit with %s", ts, (ts.getPendingError() != null ? ts.getPendingError().getClass().getSimpleName() : ""))); // We don't want to delete the transaction here because whoever is going to requeue it for // us will need to know what partitions that the transaction touched when it executed before if (ts.isPredictSinglePartition()) { this.finishTransaction(ts, status); this.hstore_site.transactionRequeue(ts, status); } // Send a message all the partitions involved that the party is over // and that they need to abort the transaction. We don't actually care when we get the // results back because we'll start working on new txns right away. // Note that when we call transactionFinish() right here this thread will then go on // to invoke HStoreSite.transactionFinish() for us. That means when it returns we will // have successfully aborted the txn at least at all of the local partitions at this site. else { if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostFinish(); LocalFinishCallback finish_callback = ts.getFinishCallback(); finish_callback.init(ts, status); finish_callback.markForRequeue(); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); this.hstore_coordinator.transactionFinish(ts, status, finish_callback); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } } // ------------------------------- // ALL: Single-Partition Transactions // ------------------------------- else if (ts.isPredictSinglePartition()) { // Commit or abort the transaction only if we haven't done it already // This can happen when we commit speculative txns out of order if (ts.isMarkedFinished(this.partitionId) == false) { this.finishTransaction(ts, status); } // We have to mark it as loggable to prevent the response // from getting sent back to the client if (hstore_conf.site.commandlog_enable) ts.markLogEnabled(); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); this.hstore_site.responseSend(ts, cresponse); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); this.hstore_site.queueDeleteTransaction(ts.getTransactionId(), status); } // ------------------------------- // COMMIT: Distributed Transaction // ------------------------------- else if (status == Status.OK) { // We need to set the new ExecutionMode before we invoke transactionPrepare // because the LocalTransaction handle might get cleaned up immediately ExecutionMode newMode = null; if (hstore_conf.site.specexec_enable) { newMode = (ts.isExecReadOnly(this.partitionId) ? 
ExecutionMode.COMMIT_READONLY : ExecutionMode.COMMIT_NONE); } else { newMode = ExecutionMode.DISABLED; } this.setExecutionMode(ts, newMode); // We have to send a prepare message to all of our remote HStoreSites // We want to make sure that we don't go back to ones that we've already told PartitionSet donePartitions = ts.getDonePartitions(); PartitionSet notifyPartitions = new PartitionSet(); for (int partition : ts.getPredictTouchedPartitions().values()) { if (donePartitions.contains(partition) == false) { notifyPartitions.add(partition); } } // FOR if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostPrepare(); ts.setClientResponse(cresponse); if (hstore_conf.site.exec_profiling) { this.profiler.network_time.start(); this.profiler.sp3_local_time.start(); } LocalPrepareCallback callback = ts.getPrepareCallback(); callback.init(ts, notifyPartitions); this.hstore_coordinator.transactionPrepare(ts, callback, notifyPartitions); if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } // ------------------------------- // ABORT: Distributed Transaction // ------------------------------- else { // Send back the result to the client right now, since there's no way // that we're magically going to be able to recover this and get them a result // This has to come before the network messages above because this will clean-up the // LocalTransaction state information this.hstore_site.responseSend(ts, cresponse); // Send a message all the partitions involved that the party is over // and that they need to abort the transaction. We don't actually care when we get the // results back because we'll start working on new txns right away. // Note that when we call transactionFinish() right here this thread will then go on // to invoke HStoreSite.transactionFinish() for us. That means when it returns we will // have successfully aborted the txn at least at all of the local partitions at this site. if (hstore_conf.site.txn_profiling && ts.profiler != null) ts.profiler.startPostFinish(); LocalFinishCallback callback = ts.getFinishCallback(); callback.init(ts, status); if (hstore_conf.site.exec_profiling) this.profiler.network_time.start(); try { this.hstore_coordinator.transactionFinish(ts, status, callback); } finally { if (hstore_conf.site.exec_profiling) this.profiler.network_time.stopIfStarted(); } } } /** * Enable speculative execution mode for this partition. The given transaction is * the one that we will need to wait to finish before we can release the ClientResponses * for any speculatively executed transactions. 
* @param txn_id * @return true if speculative execution was enabled at this partition */ private Status prepareTransaction(AbstractTransaction ts) { assert(ts != null) : "Unexpected null transaction handle at partition " + this.partitionId; assert(ts.isInitialized()) : String.format("Trying to prepare uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to prepare %s again after it was already finished at partition %d", ts, this.partitionId); Status status = Status.OK; // Skip if we've already invoked prepared for this txn at this partition if (ts.isMarkedPrepared(this.partitionId) == false) { if (debug.val) LOG.debug(String.format("%s - Preparing to commit txn at partition %d [specBlocked=%d]", ts, this.partitionId, this.specExecBlocked.size())); ExecutionMode newMode = ExecutionMode.COMMIT_NONE; if (hstore_conf.site.exec_profiling && this.partitionId != ts.getBasePartition() && ts.needsFinish(this.partitionId)) { profiler.sp3_remote_time.start(); } if (hstore_conf.site.specexec_enable) { // Check to see if there were any conflicts with the dtxn and any of its speculative // txns at this partition. If there were, then we know that we can't commit the txn here. LocalTransaction spec_ts; for (Pair<LocalTransaction, ClientResponseImpl> pair : this.specExecBlocked) { spec_ts = pair.getFirst(); if (debug.val) LOG.debug(String.format("%s - Checking for conflicts with speculative %s at partition %d [%s]", ts, spec_ts, this.partitionId, this.specExecChecker.getClass().getSimpleName())); if (this.specExecChecker.hasConflictAfter(ts, spec_ts, this.partitionId)) { if (debug.val) LOG.debug(String.format("%s - Conflict found with speculative txn %s at partition %d", ts, spec_ts, this.partitionId)); status = Status.ABORT_RESTART; break; } } // FOR // Check whether the txn that we're waiting for is read-only. // If it is, then that means all read-only transactions can commit right away if (status == Status.OK && ts.isExecReadOnly(this.partitionId)) { if (debug.val) LOG.debug(String.format("%s - Txn is read-only at partition %d [readOnly=%s]", ts, this.partitionId, ts.isExecReadOnly(this.partitionId))); newMode = ExecutionMode.COMMIT_READONLY; } } if (this.currentDtxn != null) this.setExecutionMode(ts, newMode); } // It's ok if they try to prepare the txn twice. That might just mean that they never // got the acknowledgement back in time if they tried to send an early commit message. 
else if (debug.val) { LOG.debug(String.format("%s - Already marked 2PC:PREPARE at partition %d", ts, this.partitionId)); } // IMPORTANT // When we do an early 2PC-PREPARE, we won't have this callback ready // because we don't know what callback to use to send the acknowledgements // back over the network PartitionCountingCallback<AbstractTransaction> callback = ts.getPrepareCallback(); if (status == Status.OK) { if (callback.isInitialized()) { try { callback.run(this.partitionId); } catch (Throwable ex) { LOG.warn("Unexpected error for " + ts, ex); } } // But we will always mark ourselves as prepared at this partition ts.markPrepared(this.partitionId); } else { if (debug.val) LOG.debug(String.format("%s - Aborting txn from partition %d [%s]", ts, this.partitionId, status)); callback.abort(this.partitionId, status); } return (status); } /** * Internal call to abort/commit the transaction down in the execution engine * @param ts * @param commit */ private void finishTransaction(AbstractTransaction ts, Status status) { assert(ts != null) : "Unexpected null transaction handle at partition " + this.partitionId; assert(ts.isInitialized()) : String.format("Trying to commit uninitialized transaction %s at partition %d", ts, this.partitionId); assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to commit %s twice at partition %d", ts, this.partitionId); // This can be null if they haven't submitted anything boolean commit = (status == Status.OK); long undoToken = (commit ? ts.getLastUndoToken(this.partitionId) : ts.getFirstUndoToken(this.partitionId)); // Only commit/abort this transaction if: // (2) We have the last undo token used by this transaction // (3) The transaction was executed with undo buffers // (4) The transaction actually submitted work to the EE // (5) The transaction modified data at this partition if (ts.needsFinish(this.partitionId) && undoToken != HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { if (trace.val) LOG.trace(String.format("%s - Invoking EE to finish work for txn [%s / speculative=%s]", ts, status, ts.isSpeculative())); this.finishWorkEE(ts, undoToken, commit); } // We always need to do the following things regardless if we hit up the EE or not if (commit) this.lastCommittedTxnId = ts.getTransactionId(); if (trace.val) LOG.trace(String.format("%s - Telling queue manager that txn is finished at partition %d", ts, this.partitionId)); this.queueManager.lockQueueFinished(ts, status, this.partitionId); if (debug.val) LOG.debug(String.format("%s - Successfully %sed transaction at partition %d", ts, (commit ? "committ" : "abort"), this.partitionId)); ts.markFinished(this.partitionId); } /** * The real method that actually reaches down into the EE and commits/undos the changes * for the given token. * Unless you know what you're doing, you probably want to be calling finishTransaction() * instead of calling this directly. 
* @param ts * @param undoToken * @param commit */ private void finishWorkEE(AbstractTransaction ts, long undoToken, boolean commit) { assert(ts.isMarkedFinished(this.partitionId) == false) : String.format("Trying to commit %s twice at partition %d", ts, this.partitionId); // If the txn is completely read-only and they didn't use undo-logging, then // there is nothing that we need to do, except to check to make sure we aren't // trying to abort this txn if (undoToken == HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN) { // SANITY CHECK: Make sure that they're not trying to undo a transaction that // modified the database but did not use undo logging if (ts.isExecReadOnly(this.partitionId) == false && commit == false) { String msg = String.format("TRYING TO ABORT TRANSACTION ON PARTITION %d WITHOUT UNDO LOGGING [undoToken=%d]", this.partitionId, undoToken); LOG.fatal(msg + "\n" + ts.debug()); this.crash(new ServerFaultException(msg, ts.getTransactionId())); } if (debug.val) LOG.debug(String.format("%s - undoToken == DISABLE_UNDO_LOGGING_TOKEN", ts)); } // COMMIT / ABORT else { boolean needs_profiling = false; if (hstore_conf.site.txn_profiling && ts.isExecLocal(this.partitionId) && ((LocalTransaction)ts).profiler != null) { needs_profiling = true; ((LocalTransaction)ts).profiler.startPostEE(); } assert(this.lastCommittedUndoToken != undoToken) : String.format("Trying to %s undoToken %d for %s twice at partition %d", (commit ? "COMMIT" : "ABORT"), undoToken, ts, this.partitionId); // COMMIT! if (commit) { if (debug.val) { LOG.debug(String.format("%s - COMMITTING txn on partition %d with undoToken %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", ts, this.partitionId, undoToken, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? " - " + ((LocalTransaction)ts).getSpeculationType() : ""))); if (this.specExecBlocked.isEmpty() == false && ts.isPredictSinglePartition() == false) { LOG.debug(String.format("%s - # of Speculatively Executed Txns: %d ", ts, this.specExecBlocked.size())); } } assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to commit undoToken %d for %s but it is less than the " + "last committed undoToken %d at partition %d\n" + "Last Committed Txn: %d", undoToken, ts, this.lastCommittedUndoToken, this.partitionId, this.lastCommittedTxnId); this.ee.releaseUndoToken(undoToken); this.lastCommittedUndoToken = undoToken; } // ABORT! else { // Evan says that txns will be aborted LIFO. This means the first txn that // we get in abortWork() will have the greatest undoToken, which means that // it will automagically rollback all other outstanding txns. // I'm lazy/tired, so for now I'll just rollback everything I get, but in theory // we should be able to check whether our undoToken has already been rolled back if (debug.val) { LOG.debug(String.format("%s - ABORTING txn on partition %d with undoToken %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", ts, this.partitionId, undoToken, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ?
" - " + ((LocalTransaction)ts).getSpeculationType() : ""))); if (this.specExecBlocked.isEmpty() == false && ts.isPredictSinglePartition() == false) { LOG.debug(String.format("%s - # of Speculatively Executed Txns: %d ", ts, this.specExecBlocked.size())); } } assert(this.lastCommittedUndoToken < undoToken) : String.format("Trying to abort undoToken %d for %s but it is less than the " + "last committed undoToken %d at partition %d " + "[lastTxnId=%d, lastUndoToken=%d, dtxn=%s]%s", undoToken, ts, this.lastCommittedUndoToken, this.partitionId, this.lastCommittedTxnId, this.lastCommittedUndoToken, this.currentDtxn, (ts instanceof LocalTransaction ? " - " + ((LocalTransaction)ts).getSpeculationType() : "")); this.ee.undoUndoToken(undoToken); } if (needs_profiling) ((LocalTransaction)ts).profiler.stopPostEE(); } } /** * Somebody told us that our partition needs to abort/commit the given transaction id. * This method should only be used for distributed transactions, because * it will do some extra work for speculative execution * @param ts - The transaction to finish up. * @param status - The final status of the transaction */ private void finishDistributedTransaction(final AbstractTransaction ts, final Status status) { if (debug.val) LOG.debug(String.format("%s - Processing finish request at partition %d " + "[status=%s, readOnly=%s]", ts, this.partitionId, status, ts.isExecReadOnly(this.partitionId))); if (this.currentDtxn == ts) { // 2012-11-22 -- Yes, today is Thanksgiving and I'm working on my database. // That's just grad student life I guess. Anyway, if you're reading this then // you know that this is an important part of the system. We have a dtxn that // we have been told is completely finished and now we need to either commit // or abort any changes that it may have made at this partition. The tricky thing // is that if we have speculative execution enabled, then we need to make sure // that we process any transactions that were executed while the dtxn was running // in the right order to ensure that we maintain serializability. // Here is the basic logic of what's about to happen: // // (1) If the dtxn is commiting, then we just need to commit the the last txn that // was executed (since this will have the largest undo token). // The EE will automatically commit all undo tokens less than that. // (2) If the dtxn is aborting, then we can commit any speculative txn that was // executed before the dtxn's first non-readonly undo token. // // Note that none of the speculative txns in the blocked queue will need to be // aborted at this point, because we will have rolled back their changes immediately // when they aborted, so that our dtxn doesn't read dirty data. 
if (this.specExecBlocked.isEmpty() == false) { // First thing we need to do is get the latch that will be set by any transaction // that was in the middle of being executed when we were called if (debug.val) LOG.debug(String.format("%s - Checking %d blocked speculative transactions at " + "partition %d [currentMode=%s]", ts, this.specExecBlocked.size(), this.partitionId, this.currentExecMode)); LocalTransaction spec_ts = null; ClientResponseImpl spec_cr = null; // ------------------------------- // DTXN NON-READ-ONLY ABORT // If the dtxn did not modify this partition, then everything can commit // Otherwise, we want to commit anything that was executed before the dtxn started // ------------------------------- if (status != Status.OK && ts.isExecReadOnly(this.partitionId) == false) { // We need to get the first undo tokens for our distributed transaction long dtxnUndoToken = ts.getFirstUndoToken(this.partitionId); if (debug.val) LOG.debug(String.format("%s - Looking for speculative txns to commit before we rollback undoToken %d", ts, dtxnUndoToken)); // Queue of speculative txns that need to be committed. final Queue<Pair<LocalTransaction, ClientResponseImpl>> txnsToCommit = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); // Queue of speculative txns that need to be aborted + restarted final Queue<Pair<LocalTransaction, ClientResponseImpl>> txnsToRestart = new LinkedList<Pair<LocalTransaction,ClientResponseImpl>>(); long spec_token; long max_token = HStoreConstants.NULL_UNDO_LOGGING_TOKEN; LocalTransaction max_ts = null; for (Pair<LocalTransaction, ClientResponseImpl> pair : this.specExecBlocked) { boolean shouldCommit = false; spec_ts = pair.getFirst(); spec_token = spec_ts.getFirstUndoToken(this.partitionId); if (debug.val) LOG.debug(String.format("Speculative Txn %s [undoToken=%d, %s]", spec_ts, spec_token, spec_ts.getSpeculationType())); // Speculative txns should never be executed without an undo token assert(spec_token != HStoreConstants.DISABLE_UNDO_LOGGING_TOKEN); assert(spec_ts.isSpeculative()) : spec_ts + " isn't marked as speculative!"; // If the speculative undoToken is null, then this txn didn't execute // any queries. That means we can always commit it if (spec_token == HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { if (debug.val) LOG.debug(String.format("Speculative Txn %s has a null undoToken at partition %d", spec_ts, this.partitionId)); shouldCommit = true; } // Otherwise, look to see if this txn was speculatively executed before the // first undo token of the distributed txn. That means we know that this guy // didn't read any modifications made by the dtxn. else if (spec_token < dtxnUndoToken) { if (debug.val) LOG.debug(String.format("Speculative Txn %s has an undoToken less than the dtxn %s " + "at partition %d [%d < %d]", spec_ts, ts, this.partitionId, spec_token, dtxnUndoToken)); shouldCommit = true; } // Ok so at this point we know that our spec txn came *after* the distributed txn // started.
So we need to use our checker to see whether there is a conflict else if (this.specExecChecker.hasConflictAfter(ts, spec_ts, this.partitionId) == false) { if (debug.val) LOG.debug(String.format("Speculative Txn %s does not conflict with dtxn %s at partition %d", spec_ts, ts, this.partitionId)); shouldCommit = true; } if (shouldCommit) { txnsToCommit.add(pair); if (spec_token != HStoreConstants.NULL_UNDO_LOGGING_TOKEN && spec_token > max_token) { max_token = spec_token; max_ts = spec_ts; } } else { txnsToRestart.add(pair); } } // FOR if (debug.val) LOG.debug(String.format("%s - Found %d speculative txns at partition %d that need to be " + "committed *before* we abort this txn", ts, txnsToCommit.size(), this.partitionId)); // (1) Commit the greatest token that we've seen. This means that // all our other txns can be safely processed without needing // to go down in the EE if (max_token != HStoreConstants.NULL_UNDO_LOGGING_TOKEN) { assert(max_ts != null); this.finishWorkEE(max_ts, max_token, true); } // (2) Process all the txns that need to be committed Pair<LocalTransaction, ClientResponseImpl> pair = null; while ((pair = txnsToCommit.poll()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); spec_ts.markFinished(this.partitionId); try { if (debug.val) LOG.debug(String.format("%s - Releasing blocked ClientResponse for %s [status=%s]", ts, spec_ts, spec_cr.getStatus())); this.processClientResponse(spec_ts, spec_cr); } catch (Throwable ex) { String msg = "Failed to complete queued response for " + spec_ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } // FOR // (3) Abort the distributed txn this.finishTransaction(ts, status); // (4) Restart all the other txns while ((pair = txnsToRestart.poll()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); MispredictionException error = new MispredictionException(spec_ts.getTransactionId(), spec_ts.getTouchedPartitions()); spec_ts.setPendingError(error, false); spec_cr.setStatus(Status.ABORT_SPECULATIVE); this.processClientResponse(spec_ts, spec_cr); } // FOR } // ------------------------------- // DTXN READ-ONLY ABORT or DTXN COMMIT // ------------------------------- else { // **IMPORTANT** // If the dtxn needs to commit, then all we need to do is get the // last undoToken that we've generated (since we know that it had to // have been used either by our distributed txn or for one of our // speculative txns). // // If the read-only dtxn needs to abort, then there's nothing we need to // do, because it didn't make any changes. That means we can just // commit the last speculatively executed transaction // // Once we have this token, we can just make a direct call to the EE // to commit any changes that came before it. Note that we are using our // special 'finishWorkEE' method that does not require us to provide // the transaction that we're committing. long undoToken = this.lastUndoToken; if (debug.val) LOG.debug(String.format("%s - Last undoToken at partition %d => %d", ts, this.partitionId, undoToken)); // Bombs away! if (undoToken != this.lastCommittedUndoToken) { this.finishWorkEE(ts, undoToken, true); // IMPORTANT: Make sure that we remove the dtxn from the lock queue! // This is normally done in finishTransaction() but because we're trying // to be clever and invoke the EE directly, we have to make sure that // we call it ourselves. 
this.queueManager.lockQueueFinished(ts, status, this.partitionId); } // Make sure that we mark the dtxn as finished so that we don't // try to do anything with it later on. ts.markFinished(this.partitionId); // Now make sure that all of the speculative txns are processed without // committing (since we just committed any change that they could have made // up above). Pair<LocalTransaction, ClientResponseImpl> pair = null; while ((pair = this.specExecBlocked.pollFirst()) != null) { spec_ts = pair.getFirst(); spec_cr = pair.getSecond(); spec_ts.markFinished(this.partitionId); try { if (trace.val) LOG.trace(String.format("%s - Releasing blocked ClientResponse for %s [status=%s]", ts, spec_ts, spec_cr.getStatus())); this.processClientResponse(spec_ts, spec_cr); } catch (Throwable ex) { String msg = "Failed to complete queued response for " + spec_ts; throw new ServerFaultException(msg, ex, ts.getTransactionId()); } } // WHILE } this.specExecBlocked.clear(); this.specExecModified = false; if (trace.val) LOG.trace(String.format("Finished processing all queued speculative txns for dtxn %s", ts)); } // ------------------------------- // NO SPECULATIVE TXNS // ------------------------------- else { // There are no speculative txns waiting for this dtxn, // so we can just commit it right away if (debug.val) LOG.debug(String.format("%s - No speculative txns at partition %d. Just %s txn by itself", ts, this.partitionId, (status == Status.OK ? "committing" : "aborting"))); this.finishTransaction(ts, status); } // Clear our cached query results that are specific for this transaction // this.queryCache.purgeTransaction(ts.getTransactionId()); // TODO: Remove anything in our queue for this txn // if (ts.hasQueuedWork(this.partitionId)) { // } // Check whether this is the response that the speculatively executed txns have been waiting for // We could have turned off speculative execution mode beforehand if (debug.val) LOG.debug(String.format("%s - Attempting to unmark as the current DTXN at partition %d and " + "setting execution mode to %s", ts, this.partitionId, ExecutionMode.COMMIT_ALL)); try { // Resetting the current_dtxn variable has to come *before* we change the execution mode this.resetCurrentDtxn(); this.setExecutionMode(ts, ExecutionMode.COMMIT_ALL); // Release blocked transactions this.releaseBlockedTransactions(ts); } catch (Throwable ex) { String msg = String.format("Failed to finish %s at partition %d", ts, this.partitionId); throw new ServerFaultException(msg, ex, ts.getTransactionId()); } if (hstore_conf.site.exec_profiling) { this.profiler.sp3_local_time.stopIfStarted(); this.profiler.sp3_remote_time.stopIfStarted(); } } // We were told to finish a dtxn that is not the current one // at this partition. That's ok as long as it's aborting and not trying // to commit. else { assert(status != Status.OK) : String.format("Trying to commit %s at partition %d but the current dtxn is %s", ts, this.partitionId, this.currentDtxn); this.queueManager.lockQueueFinished(ts, status, this.partitionId); } // ------------------------------- // FINISH CALLBACKS // ------------------------------- // MapReduceTransaction if (ts instanceof MapReduceTransaction) { PartitionCountingCallback<AbstractTransaction> callback = ((MapReduceTransaction)ts).getCleanupCallback(); // We don't want to invoke this callback at the basePartition's site // because we don't want the parent txn to actually get deleted.
if (this.partitionId == ts.getBasePartition()) { if (debug.val) LOG.debug(String.format("%s - Notifying %s that the txn is finished at partition %d", ts, callback.getClass().getSimpleName(), this.partitionId)); callback.run(this.partitionId); } } else { PartitionCountingCallback<AbstractTransaction> callback = ts.getFinishCallback(); if (debug.val) LOG.debug(String.format("%s - Notifying %s that the txn is finished at partition %d", ts, callback.getClass().getSimpleName(), this.partitionId)); callback.run(this.partitionId); } } private void blockTransaction(InternalTxnMessage work) { if (debug.val) LOG.debug(String.format("%s - Adding %s work to blocked queue", work.getTransaction(), work.getClass().getSimpleName())); this.currentBlockedTxns.add(work); } private void blockTransaction(LocalTransaction ts) { this.blockTransaction(new StartTxnMessage(ts)); } /** * Release all the transactions that are currently in this partition's blocked queue * into the work queue. * @param ts */ private void releaseBlockedTransactions(AbstractTransaction ts) { if (this.currentBlockedTxns.isEmpty() == false) { if (debug.val) LOG.debug(String.format("Attempting to release %d blocked transactions at partition %d because of %s", this.currentBlockedTxns.size(), this.partitionId, ts)); this.work_queue.addAll(this.currentBlockedTxns); int released = this.currentBlockedTxns.size(); this.currentBlockedTxns.clear(); if (debug.val) LOG.debug(String.format("Released %d blocked transactions at partition %d because of %s", released, this.partitionId, ts)); } assert(this.currentBlockedTxns.isEmpty()); } // --------------------------------------------------------------- // SNAPSHOT METHODS // --------------------------------------------------------------- /** * Do snapshot work exclusively until there is no more. Also blocks * until the syncing and closing of snapshot data targets has completed. */ public void initiateSnapshots(Deque<SnapshotTableTask> tasks) { m_snapshotter.initiateSnapshots(ee, tasks); } public Collection<Exception> completeSnapshotWork() throws InterruptedException { return m_snapshotter.completeSnapshotWork(ee); } // --------------------------------------------------------------- // SHUTDOWN METHODS // --------------------------------------------------------------- /** * Cause this PartitionExecutor to make the entire HStore cluster shutdown * This won't return! */ public synchronized void crash(Throwable ex) { String msg = String.format("PartitionExecutor for Partition #%d is crashing", this.partitionId); if (ex == null) LOG.warn(msg); else LOG.warn(msg, ex); assert(this.hstore_coordinator != null); this.hstore_coordinator.shutdownClusterBlocking(ex); } @Override public boolean isShuttingDown() { return (this.hstore_site.isShuttingDown()); // shutdown_state == State.PREPARE_SHUTDOWN || this.shutdown_state == State.SHUTDOWN); } @Override public void prepareShutdown(boolean error) { this.shutdown_state = ShutdownState.PREPARE_SHUTDOWN; } /** * Somebody from the outside wants us to shutdown */ public synchronized void shutdown() { if (this.shutdown_state == ShutdownState.SHUTDOWN) { if (debug.val) LOG.debug(String.format("Partition #%d told to shutdown again. 
Ignoring...", this.partitionId)); return; } this.shutdown_state = ShutdownState.SHUTDOWN; if (debug.val) LOG.debug(String.format("Shutting down PartitionExecutor for Partition #%d", this.partitionId)); // Clear the queue this.work_queue.clear(); // Knock out this ma if (this.m_snapshotter != null) this.m_snapshotter.shutdown(); // Make sure we shutdown our threadpool // this.thread_pool.shutdownNow(); if (this.self != null) this.self.interrupt(); if (this.shutdown_latch != null) { try { this.shutdown_latch.acquire(); } catch (InterruptedException ex) { // Ignore } catch (Exception ex) { LOG.fatal("Unexpected error while shutting down", ex); } } } // ---------------------------------------------------------------------------- // DEBUG METHODS // ---------------------------------------------------------------------------- @Override public String toString() { return String.format("%s{%s}", this.getClass().getSimpleName(), HStoreThreadManager.formatPartitionName(siteId, partitionId)); } public class Debug implements DebugContext { public VoltProcedure getVoltProcedure(String procName) { Procedure proc = catalogContext.procedures.getIgnoreCase(procName); return (PartitionExecutor.this.getVoltProcedure(proc.getId())); } public SpecExecScheduler getSpecExecScheduler() { return (PartitionExecutor.this.specExecScheduler); } public AbstractConflictChecker getSpecExecConflictChecker() { return (PartitionExecutor.this.specExecChecker); } public Collection<BatchPlanner> getBatchPlanners() { return (PartitionExecutor.this.batchPlanners.values()); } public PartitionExecutorProfiler getProfiler() { return (PartitionExecutor.this.profiler); } public Thread getExecutionThread() { return (PartitionExecutor.this.self); } public Queue<InternalMessage> getWorkQueue() { return (PartitionExecutor.this.work_queue); } public void setExecutionMode(AbstractTransaction ts, ExecutionMode newMode) { PartitionExecutor.this.setExecutionMode(ts, newMode); } public ExecutionMode getExecutionMode() { return (PartitionExecutor.this.currentExecMode); } public Long getLastExecutedTxnId() { return (PartitionExecutor.this.lastExecutedTxnId); } public Long getLastCommittedTxnId() { return (PartitionExecutor.this.lastCommittedTxnId); } public long getLastCommittedIndoToken() { return (PartitionExecutor.this.lastCommittedUndoToken); } /** * Get the VoltProcedure handle of the current running txn. This could be null. 
* <B>FOR TESTING ONLY</B> */ public VoltProcedure getCurrentVoltProcedure() { return (PartitionExecutor.this.currentVoltProc); } /** * Get the handle of the current distributed transaction at this partition * <B>FOR TESTING ONLY</B> */ public AbstractTransaction getCurrentDtxn() { return (PartitionExecutor.this.currentDtxn); } /** * Get the txnId of the current distributed transaction at this partition * <B>FOR TESTING ONLY</B> */ public Long getCurrentDtxnId() { Long ret = null; // This is a race condition, so we'll just ignore any errors if (PartitionExecutor.this.currentDtxn != null) { try { ret = PartitionExecutor.this.currentDtxn.getTransactionId(); } catch (NullPointerException ex) { // IGNORE } } return (ret); } public Long getCurrentTxnId() { return (PartitionExecutor.this.currentTxnId); } public int getBlockedWorkCount() { return (PartitionExecutor.this.currentBlockedTxns.size()); } /** * Return the number of spec exec txns that have completed but are waiting * for the distributed txn to finish at this partition */ public int getBlockedSpecExecCount() { return (PartitionExecutor.this.specExecBlocked.size()); } public int getWorkQueueSize() { return (PartitionExecutor.this.work_queue.size()); } public void updateMemory() { PartitionExecutor.this.updateMemoryStats(EstTime.currentTimeMillis()); } /** * Replace the ConflictChecker. This should only be used for testing * @param checker */ protected void setConflictChecker(AbstractConflictChecker checker) { LOG.warn(String.format("Replacing original checker %s with %s at partition %d", specExecChecker.getClass().getSimpleName(), checker.getClass().getSimpleName(), partitionId)); specExecChecker = checker; specExecScheduler.getDebugContext().setConflictChecker(checker); } } private Debug cachedDebugContext; public Debug getDebugContext() { if (this.cachedDebugContext == null) { // We don't care if we're thread-safe here... this.cachedDebugContext = new Debug(); } return this.cachedDebugContext; } }
diff --git a/phone/com/android/internal/policy/impl/PhoneWindowManager.java b/phone/com/android/internal/policy/impl/PhoneWindowManager.java index 4abd268..e7a03e4 100755 --- a/phone/com/android/internal/policy/impl/PhoneWindowManager.java +++ b/phone/com/android/internal/policy/impl/PhoneWindowManager.java @@ -1,2241 +1,2247 @@ /* * Copyright (C) 2006 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.internal.policy.impl; import android.app.Activity; import android.app.ActivityManagerNative; import android.app.IActivityManager; import android.app.IStatusBar; import android.content.ActivityNotFoundException; import android.content.BroadcastReceiver; import android.content.ContentResolver; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.content.pm.ActivityInfo; import android.content.pm.PackageManager; import android.content.res.Configuration; import android.content.res.Resources; import android.database.ContentObserver; import android.graphics.Rect; import android.os.BatteryManager; import android.os.Handler; import android.os.IBinder; import android.os.LocalPowerManager; import android.os.PowerManager; import android.os.RemoteException; import android.os.ServiceManager; import android.os.SystemClock; import android.os.SystemProperties; import android.os.Vibrator; import android.provider.Settings; import com.android.internal.policy.PolicyManager; import com.android.internal.telephony.ITelephony; import android.util.Config; import android.util.EventLog; import android.util.Log; import android.view.Display; import android.view.Gravity; import android.view.HapticFeedbackConstants; import android.view.IWindowManager; import android.view.KeyEvent; import android.view.MotionEvent; import android.view.WindowOrientationListener; import android.view.RawInputEvent; import android.view.Surface; import android.view.View; import android.view.ViewConfiguration; import android.view.Window; import android.view.WindowManager; import static android.view.WindowManager.LayoutParams.FIRST_APPLICATION_WINDOW; import static android.view.WindowManager.LayoutParams.FLAG_FORCE_NOT_FULLSCREEN; import static android.view.WindowManager.LayoutParams.FLAG_FULLSCREEN; import static android.view.WindowManager.LayoutParams.FLAG_LAYOUT_IN_SCREEN; import static android.view.WindowManager.LayoutParams.FLAG_LAYOUT_INSET_DECOR; import static android.view.WindowManager.LayoutParams.FLAG_LAYOUT_NO_LIMITS; import static android.view.WindowManager.LayoutParams.FLAG_SHOW_WHEN_LOCKED; import static android.view.WindowManager.LayoutParams.FLAG_DISMISS_KEYGUARD; import static android.view.WindowManager.LayoutParams.SOFT_INPUT_MASK_ADJUST; import static android.view.WindowManager.LayoutParams.SOFT_INPUT_ADJUST_RESIZE; import static android.view.WindowManager.LayoutParams.LAST_APPLICATION_WINDOW; import static android.view.WindowManager.LayoutParams.TYPE_APPLICATION_MEDIA; import static 
android.view.WindowManager.LayoutParams.TYPE_APPLICATION_MEDIA_OVERLAY; import static android.view.WindowManager.LayoutParams.TYPE_APPLICATION_PANEL; import static android.view.WindowManager.LayoutParams.TYPE_APPLICATION_SUB_PANEL; import static android.view.WindowManager.LayoutParams.TYPE_APPLICATION_ATTACHED_DIALOG; import static android.view.WindowManager.LayoutParams.TYPE_KEYGUARD; import static android.view.WindowManager.LayoutParams.TYPE_KEYGUARD_DIALOG; import static android.view.WindowManager.LayoutParams.TYPE_PHONE; import static android.view.WindowManager.LayoutParams.TYPE_PRIORITY_PHONE; import static android.view.WindowManager.LayoutParams.TYPE_SEARCH_BAR; import static android.view.WindowManager.LayoutParams.TYPE_STATUS_BAR; import static android.view.WindowManager.LayoutParams.TYPE_STATUS_BAR_PANEL; import static android.view.WindowManager.LayoutParams.TYPE_SYSTEM_ALERT; import static android.view.WindowManager.LayoutParams.TYPE_SYSTEM_ERROR; import static android.view.WindowManager.LayoutParams.TYPE_INPUT_METHOD; import static android.view.WindowManager.LayoutParams.TYPE_INPUT_METHOD_DIALOG; import static android.view.WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY; import static android.view.WindowManager.LayoutParams.TYPE_TOAST; import static android.view.WindowManager.LayoutParams.TYPE_WALLPAPER; import android.view.WindowManagerImpl; import android.view.WindowManagerPolicy; import android.view.WindowManagerPolicy.WindowState; import android.view.animation.Animation; import android.view.animation.AnimationUtils; import android.media.IAudioService; import android.media.AudioManager; /** * WindowManagerPolicy implementation for the Android phone UI. This * introduces a new method suffix, Lp, for an internal lock of the * PhoneWindowManager. This is used to protect some internal state, and * can be acquired with either the Lw and Li lock held, so has the restrictions * of both of those when held. */ public class PhoneWindowManager implements WindowManagerPolicy { static final String TAG = "WindowManager"; static final boolean DEBUG = false; static final boolean localLOGV = DEBUG ? Config.LOGD : Config.LOGV; static final boolean DEBUG_LAYOUT = false; static final boolean SHOW_STARTING_ANIMATIONS = true; static final boolean SHOW_PROCESSES_ON_ALT_MENU = false; // wallpaper is at the bottom, though the window manager may move it. static final int WALLPAPER_LAYER = 2; static final int APPLICATION_LAYER = 2; static final int PHONE_LAYER = 3; static final int SEARCH_BAR_LAYER = 4; static final int STATUS_BAR_PANEL_LAYER = 5; // toasts and the plugged-in battery thing static final int TOAST_LAYER = 6; static final int STATUS_BAR_LAYER = 7; // SIM errors and unlock. Not sure if this really should be in a high layer. static final int PRIORITY_PHONE_LAYER = 8; // like the ANR / app crashed dialogs static final int SYSTEM_ALERT_LAYER = 9; // system-level error dialogs static final int SYSTEM_ERROR_LAYER = 10; // on-screen keyboards and other such input method user interfaces go here. static final int INPUT_METHOD_LAYER = 11; // on-screen keyboards and other such input method user interfaces go here. static final int INPUT_METHOD_DIALOG_LAYER = 12; // the keyguard; nothing on top of these can take focus, since they are // responsible for power management when displayed. static final int KEYGUARD_LAYER = 13; static final int KEYGUARD_DIALOG_LAYER = 14; // things in here CAN NOT take focus, but are shown on top of everything else.
static final int SYSTEM_OVERLAY_LAYER = 15; static final int APPLICATION_MEDIA_SUBLAYER = -2; static final int APPLICATION_MEDIA_OVERLAY_SUBLAYER = -1; static final int APPLICATION_PANEL_SUBLAYER = 1; static final int APPLICATION_SUB_PANEL_SUBLAYER = 2; static final float SLIDE_TOUCH_EVENT_SIZE_LIMIT = 0.6f; // Debugging: set this to have the system act like there is no hard keyboard. static final boolean KEYBOARD_ALWAYS_HIDDEN = false; static public final String SYSTEM_DIALOG_REASON_KEY = "reason"; static public final String SYSTEM_DIALOG_REASON_GLOBAL_ACTIONS = "globalactions"; static public final String SYSTEM_DIALOG_REASON_RECENT_APPS = "recentapps"; static public final String SYSTEM_DIALOG_REASON_HOME_KEY = "homekey"; final Object mLock = new Object(); Context mContext; IWindowManager mWindowManager; LocalPowerManager mPowerManager; Vibrator mVibrator; // Vibrator for giving feedback of orientation changes // Vibrator pattern for haptic feedback of a long press. long[] mLongPressVibePattern; // Vibrator pattern for haptic feedback of virtual key press. long[] mVirtualKeyVibePattern; // Vibrator pattern for haptic feedback during boot when safe mode is disabled. long[] mSafeModeDisabledVibePattern; // Vibrator pattern for haptic feedback during boot when safe mode is enabled. long[] mSafeModeEnabledVibePattern; /** If true, hitting shift & menu will broadcast Intent.ACTION_BUG_REPORT */ boolean mEnableShiftMenuBugReports = false; boolean mSafeMode; WindowState mStatusBar = null; WindowState mKeyguard = null; KeyguardViewMediator mKeyguardMediator; GlobalActions mGlobalActions; boolean mShouldTurnOffOnKeyUp; RecentApplicationsDialog mRecentAppsDialog; Handler mHandler; final IntentFilter mBatteryStatusFilter = new IntentFilter(); boolean mLidOpen; int mPlugged; boolean mRegisteredBatteryReceiver; int mDockState = Intent.EXTRA_DOCK_STATE_UNDOCKED; int mLidOpenRotation; int mCarDockRotation; int mDeskDockRotation; int mCarDockKeepsScreenOn; int mDeskDockKeepsScreenOn; boolean mCarDockEnablesAccelerometer; boolean mDeskDockEnablesAccelerometer; int mLidKeyboardAccessibility; int mLidNavigationAccessibility; boolean mScreenOn = false; boolean mOrientationSensorEnabled = false; int mCurrentAppOrientation = ActivityInfo.SCREEN_ORIENTATION_UNSPECIFIED; static final int DEFAULT_ACCELEROMETER_ROTATION = 0; int mAccelerometerDefault = DEFAULT_ACCELEROMETER_ROTATION; boolean mHasSoftInput = false; // The current size of the screen. int mW, mH; // During layout, the current screen borders with all outer decoration // (status bar, input method dock) accounted for. int mCurLeft, mCurTop, mCurRight, mCurBottom; // During layout, the frame in which content should be displayed // to the user, accounting for all screen decoration except for any // space they deem as available for other content. This is usually // the same as mCur*, but may be larger if the screen decor has supplied // content insets. int mContentLeft, mContentTop, mContentRight, mContentBottom; // During layout, the current screen borders along with input method // windows are placed. int mDockLeft, mDockTop, mDockRight, mDockBottom; // During layout, the layer at which the doc window is placed. 
int mDockLayer; static final Rect mTmpParentFrame = new Rect(); static final Rect mTmpDisplayFrame = new Rect(); static final Rect mTmpContentFrame = new Rect(); static final Rect mTmpVisibleFrame = new Rect(); WindowState mTopFullscreenOpaqueWindowState; boolean mForceStatusBar; boolean mHideLockScreen; boolean mDismissKeyguard; boolean mHomePressed; Intent mHomeIntent; Intent mCarDockIntent; Intent mDeskDockIntent; boolean mSearchKeyPressed; boolean mConsumeSearchKeyUp; static final int ENDCALL_HOME = 0x1; static final int ENDCALL_SLEEPS = 0x2; static final int DEFAULT_ENDCALL_BEHAVIOR = ENDCALL_SLEEPS; int mEndcallBehavior; int mLandscapeRotation = -1; int mPortraitRotation = -1; // Nothing to see here, move along... int mFancyRotationAnimation; ShortcutManager mShortcutManager; PowerManager.WakeLock mBroadcastWakeLock; PowerManager.WakeLock mDockWakeLock; class SettingsObserver extends ContentObserver { SettingsObserver(Handler handler) { super(handler); } void observe() { ContentResolver resolver = mContext.getContentResolver(); resolver.registerContentObserver(Settings.System.getUriFor( Settings.System.END_BUTTON_BEHAVIOR), false, this); resolver.registerContentObserver(Settings.System.getUriFor( Settings.System.ACCELEROMETER_ROTATION), false, this); resolver.registerContentObserver(Settings.Secure.getUriFor( Settings.Secure.DEFAULT_INPUT_METHOD), false, this); resolver.registerContentObserver(Settings.System.getUriFor( "fancy_rotation_anim"), false, this); update(); } @Override public void onChange(boolean selfChange) { update(); try { mWindowManager.setRotation(USE_LAST_ROTATION, false, mFancyRotationAnimation); } catch (RemoteException e) { // Ignore } } public void update() { ContentResolver resolver = mContext.getContentResolver(); boolean updateRotation = false; synchronized (mLock) { mEndcallBehavior = Settings.System.getInt(resolver, Settings.System.END_BUTTON_BEHAVIOR, DEFAULT_ENDCALL_BEHAVIOR); mFancyRotationAnimation = Settings.System.getInt(resolver, "fancy_rotation_anim", 0) != 0 ? 0x80 : 0; int accelerometerDefault = Settings.System.getInt(resolver, Settings.System.ACCELEROMETER_ROTATION, DEFAULT_ACCELEROMETER_ROTATION); if (mAccelerometerDefault != accelerometerDefault) { mAccelerometerDefault = accelerometerDefault; updateOrientationListenerLp(); } String imId = Settings.Secure.getString(resolver, Settings.Secure.DEFAULT_INPUT_METHOD); boolean hasSoftInput = imId != null && imId.length() > 0; if (mHasSoftInput != hasSoftInput) { mHasSoftInput = hasSoftInput; updateRotation = true; } } if (updateRotation) { updateRotation(0); } } } class MyOrientationListener extends WindowOrientationListener { MyOrientationListener(Context context) { super(context); } @Override public void onOrientationChanged(int rotation) { // Send updates based on orientation value if (localLOGV) Log.v(TAG, "onOrientationChanged, rotation changed to " +rotation); try { mWindowManager.setRotation(rotation, false, mFancyRotationAnimation); } catch (RemoteException e) { // Ignore } } } MyOrientationListener mOrientationListener; boolean useSensorForOrientationLp(int appOrientation) { // The app says use the sensor. if (appOrientation == ActivityInfo.SCREEN_ORIENTATION_SENSOR) { return true; } // The user preference says we can rotate, and the app is willing to rotate. 
if (mAccelerometerDefault != 0 && (appOrientation == ActivityInfo.SCREEN_ORIENTATION_USER || appOrientation == ActivityInfo.SCREEN_ORIENTATION_UNSPECIFIED)) { return true; } // We're in a dock that has a rotation affinity, and the app is willing to rotate. if ((mCarDockEnablesAccelerometer && mDockState == Intent.EXTRA_DOCK_STATE_CAR) || (mDeskDockEnablesAccelerometer && mDockState == Intent.EXTRA_DOCK_STATE_DESK)) { // Note we override the nosensor flag here. if (appOrientation == ActivityInfo.SCREEN_ORIENTATION_USER || appOrientation == ActivityInfo.SCREEN_ORIENTATION_UNSPECIFIED || appOrientation == ActivityInfo.SCREEN_ORIENTATION_NOSENSOR) { return true; } } // Else, don't use the sensor. return false; } /* * We always let the sensor be switched on by default except when * the user has explicitly disabled sensor based rotation or when the * screen is switched off. */ boolean needSensorRunningLp() { if (mCurrentAppOrientation == ActivityInfo.SCREEN_ORIENTATION_SENSOR) { // If the application has explicitly requested to follow the // orientation, then we need to turn the sensor on. return true; } if ((mCarDockEnablesAccelerometer && mDockState == Intent.EXTRA_DOCK_STATE_CAR) || (mDeskDockEnablesAccelerometer && mDockState == Intent.EXTRA_DOCK_STATE_DESK)) { // enable accelerometer if we are docked in a dock that enables accelerometer // orientation management. return true; } if (mAccelerometerDefault == 0) { // If the setting for using the sensor by default is enabled, then // we will always leave it on. Note that the user could go to // a window that forces an orientation that does not use the // sensor and in theory we could turn it off... however, when next // turning it on we won't have a good value for the current // orientation for a little bit, which can cause orientation // changes to lag, so we'd like to keep it always on. (It will // still be turned off when the screen is off.)
return false; } return true; } /* * Various use cases for invoking this function * screen turning off, should always disable listeners if already enabled * screen turned on and current app has sensor based orientation, enable listeners * if not already enabled * screen turned on and current app does not have sensor orientation, disable listeners if * already enabled * screen turning on and current app has sensor based orientation, enable listeners if needed * screen turning on and current app has nosensor based orientation, do nothing */ void updateOrientationListenerLp() { if (!mOrientationListener.canDetectOrientation()) { // If sensor is turned off or nonexistent for some reason return; } //Could have been invoked due to screen turning on or off or //change of the currently visible window's orientation if (localLOGV) Log.v(TAG, "Screen status="+mScreenOn+ ", current orientation="+mCurrentAppOrientation+ ", SensorEnabled="+mOrientationSensorEnabled); boolean disable = true; if (mScreenOn) { if (needSensorRunningLp()) { disable = false; //enable listener if not already enabled if (!mOrientationSensorEnabled) { mOrientationListener.enable(); if(localLOGV) Log.v(TAG, "Enabling listeners"); mOrientationSensorEnabled = true; } } } //check if sensors need to be disabled if (disable && mOrientationSensorEnabled) { mOrientationListener.disable(); if(localLOGV) Log.v(TAG, "Disabling listeners"); mOrientationSensorEnabled = false; } } Runnable mPowerLongPress = new Runnable() { public void run() { mShouldTurnOffOnKeyUp = false; performHapticFeedbackLw(null, HapticFeedbackConstants.LONG_PRESS, false); sendCloseSystemWindows(SYSTEM_DIALOG_REASON_GLOBAL_ACTIONS); showGlobalActionsDialog(); } }; void showGlobalActionsDialog() { if (mGlobalActions == null) { mGlobalActions = new GlobalActions(mContext); } final boolean keyguardShowing = mKeyguardMediator.isShowingAndNotHidden(); mGlobalActions.showDialog(keyguardShowing, isDeviceProvisioned()); if (keyguardShowing) { // since it took two seconds of long press to bring this up, // poke the wake lock so they have some time to see the dialog. 
mKeyguardMediator.pokeWakelock(); } } boolean isDeviceProvisioned() { return Settings.Secure.getInt( mContext.getContentResolver(), Settings.Secure.DEVICE_PROVISIONED, 0) != 0; } /** * When a home-key longpress expires, close other system windows and launch the recent apps */ Runnable mHomeLongPress = new Runnable() { public void run() { /* * Eat the longpress so it won't dismiss the recent apps dialog when * the user lets go of the home key */ mHomePressed = false; performHapticFeedbackLw(null, HapticFeedbackConstants.LONG_PRESS, false); sendCloseSystemWindows(SYSTEM_DIALOG_REASON_RECENT_APPS); showRecentAppsDialog(); } }; /** * Create (if necessary) and launch the recent apps dialog */ void showRecentAppsDialog() { if (mRecentAppsDialog == null) { mRecentAppsDialog = new RecentApplicationsDialog(mContext); } mRecentAppsDialog.show(); } /** {@inheritDoc} */ public void init(Context context, IWindowManager windowManager, LocalPowerManager powerManager) { mContext = context; mWindowManager = windowManager; mPowerManager = powerManager; mKeyguardMediator = new KeyguardViewMediator(context, this, powerManager); mHandler = new Handler(); mOrientationListener = new MyOrientationListener(mContext); SettingsObserver settingsObserver = new SettingsObserver(mHandler); settingsObserver.observe(); mShortcutManager = new ShortcutManager(context, mHandler); mShortcutManager.observe(); mHomeIntent = new Intent(Intent.ACTION_MAIN, null); mHomeIntent.addCategory(Intent.CATEGORY_HOME); mHomeIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED); mCarDockIntent = new Intent(Intent.ACTION_MAIN, null); mCarDockIntent.addCategory(Intent.CATEGORY_CAR_DOCK); mCarDockIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED); mDeskDockIntent = new Intent(Intent.ACTION_MAIN, null); mDeskDockIntent.addCategory(Intent.CATEGORY_DESK_DOCK); mDeskDockIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED); PowerManager pm = (PowerManager)context.getSystemService(Context.POWER_SERVICE); mBroadcastWakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "PhoneWindowManager.mBroadcastWakeLock"); mDockWakeLock = pm.newWakeLock(PowerManager.FULL_WAKE_LOCK, "PhoneWindowManager.mDockWakeLock"); mDockWakeLock.setReferenceCounted(false); mEnableShiftMenuBugReports = "1".equals(SystemProperties.get("ro.debuggable")); mLidOpenRotation = readRotation( com.android.internal.R.integer.config_lidOpenRotation); mCarDockRotation = readRotation( com.android.internal.R.integer.config_carDockRotation); mDeskDockRotation = readRotation( com.android.internal.R.integer.config_deskDockRotation); mCarDockKeepsScreenOn = mContext.getResources().getInteger( com.android.internal.R.integer.config_carDockKeepsScreenOn); mDeskDockKeepsScreenOn = mContext.getResources().getInteger( com.android.internal.R.integer.config_deskDockKeepsScreenOn); mCarDockEnablesAccelerometer = mContext.getResources().getBoolean( com.android.internal.R.bool.config_carDockEnablesAccelerometer); mDeskDockEnablesAccelerometer = mContext.getResources().getBoolean( com.android.internal.R.bool.config_deskDockEnablesAccelerometer); mLidKeyboardAccessibility = mContext.getResources().getInteger( com.android.internal.R.integer.config_lidKeyboardAccessibility); mLidNavigationAccessibility = mContext.getResources().getInteger( com.android.internal.R.integer.config_lidNavigationAccessibility); // register for battery events 
mBatteryStatusFilter.addAction(Intent.ACTION_BATTERY_CHANGED); mPlugged = 0; updatePlugged(context.registerReceiver(null, mBatteryStatusFilter)); // register for dock events context.registerReceiver(mDockReceiver, new IntentFilter(Intent.ACTION_DOCK_EVENT)); mVibrator = new Vibrator(); mLongPressVibePattern = getLongIntArray(mContext.getResources(), com.android.internal.R.array.config_longPressVibePattern); mVirtualKeyVibePattern = getLongIntArray(mContext.getResources(), com.android.internal.R.array.config_virtualKeyVibePattern); mSafeModeDisabledVibePattern = getLongIntArray(mContext.getResources(), com.android.internal.R.array.config_safeModeDisabledVibePattern); mSafeModeEnabledVibePattern = getLongIntArray(mContext.getResources(), com.android.internal.R.array.config_safeModeEnabledVibePattern); } void updatePlugged(Intent powerIntent) { if (localLOGV) Log.v(TAG, "New battery status: " + powerIntent.getExtras()); if (powerIntent != null) { mPlugged = powerIntent.getIntExtra(BatteryManager.EXTRA_PLUGGED, 0); if (localLOGV) Log.v(TAG, "PLUGGED: " + mPlugged); } } private int readRotation(int resID) { try { int rotation = mContext.getResources().getInteger(resID); switch (rotation) { case 0: return Surface.ROTATION_0; case 90: return Surface.ROTATION_90; case 180: return Surface.ROTATION_180; case 270: return Surface.ROTATION_270; } } catch (Resources.NotFoundException e) { // fall through } return -1; } /** {@inheritDoc} */ public int checkAddPermission(WindowManager.LayoutParams attrs) { int type = attrs.type; if (type < WindowManager.LayoutParams.FIRST_SYSTEM_WINDOW || type > WindowManager.LayoutParams.LAST_SYSTEM_WINDOW) { return WindowManagerImpl.ADD_OKAY; } String permission = null; switch (type) { case TYPE_TOAST: // XXX right now the app process has complete control over // this... should introduce a token to let the system // monitor/control what they are doing. break; case TYPE_INPUT_METHOD: case TYPE_WALLPAPER: // The window manager will check these. break; case TYPE_PHONE: case TYPE_PRIORITY_PHONE: case TYPE_SYSTEM_ALERT: case TYPE_SYSTEM_ERROR: case TYPE_SYSTEM_OVERLAY: permission = android.Manifest.permission.SYSTEM_ALERT_WINDOW; break; default: permission = android.Manifest.permission.INTERNAL_SYSTEM_WINDOW; } if (permission != null) { if (mContext.checkCallingOrSelfPermission(permission) != PackageManager.PERMISSION_GRANTED) { return WindowManagerImpl.ADD_PERMISSION_DENIED; } } return WindowManagerImpl.ADD_OKAY; } public void adjustWindowParamsLw(WindowManager.LayoutParams attrs) { switch (attrs.type) { case TYPE_SYSTEM_OVERLAY: case TYPE_TOAST: // These types of windows can't receive input events. attrs.flags |= WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE | WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE; break; } } void readLidState() { try { int sw = mWindowManager.getSwitchState(RawInputEvent.SW_LID); if (sw >= 0) { mLidOpen = sw == 0; } } catch (RemoteException e) { // Ignore } } private int determineHiddenState(boolean lidOpen, int mode, int hiddenValue, int visibleValue) { switch (mode) { case 1: return lidOpen ? visibleValue : hiddenValue; case 2: return lidOpen ? 
hiddenValue : visibleValue; } return visibleValue; } /** {@inheritDoc} */ public void adjustConfigurationLw(Configuration config) { readLidState(); final boolean lidOpen = !KEYBOARD_ALWAYS_HIDDEN && mLidOpen; mPowerManager.setKeyboardVisibility(lidOpen); config.hardKeyboardHidden = determineHiddenState(lidOpen, mLidKeyboardAccessibility, Configuration.HARDKEYBOARDHIDDEN_YES, Configuration.HARDKEYBOARDHIDDEN_NO); config.navigationHidden = determineHiddenState(lidOpen, mLidNavigationAccessibility, Configuration.NAVIGATIONHIDDEN_YES, Configuration.NAVIGATIONHIDDEN_NO); config.keyboardHidden = (config.hardKeyboardHidden == Configuration.HARDKEYBOARDHIDDEN_NO || mHasSoftInput) ? Configuration.KEYBOARDHIDDEN_NO : Configuration.KEYBOARDHIDDEN_YES; } public boolean isCheekPressedAgainstScreen(MotionEvent ev) { if(ev.getSize() > SLIDE_TOUCH_EVENT_SIZE_LIMIT) { return true; } int size = ev.getHistorySize(); for(int i = 0; i < size; i++) { if(ev.getHistoricalSize(i) > SLIDE_TOUCH_EVENT_SIZE_LIMIT) { return true; } } return false; } /** {@inheritDoc} */ public int windowTypeToLayerLw(int type) { if (type >= FIRST_APPLICATION_WINDOW && type <= LAST_APPLICATION_WINDOW) { return APPLICATION_LAYER; } switch (type) { case TYPE_STATUS_BAR: return STATUS_BAR_LAYER; case TYPE_STATUS_BAR_PANEL: return STATUS_BAR_PANEL_LAYER; case TYPE_SEARCH_BAR: return SEARCH_BAR_LAYER; case TYPE_PHONE: return PHONE_LAYER; case TYPE_KEYGUARD: return KEYGUARD_LAYER; case TYPE_KEYGUARD_DIALOG: return KEYGUARD_DIALOG_LAYER; case TYPE_SYSTEM_ALERT: return SYSTEM_ALERT_LAYER; case TYPE_SYSTEM_ERROR: return SYSTEM_ERROR_LAYER; case TYPE_INPUT_METHOD: return INPUT_METHOD_LAYER; case TYPE_INPUT_METHOD_DIALOG: return INPUT_METHOD_DIALOG_LAYER; case TYPE_SYSTEM_OVERLAY: return SYSTEM_OVERLAY_LAYER; case TYPE_PRIORITY_PHONE: return PRIORITY_PHONE_LAYER; case TYPE_TOAST: return TOAST_LAYER; case TYPE_WALLPAPER: return WALLPAPER_LAYER; } Log.e(TAG, "Unknown window type: " + type); return APPLICATION_LAYER; } /** {@inheritDoc} */ public int subWindowTypeToLayerLw(int type) { switch (type) { case TYPE_APPLICATION_PANEL: case TYPE_APPLICATION_ATTACHED_DIALOG: return APPLICATION_PANEL_SUBLAYER; case TYPE_APPLICATION_MEDIA: return APPLICATION_MEDIA_SUBLAYER; case TYPE_APPLICATION_MEDIA_OVERLAY: return APPLICATION_MEDIA_OVERLAY_SUBLAYER; case TYPE_APPLICATION_SUB_PANEL: return APPLICATION_SUB_PANEL_SUBLAYER; } Log.e(TAG, "Unknown sub-window type: " + type); return 0; } public int getMaxWallpaperLayer() { return STATUS_BAR_LAYER; } public boolean doesForceHide(WindowState win, WindowManager.LayoutParams attrs) { return attrs.type == WindowManager.LayoutParams.TYPE_KEYGUARD; } public boolean canBeForceHidden(WindowState win, WindowManager.LayoutParams attrs) { return attrs.type != WindowManager.LayoutParams.TYPE_STATUS_BAR && attrs.type != WindowManager.LayoutParams.TYPE_WALLPAPER; } /** {@inheritDoc} */ public View addStartingWindow(IBinder appToken, String packageName, int theme, CharSequence nonLocalizedLabel, int labelRes, int icon) { if (!SHOW_STARTING_ANIMATIONS) { return null; } if (packageName == null) { return null; } Context context = mContext; boolean setTheme = false; //Log.i(TAG, "addStartingWindow " + packageName + ": nonLocalizedLabel=" // + nonLocalizedLabel + " theme=" + Integer.toHexString(theme)); if (theme != 0 || labelRes != 0) { try { context = context.createPackageContext(packageName, 0); if (theme != 0) { context.setTheme(theme); setTheme = true; } } catch (PackageManager.NameNotFoundException e) { // Ignore } } if 
(!setTheme) { context.setTheme(com.android.internal.R.style.Theme); } Window win = PolicyManager.makeNewWindow(context); if (win.getWindowStyle().getBoolean( com.android.internal.R.styleable.Window_windowDisablePreview, false)) { return null; } Resources r = context.getResources(); win.setTitle(r.getText(labelRes, nonLocalizedLabel)); win.setType( WindowManager.LayoutParams.TYPE_APPLICATION_STARTING); // Force the window flags: this is a fake window, so it is not really // touchable or focusable by the user. We also add in the ALT_FOCUSABLE_IM // flag because we do know that the next window will take input // focus, so we want to get the IME window up on top of us right away. win.setFlags( WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE| WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE| WindowManager.LayoutParams.FLAG_ALT_FOCUSABLE_IM, WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE| WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE| WindowManager.LayoutParams.FLAG_ALT_FOCUSABLE_IM); win.setLayout(WindowManager.LayoutParams.FILL_PARENT, WindowManager.LayoutParams.FILL_PARENT); final WindowManager.LayoutParams params = win.getAttributes(); params.token = appToken; params.packageName = packageName; params.windowAnimations = win.getWindowStyle().getResourceId( com.android.internal.R.styleable.Window_windowAnimationStyle, 0); params.setTitle("Starting " + packageName); try { WindowManagerImpl wm = (WindowManagerImpl) context.getSystemService(Context.WINDOW_SERVICE); View view = win.getDecorView(); if (win.isFloating()) { // Whoops, there is no way to display an animation/preview // of such a thing! After all that work... let's skip it. // (Note that we must do this here because it is in // getDecorView() where the theme is evaluated... maybe // we should peek the floating attribute from the theme // earlier.) return null; } if (localLOGV) Log.v( TAG, "Adding starting window for " + packageName + " / " + appToken + ": " + (view.getParent() != null ? view : null)); wm.addView(view, params); // Only return the view if it was successfully added to the // window manager... which we can tell by it having a parent. return view.getParent() != null ? view : null; } catch (WindowManagerImpl.BadTokenException e) { // ignore Log.w(TAG, appToken + " already running, starting window not displayed"); } return null; } /** {@inheritDoc} */ public void removeStartingWindow(IBinder appToken, View window) { // RuntimeException e = new RuntimeException(); // Log.i(TAG, "remove " + appToken + " " + window, e); if (localLOGV) Log.v( TAG, "Removing starting window for " + appToken + ": " + window); if (window != null) { WindowManagerImpl wm = (WindowManagerImpl) mContext.getSystemService(Context.WINDOW_SERVICE); wm.removeView(window); } } /** * Preflight adding a window to the system. * * Currently enforces that three window types are singletons: * <ul> * <li>STATUS_BAR_TYPE</li> * <li>KEYGUARD_TYPE</li> * </ul> * * @param win The window to be added * @param attrs Information about the window to be added * * @return If ok, WindowManagerImpl.ADD_OKAY. 
If too many singletons, WindowManagerImpl.ADD_MULTIPLE_SINGLETON */ public int prepareAddWindowLw(WindowState win, WindowManager.LayoutParams attrs) { switch (attrs.type) { case TYPE_STATUS_BAR: if (mStatusBar != null) { return WindowManagerImpl.ADD_MULTIPLE_SINGLETON; } mStatusBar = win; break; case TYPE_KEYGUARD: if (mKeyguard != null) { return WindowManagerImpl.ADD_MULTIPLE_SINGLETON; } mKeyguard = win; break; } return WindowManagerImpl.ADD_OKAY; } /** {@inheritDoc} */ public void removeWindowLw(WindowState win) { if (mStatusBar == win) { mStatusBar = null; } else if (mKeyguard == win) { mKeyguard = null; } } static final boolean PRINT_ANIM = false; /** {@inheritDoc} */ public int selectAnimationLw(WindowState win, int transit) { if (PRINT_ANIM) Log.i(TAG, "selectAnimation in " + win + ": transit=" + transit); if (transit == TRANSIT_PREVIEW_DONE) { if (win.hasAppShownWindows()) { if (PRINT_ANIM) Log.i(TAG, "**** STARTING EXIT"); return com.android.internal.R.anim.app_starting_exit; } } return 0; } public Animation createForceHideEnterAnimation() { return AnimationUtils.loadAnimation(mContext, com.android.internal.R.anim.lock_screen_behind_enter); } static ITelephony getPhoneInterface() { return ITelephony.Stub.asInterface(ServiceManager.checkService(Context.TELEPHONY_SERVICE)); } static IAudioService getAudioInterface() { return IAudioService.Stub.asInterface(ServiceManager.checkService(Context.AUDIO_SERVICE)); } boolean keyguardOn() { return keyguardIsShowingTq() || inKeyguardRestrictedKeyInputMode(); } private static final int[] WINDOW_TYPES_WHERE_HOME_DOESNT_WORK = { WindowManager.LayoutParams.TYPE_SYSTEM_ALERT, WindowManager.LayoutParams.TYPE_SYSTEM_ERROR, }; /** {@inheritDoc} */ public boolean interceptKeyTi(WindowState win, int code, int metaKeys, boolean down, int repeatCount, int flags) { boolean keyguardOn = keyguardOn(); if (false) { Log.d(TAG, "interceptKeyTi code=" + code + " down=" + down + " repeatCount=" + repeatCount + " keyguardOn=" + keyguardOn + " mHomePressed=" + mHomePressed); } // Clear a pending HOME longpress if the user releases Home // TODO: This could probably be inside the next bit of logic, but that code // turned out to be a bit fragile so I'm doing it here explicitly, for now. if ((code == KeyEvent.KEYCODE_HOME) && !down) { mHandler.removeCallbacks(mHomeLongPress); } // If the HOME button is currently being held, then we do special // chording with it. if (mHomePressed) { // If we have released the home key, and didn't do anything else // while it was pressed, then it is time to go home! if (code == KeyEvent.KEYCODE_HOME) { if (!down) { mHomePressed = false; if ((flags&KeyEvent.FLAG_CANCELED) == 0) { // If an incoming call is ringing, HOME is totally disabled. // (The user is already on the InCallScreen at this point, // and his ONLY options are to answer or reject the call.) 
boolean incomingRinging = false; try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { incomingRinging = phoneServ.isRinging(); } else { Log.w(TAG, "Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "RemoteException from getPhoneInterface()", ex); } if (incomingRinging) { Log.i(TAG, "Ignoring HOME; there's a ringing incoming call."); } else { launchHomeFromHotKey(); } } else { Log.i(TAG, "Ignoring HOME; event canceled."); } } } return true; } // First we always handle the home key here, so applications // can never break it, although if keyguard is on, we do let // it handle it, because that gives us the correct 5 second // timeout. if (code == KeyEvent.KEYCODE_HOME) { // If a system window has focus, then it doesn't make sense // right now to interact with applications. WindowManager.LayoutParams attrs = win != null ? win.getAttrs() : null; if (attrs != null) { final int type = attrs.type; if (type == WindowManager.LayoutParams.TYPE_KEYGUARD || type == WindowManager.LayoutParams.TYPE_KEYGUARD_DIALOG) { // the "app" is keyguard, so give it the key return false; } final int typeCount = WINDOW_TYPES_WHERE_HOME_DOESNT_WORK.length; for (int i=0; i<typeCount; i++) { if (type == WINDOW_TYPES_WHERE_HOME_DOESNT_WORK[i]) { // don't do anything, but also don't pass it to the app return true; } } } if (down && repeatCount == 0) { if (!keyguardOn) { mHandler.postDelayed(mHomeLongPress, ViewConfiguration.getGlobalActionKeyTimeout()); } mHomePressed = true; } return true; } else if (code == KeyEvent.KEYCODE_MENU) { // Hijack modified menu keys for debugging features final int chordBug = KeyEvent.META_SHIFT_ON; if (down && repeatCount == 0) { if (mEnableShiftMenuBugReports && (metaKeys & chordBug) == chordBug) { Intent intent = new Intent(Intent.ACTION_BUG_REPORT); mContext.sendOrderedBroadcast(intent, null); return true; } else if (SHOW_PROCESSES_ON_ALT_MENU && (metaKeys & KeyEvent.META_ALT_ON) == KeyEvent.META_ALT_ON) { Intent service = new Intent(); service.setClassName(mContext, "com.android.server.LoadAverageService"); ContentResolver res = mContext.getContentResolver(); boolean shown = Settings.System.getInt( res, Settings.System.SHOW_PROCESSES, 0) != 0; if (!shown) { mContext.startService(service); } else { mContext.stopService(service); } Settings.System.putInt( res, Settings.System.SHOW_PROCESSES, shown ? 0 : 1); return true; } } } else if (code == KeyEvent.KEYCODE_NOTIFICATION) { if (down) { // this key doesn't exist on current hardware, but if a device // didn't have a touchscreen, it would want one of these to open // the status bar. 
IStatusBar sbs = IStatusBar.Stub.asInterface(ServiceManager.getService("statusbar")); if (sbs != null) { try { sbs.toggle(); } catch (RemoteException e) { // we're screwed anyway, since it's in this process throw new RuntimeException(e); } } } return true; } else if (code == KeyEvent.KEYCODE_SEARCH) { if (down) { if (repeatCount == 0) { mSearchKeyPressed = true; } } else { mSearchKeyPressed = false; if (mConsumeSearchKeyUp) { // Consume the up-event mConsumeSearchKeyUp = false; return true; } } } // Shortcuts are invoked through Search+key, so intercept those here if (mSearchKeyPressed) { if (down && repeatCount == 0 && !keyguardOn) { Intent shortcutIntent = mShortcutManager.getIntent(code, metaKeys); if (shortcutIntent != null) { shortcutIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); mContext.startActivity(shortcutIntent); /* * We launched an app, so the up-event of the search key * should be consumed */ mConsumeSearchKeyUp = true; return true; } } } return false; } /** * A home key -> launch home action was detected. Take the appropriate action * given the situation with the keyguard. */ void launchHomeFromHotKey() { if (mKeyguardMediator.isShowingAndNotHidden()) { // don't launch home if keyguard showing } else if (!mHideLockScreen && mKeyguardMediator.isInputRestricted()) { // when in keyguard restricted mode, must first verify unlock // before launching home mKeyguardMediator.verifyUnlock(new OnKeyguardExitResult() { public void onKeyguardExitResult(boolean success) { if (success) { try { ActivityManagerNative.getDefault().stopAppSwitches(); } catch (RemoteException e) { } sendCloseSystemWindows(SYSTEM_DIALOG_REASON_HOME_KEY); startDockOrHome(); } } }); } else { // no keyguard stuff to worry about, just launch home! try { ActivityManagerNative.getDefault().stopAppSwitches(); } catch (RemoteException e) { } sendCloseSystemWindows(SYSTEM_DIALOG_REASON_HOME_KEY); startDockOrHome(); } } public void getContentInsetHintLw(WindowManager.LayoutParams attrs, Rect contentInset) { final int fl = attrs.flags; if ((fl & (FLAG_LAYOUT_IN_SCREEN | FLAG_FULLSCREEN | FLAG_LAYOUT_INSET_DECOR)) == (FLAG_LAYOUT_IN_SCREEN | FLAG_LAYOUT_INSET_DECOR)) { contentInset.set(mCurLeft, mCurTop, mW - mCurRight, mH - mCurBottom); } else { contentInset.setEmpty(); } } /** {@inheritDoc} */ public void beginLayoutLw(int displayWidth, int displayHeight) { mW = displayWidth; mH = displayHeight; mDockLeft = mContentLeft = mCurLeft = 0; mDockTop = mContentTop = mCurTop = 0; mDockRight = mContentRight = mCurRight = displayWidth; mDockBottom = mContentBottom = mCurBottom = displayHeight; mDockLayer = 0x10000000; mTopFullscreenOpaqueWindowState = null; mForceStatusBar = false; mHideLockScreen = false; mDismissKeyguard = false; // decide where the status bar goes ahead of time if (mStatusBar != null) { final Rect pf = mTmpParentFrame; final Rect df = mTmpDisplayFrame; final Rect vf = mTmpVisibleFrame; pf.left = df.left = vf.left = 0; pf.top = df.top = vf.top = 0; pf.right = df.right = vf.right = displayWidth; pf.bottom = df.bottom = vf.bottom = displayHeight; mStatusBar.computeFrameLw(pf, df, vf, vf); if (mStatusBar.isVisibleLw()) { // If the status bar is hidden, we don't want to cause // windows behind it to scroll. 
mDockTop = mContentTop = mCurTop = mStatusBar.getFrameLw().bottom; if (DEBUG_LAYOUT) Log.v(TAG, "Status bar: mDockBottom=" + mDockBottom + " mContentBottom=" + mContentBottom + " mCurBottom=" + mCurBottom); } } } void setAttachedWindowFrames(WindowState win, int fl, int sim, WindowState attached, boolean insetDecors, Rect pf, Rect df, Rect cf, Rect vf) { if (win.getSurfaceLayer() > mDockLayer && attached.getSurfaceLayer() < mDockLayer) { // Here's a special case: if this attached window is a panel that is // above the dock window, and the window it is attached to is below // the dock window, then the frames we computed for the window it is // attached to can not be used because the dock is effectively part // of the underlying window and the attached window is floating on top // of the whole thing. So, we ignore the attached window and explicitly // compute the frames that would be appropriate without the dock. df.left = cf.left = vf.left = mDockLeft; df.top = cf.top = vf.top = mDockTop; df.right = cf.right = vf.right = mDockRight; df.bottom = cf.bottom = vf.bottom = mDockBottom; } else { // The effective display frame of the attached window depends on // whether it is taking care of insetting its content. If not, // we need to use the parent's content frame so that the entire // window is positioned within that content. Otherwise we can use // the display frame and let the attached window take care of // positioning its content appropriately. if ((sim & SOFT_INPUT_MASK_ADJUST) != SOFT_INPUT_ADJUST_RESIZE) { cf.set(attached.getDisplayFrameLw()); } else { // If the window is resizing, then we want to base the content // frame on our attached content frame to resize... however, // things can be tricky if the attached window is NOT in resize // mode, in which case its content frame will be larger. // Ungh. So to deal with that, make sure the content frame // we end up using is not covering the IM dock. cf.set(attached.getContentFrameLw()); if (attached.getSurfaceLayer() < mDockLayer) { if (cf.left < mContentLeft) cf.left = mContentLeft; if (cf.top < mContentTop) cf.top = mContentTop; if (cf.right > mContentRight) cf.right = mContentRight; if (cf.bottom > mContentBottom) cf.bottom = mContentBottom; } } df.set(insetDecors ? attached.getDisplayFrameLw() : cf); vf.set(attached.getVisibleFrameLw()); } // The LAYOUT_IN_SCREEN flag is used to determine whether the attached // window should be positioned relative to its parent or the entire // screen. pf.set((fl & FLAG_LAYOUT_IN_SCREEN) == 0 ? attached.getFrameLw() : df); } /** {@inheritDoc} */ public void layoutWindowLw(WindowState win, WindowManager.LayoutParams attrs, WindowState attached) { // we've already done the status bar if (win == mStatusBar) { return; } if (false) { if ("com.google.android.youtube".equals(attrs.packageName) && attrs.type == WindowManager.LayoutParams.TYPE_APPLICATION_PANEL) { Log.i(TAG, "GOTCHA!"); } } final int fl = attrs.flags; final int sim = attrs.softInputMode; final Rect pf = mTmpParentFrame; final Rect df = mTmpDisplayFrame; final Rect cf = mTmpContentFrame; final Rect vf = mTmpVisibleFrame; if (attrs.type == TYPE_INPUT_METHOD) { pf.left = df.left = cf.left = vf.left = mDockLeft; pf.top = df.top = cf.top = vf.top = mDockTop; pf.right = df.right = cf.right = vf.right = mDockRight; pf.bottom = df.bottom = cf.bottom = vf.bottom = mDockBottom; // IM dock windows always go to the bottom of the screen. 
attrs.gravity = Gravity.BOTTOM; mDockLayer = win.getSurfaceLayer(); } else { if ((fl & (FLAG_LAYOUT_IN_SCREEN | FLAG_FULLSCREEN | FLAG_LAYOUT_INSET_DECOR)) == (FLAG_LAYOUT_IN_SCREEN | FLAG_LAYOUT_INSET_DECOR)) { // This is the case for a normal activity window: we want it // to cover all of the screen space, and it can take care of // moving its contents to account for screen decorations that // intrude into that space. if (attached != null) { // If this window is attached to another, our display // frame is the same as the one we are attached to. setAttachedWindowFrames(win, fl, sim, attached, true, pf, df, cf, vf); } else { pf.left = df.left = 0; pf.top = df.top = 0; pf.right = df.right = mW; pf.bottom = df.bottom = mH; if ((sim & SOFT_INPUT_MASK_ADJUST) != SOFT_INPUT_ADJUST_RESIZE) { cf.left = mDockLeft; cf.top = mDockTop; cf.right = mDockRight; cf.bottom = mDockBottom; } else { cf.left = mContentLeft; cf.top = mContentTop; cf.right = mContentRight; cf.bottom = mContentBottom; } vf.left = mCurLeft; vf.top = mCurTop; vf.right = mCurRight; vf.bottom = mCurBottom; } } else if ((fl & FLAG_LAYOUT_IN_SCREEN) != 0) { // A window that has requested to fill the entire screen just // gets everything, period. pf.left = df.left = cf.left = 0; pf.top = df.top = cf.top = 0; pf.right = df.right = cf.right = mW; pf.bottom = df.bottom = cf.bottom = mH; vf.left = mCurLeft; vf.top = mCurTop; vf.right = mCurRight; vf.bottom = mCurBottom; } else if (attached != null) { // A child window should be placed inside of the same visible // frame that its parent had. setAttachedWindowFrames(win, fl, sim, attached, false, pf, df, cf, vf); } else { // Otherwise, a normal window must be placed inside the content // of all screen decorations. pf.left = mContentLeft; pf.top = mContentTop; pf.right = mContentRight; pf.bottom = mContentBottom; if ((sim & SOFT_INPUT_MASK_ADJUST) != SOFT_INPUT_ADJUST_RESIZE) { df.left = cf.left = mDockLeft; df.top = cf.top = mDockTop; df.right = cf.right = mDockRight; df.bottom = cf.bottom = mDockBottom; } else { df.left = cf.left = mContentLeft; df.top = cf.top = mContentTop; df.right = cf.right = mContentRight; df.bottom = cf.bottom = mContentBottom; } vf.left = mCurLeft; vf.top = mCurTop; vf.right = mCurRight; vf.bottom = mCurBottom; } } if ((fl & FLAG_LAYOUT_NO_LIMITS) != 0) { df.left = df.top = cf.left = cf.top = vf.left = vf.top = -10000; df.right = df.bottom = cf.right = cf.bottom = vf.right = vf.bottom = 10000; } if (DEBUG_LAYOUT) Log.v(TAG, "Compute frame " + attrs.getTitle() + ": sim=#" + Integer.toHexString(sim) + " pf=" + pf.toShortString() + " df=" + df.toShortString() + " cf=" + cf.toShortString() + " vf=" + vf.toShortString()); if (false) { if ("com.google.android.youtube".equals(attrs.packageName) && attrs.type == WindowManager.LayoutParams.TYPE_APPLICATION_PANEL) { if (true || localLOGV) Log.v(TAG, "Computing frame of " + win + ": sim=#" + Integer.toHexString(sim) + " pf=" + pf.toShortString() + " df=" + df.toShortString() + " cf=" + cf.toShortString() + " vf=" + vf.toShortString()); } } win.computeFrameLw(pf, df, cf, vf); if (mTopFullscreenOpaqueWindowState == null && win.isVisibleOrBehindKeyguardLw()) { if ((attrs.flags & FLAG_FORCE_NOT_FULLSCREEN) != 0) { mForceStatusBar = true; } if (attrs.type >= FIRST_APPLICATION_WINDOW && attrs.type <= LAST_APPLICATION_WINDOW && win.fillsScreenLw(mW, mH, false, false)) { if (DEBUG_LAYOUT) Log.v(TAG, "Fullscreen window: " + win); mTopFullscreenOpaqueWindowState = win; if ((attrs.flags & FLAG_SHOW_WHEN_LOCKED) != 0) { if (localLOGV) 
Log.v(TAG, "Setting mHideLockScreen to true by win " + win); mHideLockScreen = true; } } if ((attrs.flags & FLAG_DISMISS_KEYGUARD) != 0) { if (localLOGV) Log.v(TAG, "Setting mDismissKeyguard to true by win " + win); mDismissKeyguard = true; } } // Dock windows carve out the bottom of the screen, so normal windows // can't appear underneath them. if (attrs.type == TYPE_INPUT_METHOD && !win.getGivenInsetsPendingLw()) { int top = win.getContentFrameLw().top; top += win.getGivenContentInsetsLw().top; if (mContentBottom > top) { mContentBottom = top; } top = win.getVisibleFrameLw().top; top += win.getGivenVisibleInsetsLw().top; if (mCurBottom > top) { mCurBottom = top; } if (DEBUG_LAYOUT) Log.v(TAG, "Input method: mDockBottom=" + mDockBottom + " mContentBottom=" + mContentBottom + " mCurBottom=" + mCurBottom); } } /** {@inheritDoc} */ public int finishLayoutLw() { int changes = 0; boolean hiding = false; if (mStatusBar != null) { if (localLOGV) Log.i(TAG, "force=" + mForceStatusBar + " top=" + mTopFullscreenOpaqueWindowState); if (mForceStatusBar) { if (DEBUG_LAYOUT) Log.v(TAG, "Showing status bar"); if (mStatusBar.showLw(true)) changes |= FINISH_LAYOUT_REDO_LAYOUT; } else if (mTopFullscreenOpaqueWindowState != null) { //Log.i(TAG, "frame: " + mTopFullscreenOpaqueWindowState.getFrameLw() // + " shown frame: " + mTopFullscreenOpaqueWindowState.getShownFrameLw()); //Log.i(TAG, "attr: " + mTopFullscreenOpaqueWindowState.getAttrs()); WindowManager.LayoutParams lp = mTopFullscreenOpaqueWindowState.getAttrs(); boolean hideStatusBar = (lp.flags & WindowManager.LayoutParams.FLAG_FULLSCREEN) != 0; if (hideStatusBar) { if (DEBUG_LAYOUT) Log.v(TAG, "Hiding status bar"); if (mStatusBar.hideLw(true)) changes |= FINISH_LAYOUT_REDO_LAYOUT; hiding = true; } else { if (DEBUG_LAYOUT) Log.v(TAG, "Showing status bar"); if (mStatusBar.showLw(true)) changes |= FINISH_LAYOUT_REDO_LAYOUT; } } } // Hide the key guard if a visible window explicitly specifies that it wants to be displayed // when the screen is locked if (mKeyguard != null) { if (localLOGV) Log.v(TAG, "finishLayoutLw::mHideKeyguard="+mHideLockScreen); if (mDismissKeyguard && !mKeyguardMediator.isSecure()) { if (mKeyguard.hideLw(false)) { changes |= FINISH_LAYOUT_REDO_LAYOUT | FINISH_LAYOUT_REDO_CONFIG | FINISH_LAYOUT_REDO_WALLPAPER; } if (mKeyguardMediator.isShowing()) { mHandler.post(new Runnable() { public void run() { mKeyguardMediator.keyguardDone(false, false); } }); } } else if (mHideLockScreen) { if (mKeyguard.hideLw(false)) { mKeyguardMediator.setHidden(true); changes |= FINISH_LAYOUT_REDO_LAYOUT | FINISH_LAYOUT_REDO_CONFIG | FINISH_LAYOUT_REDO_WALLPAPER; } } else { if (mKeyguard.showLw(false)) { mKeyguardMediator.setHidden(false); changes |= FINISH_LAYOUT_REDO_LAYOUT | FINISH_LAYOUT_REDO_CONFIG | FINISH_LAYOUT_REDO_WALLPAPER; } } } if (changes != 0 && hiding) { IStatusBar sbs = IStatusBar.Stub.asInterface(ServiceManager.getService("statusbar")); if (sbs != null) { try { // Make sure the window shade is hidden. 
sbs.deactivate(); } catch (RemoteException e) { } } } return changes; } /** {@inheritDoc} */ public void beginAnimationLw(int displayWidth, int displayHeight) { } /** {@inheritDoc} */ public void animatingWindowLw(WindowState win, WindowManager.LayoutParams attrs) { } /** {@inheritDoc} */ public boolean finishAnimationLw() { return false; } /** {@inheritDoc} */ public boolean preprocessInputEventTq(RawInputEvent event) { switch (event.type) { case RawInputEvent.EV_SW: if (event.keycode == RawInputEvent.SW_LID) { // lid changed state mLidOpen = event.value == 0; boolean awakeNow = mKeyguardMediator.doLidChangeTq(mLidOpen); updateRotation(Surface.FLAGS_ORIENTATION_ANIMATION_DISABLE); if (awakeNow) { // If the lid opening and we don't have to keep the // keyguard up, then we can turn on the screen // immediately. mKeyguardMediator.pokeWakelock(); } else if (keyguardIsShowingTq()) { if (mLidOpen) { // If we are opening the lid and not hiding the // keyguard, then we need to have it turn on the // screen once it is shown. mKeyguardMediator.onWakeKeyWhenKeyguardShowingTq( KeyEvent.KEYCODE_POWER); } } else { // Light up the keyboard if we are sliding up. if (mLidOpen) { mPowerManager.userActivity(SystemClock.uptimeMillis(), false, LocalPowerManager.BUTTON_EVENT); } else { mPowerManager.userActivity(SystemClock.uptimeMillis(), false, LocalPowerManager.OTHER_EVENT); } } } } return false; } /** {@inheritDoc} */ public boolean isAppSwitchKeyTqTiLwLi(int keycode) { return keycode == KeyEvent.KEYCODE_HOME || keycode == KeyEvent.KEYCODE_ENDCALL; } /** {@inheritDoc} */ public boolean isMovementKeyTi(int keycode) { switch (keycode) { case KeyEvent.KEYCODE_DPAD_UP: case KeyEvent.KEYCODE_DPAD_DOWN: case KeyEvent.KEYCODE_DPAD_LEFT: case KeyEvent.KEYCODE_DPAD_RIGHT: return true; } return false; } /** * @return Whether a telephone call is in progress right now. */ boolean isInCall() { final ITelephony phone = getPhoneInterface(); if (phone == null) { Log.w(TAG, "couldn't get ITelephony reference"); return false; } try { return phone.isOffhook(); } catch (RemoteException e) { Log.w(TAG, "ITelephony.isOffhhook threw RemoteException " + e); return false; } } /** * @return Whether music is being played right now. */ boolean isMusicActive() { final AudioManager am = (AudioManager)mContext.getSystemService(Context.AUDIO_SERVICE); if (am == null) { Log.w(TAG, "isMusicActive: couldn't get AudioManager reference"); return false; } return am.isMusicActive(); } /** * Tell the audio service to adjust the volume appropriate to the event. * @param keycode */ void handleVolumeKey(int stream, int keycode) { final IAudioService audio = getAudioInterface(); if (audio == null) { Log.w(TAG, "handleVolumeKey: couldn't get IAudioService reference"); return; } try { // since audio is playing, we shouldn't have to hold a wake lock // during the call, but we do it as a precaution for the rare possibility // that the music stops right before we call this mBroadcastWakeLock.acquire(); audio.adjustStreamVolume(stream, keycode == KeyEvent.KEYCODE_VOLUME_UP ? 
AudioManager.ADJUST_RAISE : AudioManager.ADJUST_LOWER, 0); } catch (RemoteException e) { Log.w(TAG, "IAudioService.adjustStreamVolume() threw RemoteException " + e); } finally { mBroadcastWakeLock.release(); } } static boolean isMediaKey(int code) { if (code == KeyEvent.KEYCODE_HEADSETHOOK || code == KeyEvent.KEYCODE_MEDIA_PLAY_PAUSE || code == KeyEvent.KEYCODE_MEDIA_STOP || code == KeyEvent.KEYCODE_MEDIA_NEXT || code == KeyEvent.KEYCODE_MEDIA_PREVIOUS || code == KeyEvent.KEYCODE_MEDIA_PREVIOUS || code == KeyEvent.KEYCODE_MEDIA_FAST_FORWARD) { return true; } return false; } /** {@inheritDoc} */ public int interceptKeyTq(RawInputEvent event, boolean screenIsOn) { int result = ACTION_PASS_TO_USER; final boolean isWakeKey = isWakeKeyTq(event); - final boolean keyguardShowing = keyguardIsShowingTq(); + // If screen is off then we treat the case where the keyguard is open but hidden + // the same as if it were open and in front. + // This will prevent any keys other than the power button from waking the screen + // when the keyguard is hidden by another activity. + final boolean keyguardActive = (screenIsOn ? + mKeyguardMediator.isShowingAndNotHidden() : + mKeyguardMediator.isShowing()); if (false) { Log.d(TAG, "interceptKeyTq event=" + event + " keycode=" + event.keycode - + " screenIsOn=" + screenIsOn + " keyguardShowing=" + keyguardShowing); + + " screenIsOn=" + screenIsOn + " keyguardActive=" + keyguardActive); } - if (keyguardShowing) { + if (keyguardActive) { if (screenIsOn) { // when the screen is on, always give the event to the keyguard result |= ACTION_PASS_TO_USER; } else { // otherwise, don't pass it to the user result &= ~ACTION_PASS_TO_USER; final boolean isKeyDown = (event.type == RawInputEvent.EV_KEY) && (event.value != 0); if (isWakeKey && isKeyDown) { // tell the mediator about a wake key, it may decide to // turn on the screen depending on whether the key is // appropriate. if (!mKeyguardMediator.onWakeKeyWhenKeyguardShowingTq(event.keycode) && (event.keycode == KeyEvent.KEYCODE_VOLUME_DOWN || event.keycode == KeyEvent.KEYCODE_VOLUME_UP)) { if (isInCall()) { // if the keyguard didn't wake the device, we are in call, and // it is a volume key, turn on the screen so that the user // can more easily adjust the in call volume. mKeyguardMediator.pokeWakelock(); } else if (isMusicActive()) { // when keyguard is showing and screen off, we need // to handle the volume key for music here handleVolumeKey(AudioManager.STREAM_MUSIC, event.keycode); } } } } } else if (!screenIsOn) { // If we are in-call with screen off and keyguard is not showing, // then handle the volume key ourselves. // This is necessary because the phone app will disable the keyguard // when the proximity sensor is in use. 
if (isInCall() && event.type == RawInputEvent.EV_KEY && (event.keycode == KeyEvent.KEYCODE_VOLUME_DOWN || event.keycode == KeyEvent.KEYCODE_VOLUME_UP)) { result &= ~ACTION_PASS_TO_USER; handleVolumeKey(AudioManager.STREAM_VOICE_CALL, event.keycode); } if (isWakeKey) { // a wake key has a sole purpose of waking the device; don't pass // it to the user result |= ACTION_POKE_USER_ACTIVITY; result &= ~ACTION_PASS_TO_USER; } } int type = event.type; int code = event.keycode; boolean down = event.value != 0; if (type == RawInputEvent.EV_KEY) { if (code == KeyEvent.KEYCODE_ENDCALL || code == KeyEvent.KEYCODE_POWER) { if (down) { boolean handled = false; // key repeats are generated by the window manager, and we don't see them // here, so unless the driver is doing something it shouldn't be, we know // this is the real press event. ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { try { if (code == KeyEvent.KEYCODE_ENDCALL) { handled = phoneServ.endCall(); } else if (code == KeyEvent.KEYCODE_POWER && phoneServ.isRinging()) { // Pressing power during incoming call should silence the ringer phoneServ.silenceRinger(); handled = true; } } catch (RemoteException ex) { Log.w(TAG, "ITelephony threw RemoteException" + ex); } } else { Log.w(TAG, "!!! Unable to find ITelephony interface !!!"); } // power button should turn off screen in addition to hanging up the phone if ((handled && code != KeyEvent.KEYCODE_POWER) || !screenIsOn) { mShouldTurnOffOnKeyUp = false; } else { // only try to turn off the screen if we didn't already hang up mShouldTurnOffOnKeyUp = true; mHandler.postDelayed(mPowerLongPress, ViewConfiguration.getGlobalActionKeyTimeout()); result &= ~ACTION_PASS_TO_USER; } } else { mHandler.removeCallbacks(mPowerLongPress); if (mShouldTurnOffOnKeyUp) { mShouldTurnOffOnKeyUp = false; boolean gohome, sleeps; if (code == KeyEvent.KEYCODE_ENDCALL) { gohome = (mEndcallBehavior & ENDCALL_HOME) != 0; sleeps = (mEndcallBehavior & ENDCALL_SLEEPS) != 0; } else { gohome = false; sleeps = true; } - if (keyguardShowing + if (keyguardActive || (sleeps && !gohome) || (gohome && !goHome() && sleeps)) { // they must already be on the keyguad or home screen, // go to sleep instead Log.d(TAG, "I'm tired mEndcallBehavior=0x" + Integer.toHexString(mEndcallBehavior)); result &= ~ACTION_POKE_USER_ACTIVITY; result |= ACTION_GO_TO_SLEEP; } result &= ~ACTION_PASS_TO_USER; } } } else if (isMediaKey(code)) { // This key needs to be handled even if the screen is off. // If others need to be handled while it's off, this is a reasonable // pattern to follow. if ((result & ACTION_PASS_TO_USER) == 0) { // Only do this if we would otherwise not pass it to the user. In that // case, the PhoneWindow class will do the same thing, except it will // only do it if the showing app doesn't process the key on its own. KeyEvent keyEvent = new KeyEvent(event.when, event.when, down ? KeyEvent.ACTION_DOWN : KeyEvent.ACTION_UP, code, 0); mBroadcastWakeLock.acquire(); mHandler.post(new PassHeadsetKey(keyEvent)); } } else if (code == KeyEvent.KEYCODE_CALL) { // If an incoming call is ringing, answer it! // (We handle this key here, rather than in the InCallScreen, to make // sure we'll respond to the key even if the InCallScreen hasn't come to // the foreground yet.) // We answer the call on the DOWN event, to agree with // the "fallback" behavior in the InCallScreen. 
if (down) { try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { if (phoneServ.isRinging()) { Log.i(TAG, "interceptKeyTq:" + " CALL key-down while ringing: Answer the call!"); phoneServ.answerRingingCall(); // And *don't* pass this key thru to the current activity // (which is presumably the InCallScreen.) result &= ~ACTION_PASS_TO_USER; } } else { Log.w(TAG, "CALL button: Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "CALL button: RemoteException from getPhoneInterface()", ex); } } } else if ((code == KeyEvent.KEYCODE_VOLUME_UP) || (code == KeyEvent.KEYCODE_VOLUME_DOWN)) { // If an incoming call is ringing, either VOLUME key means // "silence ringer". We handle these keys here, rather than // in the InCallScreen, to make sure we'll respond to them // even if the InCallScreen hasn't come to the foreground yet. // Look for the DOWN event here, to agree with the "fallback" // behavior in the InCallScreen. if (down) { try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { if (phoneServ.isRinging()) { Log.i(TAG, "interceptKeyTq:" + " VOLUME key-down while ringing: Silence ringer!"); // Silence the ringer. (It's safe to call this // even if the ringer has already been silenced.) phoneServ.silenceRinger(); // And *don't* pass this key thru to the current activity // (which is probably the InCallScreen.) result &= ~ACTION_PASS_TO_USER; } } else { Log.w(TAG, "VOLUME button: Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "VOLUME button: RemoteException from getPhoneInterface()", ex); } } } } return result; } class PassHeadsetKey implements Runnable { KeyEvent mKeyEvent; PassHeadsetKey(KeyEvent keyEvent) { mKeyEvent = keyEvent; } public void run() { if (ActivityManagerNative.isSystemReady()) { Intent intent = new Intent(Intent.ACTION_MEDIA_BUTTON, null); intent.putExtra(Intent.EXTRA_KEY_EVENT, mKeyEvent); mContext.sendOrderedBroadcast(intent, null, mBroadcastDone, mHandler, Activity.RESULT_OK, null, null); } } } BroadcastReceiver mBroadcastDone = new BroadcastReceiver() { public void onReceive(Context context, Intent intent) { mBroadcastWakeLock.release(); } }; BroadcastReceiver mBatteryReceiver = new BroadcastReceiver() { public void onReceive(Context context, Intent intent) { updatePlugged(intent); updateDockKeepingScreenOn(); } }; BroadcastReceiver mDockReceiver = new BroadcastReceiver() { public void onReceive(Context context, Intent intent) { mDockState = intent.getIntExtra(Intent.EXTRA_DOCK_STATE, Intent.EXTRA_DOCK_STATE_UNDOCKED); boolean watchBattery = mDockState != Intent.EXTRA_DOCK_STATE_UNDOCKED; if (watchBattery != mRegisteredBatteryReceiver) { mRegisteredBatteryReceiver = watchBattery; if (watchBattery) { updatePlugged(mContext.registerReceiver(mBatteryReceiver, mBatteryStatusFilter)); } else { mContext.unregisterReceiver(mBatteryReceiver); } } updateRotation(Surface.FLAGS_ORIENTATION_ANIMATION_DISABLE); updateDockKeepingScreenOn(); updateOrientationListenerLp(); } }; /** {@inheritDoc} */ public boolean isWakeRelMovementTq(int device, int classes, RawInputEvent event) { // if it's tagged with one of the wake bits, it wakes up the device return ((event.flags & (FLAG_WAKE | FLAG_WAKE_DROPPED)) != 0); } /** {@inheritDoc} */ public boolean isWakeAbsMovementTq(int device, int classes, RawInputEvent event) { // if it's tagged with one of the wake bits, it wakes up the device return ((event.flags & (FLAG_WAKE | FLAG_WAKE_DROPPED)) != 0); } /** * Given the current state of the 
world, should this key wake up the device? */ protected boolean isWakeKeyTq(RawInputEvent event) { // There are not key maps for trackball devices, but we'd still // like to have pressing it wake the device up, so force it here. int keycode = event.keycode; int flags = event.flags; if (keycode == RawInputEvent.BTN_MOUSE) { flags |= WindowManagerPolicy.FLAG_WAKE; } return (flags & (WindowManagerPolicy.FLAG_WAKE | WindowManagerPolicy.FLAG_WAKE_DROPPED)) != 0; } /** {@inheritDoc} */ public void screenTurnedOff(int why) { EventLog.writeEvent(70000, 0); mKeyguardMediator.onScreenTurnedOff(why); synchronized (mLock) { mScreenOn = false; updateOrientationListenerLp(); } } /** {@inheritDoc} */ public void screenTurnedOn() { EventLog.writeEvent(70000, 1); mKeyguardMediator.onScreenTurnedOn(); synchronized (mLock) { mScreenOn = true; updateOrientationListenerLp(); } } /** {@inheritDoc} */ public void enableKeyguard(boolean enabled) { mKeyguardMediator.setKeyguardEnabled(enabled); } /** {@inheritDoc} */ public void exitKeyguardSecurely(OnKeyguardExitResult callback) { mKeyguardMediator.verifyUnlock(callback); } private boolean keyguardIsShowingTq() { return mKeyguardMediator.isShowingAndNotHidden(); } /** {@inheritDoc} */ public boolean inKeyguardRestrictedKeyInputMode() { return mKeyguardMediator.isInputRestricted(); } void sendCloseSystemWindows() { sendCloseSystemWindows(mContext, null); } void sendCloseSystemWindows(String reason) { sendCloseSystemWindows(mContext, reason); } static void sendCloseSystemWindows(Context context, String reason) { if (ActivityManagerNative.isSystemReady()) { try { ActivityManagerNative.getDefault().closeSystemDialogs(reason); } catch (RemoteException e) { } } } public int rotationForOrientationLw(int orientation, int lastRotation, boolean displayEnabled) { if (mPortraitRotation < 0) { // Initialize the rotation angles for each orientation once. Display d = ((WindowManager)mContext.getSystemService(Context.WINDOW_SERVICE)) .getDefaultDisplay(); if (d.getWidth() > d.getHeight()) { mPortraitRotation = Surface.ROTATION_90; mLandscapeRotation = Surface.ROTATION_0; } else { mPortraitRotation = Surface.ROTATION_0; mLandscapeRotation = Surface.ROTATION_90; } } synchronized (mLock) { switch (orientation) { case ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE: //always return landscape if orientation set to landscape return mLandscapeRotation; case ActivityInfo.SCREEN_ORIENTATION_PORTRAIT: //always return portrait if orientation set to portrait return mPortraitRotation; } // case for nosensor meaning ignore sensor and consider only lid // or orientation sensor disabled //or case.unspecified if (mLidOpen) { return mLidOpenRotation; } else if (mDockState == Intent.EXTRA_DOCK_STATE_CAR && mCarDockRotation >= 0) { return mCarDockRotation; } else if (mDockState == Intent.EXTRA_DOCK_STATE_DESK && mDeskDockRotation >= 0) { return mDeskDockRotation; } else { if (useSensorForOrientationLp(orientation)) { // If the user has enabled auto rotation by default, do it. int curRotation = mOrientationListener.getCurrentRotation(); return curRotation >= 0 ? 
curRotation : lastRotation; } return Surface.ROTATION_0; } } } public boolean detectSafeMode() { try { int menuState = mWindowManager.getKeycodeState(KeyEvent.KEYCODE_MENU); int sState = mWindowManager.getKeycodeState(KeyEvent.KEYCODE_S); int dpadState = mWindowManager.getDPadKeycodeState(KeyEvent.KEYCODE_DPAD_CENTER); int trackballState = mWindowManager.getTrackballScancodeState(RawInputEvent.BTN_MOUSE); mSafeMode = menuState > 0 || sState > 0 || dpadState > 0 || trackballState > 0; performHapticFeedbackLw(null, mSafeMode ? HapticFeedbackConstants.SAFE_MODE_ENABLED : HapticFeedbackConstants.SAFE_MODE_DISABLED, true); if (mSafeMode) { Log.i(TAG, "SAFE MODE ENABLED (menu=" + menuState + " s=" + sState + " dpad=" + dpadState + " trackball=" + trackballState + ")"); } else { Log.i(TAG, "SAFE MODE not enabled"); } return mSafeMode; } catch (RemoteException e) { // Doom! (it's also local) throw new RuntimeException("window manager dead"); } } static long[] getLongIntArray(Resources r, int resid) { int[] ar = r.getIntArray(resid); if (ar == null) { return null; } long[] out = new long[ar.length]; for (int i=0; i<ar.length; i++) { out[i] = ar[i]; } return out; } /** {@inheritDoc} */ public void systemReady() { // tell the keyguard mKeyguardMediator.onSystemReady(); android.os.SystemProperties.set("dev.bootcomplete", "1"); synchronized (mLock) { updateOrientationListenerLp(); } } /** {@inheritDoc} */ public void enableScreenAfterBoot() { readLidState(); updateRotation(Surface.FLAGS_ORIENTATION_ANIMATION_DISABLE); } void updateDockKeepingScreenOn() { if (mPlugged != 0) { if (localLOGV) Log.v(TAG, "Update: mDockState=" + mDockState + " mPlugged=" + mPlugged + " mCarDockKeepsScreenOn" + mCarDockKeepsScreenOn + " mDeskDockKeepsScreenOn" + mDeskDockKeepsScreenOn); if (mDockState == Intent.EXTRA_DOCK_STATE_CAR && (mPlugged&mCarDockKeepsScreenOn) != 0) { if (!mDockWakeLock.isHeld()) { mDockWakeLock.acquire(); } return; } else if (mDockState == Intent.EXTRA_DOCK_STATE_DESK && (mPlugged&mDeskDockKeepsScreenOn) != 0) { if (!mDockWakeLock.isHeld()) { mDockWakeLock.acquire(); } return; } } if (mDockWakeLock.isHeld()) { mDockWakeLock.release(); } } void updateRotation(int animFlags) { mPowerManager.setKeyboardVisibility(mLidOpen); int rotation = Surface.ROTATION_0; if (mLidOpen) { rotation = mLidOpenRotation; } else if (mDockState == Intent.EXTRA_DOCK_STATE_CAR && mCarDockRotation >= 0) { rotation = mCarDockRotation; } else if (mDockState == Intent.EXTRA_DOCK_STATE_DESK && mDeskDockRotation >= 0) { rotation = mDeskDockRotation; } //if lid is closed orientation will be portrait try { //set orientation on WindowManager mWindowManager.setRotation(rotation, true, mFancyRotationAnimation | animFlags); } catch (RemoteException e) { // Ignore } } /** * Return an Intent to launch the currently active dock as home. Returns * null if the standard home should be launched. 
* @return */ Intent createHomeDockIntent() { if (mDockState == Intent.EXTRA_DOCK_STATE_UNDOCKED) { return null; } Intent intent; if (mDockState == Intent.EXTRA_DOCK_STATE_CAR) { intent = mCarDockIntent; } else if (mDockState == Intent.EXTRA_DOCK_STATE_DESK) { intent = mDeskDockIntent; } else { Log.w(TAG, "Unknown dock state: " + mDockState); return null; } ActivityInfo ai = intent.resolveActivityInfo( mContext.getPackageManager(), PackageManager.GET_META_DATA); if (ai == null) { return null; } if (ai.metaData != null && ai.metaData.getBoolean(Intent.METADATA_DOCK_HOME)) { intent = new Intent(intent); intent.setClassName(ai.packageName, ai.name); return intent; } return null; } void startDockOrHome() { Intent dock = createHomeDockIntent(); if (dock != null) { try { mContext.startActivity(dock); return; } catch (ActivityNotFoundException e) { } } mContext.startActivity(mHomeIntent); } /** * goes to the home screen * @return whether it did anything */ boolean goHome() { if (false) { // This code always brings home to the front. try { ActivityManagerNative.getDefault().stopAppSwitches(); } catch (RemoteException e) { } sendCloseSystemWindows(); startDockOrHome(); } else { // This code brings home to the front or, if it is already // at the front, puts the device to sleep. try { ActivityManagerNative.getDefault().stopAppSwitches(); sendCloseSystemWindows(); Intent dock = createHomeDockIntent(); if (dock != null) { int result = ActivityManagerNative.getDefault() .startActivity(null, dock, dock.resolveTypeIfNeeded(mContext.getContentResolver()), null, 0, null, null, 0, true /* onlyIfNeeded*/, false); if (result == IActivityManager.START_RETURN_INTENT_TO_CALLER) { return false; } } int result = ActivityManagerNative.getDefault() .startActivity(null, mHomeIntent, mHomeIntent.resolveTypeIfNeeded(mContext.getContentResolver()), null, 0, null, null, 0, true /* onlyIfNeeded*/, false); if (result == IActivityManager.START_RETURN_INTENT_TO_CALLER) { return false; } } catch (RemoteException ex) { // bummer, the activity manager, which is in this process, is dead } } return true; } public void setCurrentOrientationLw(int newOrientation) { synchronized (mLock) { if (newOrientation != mCurrentAppOrientation) { mCurrentAppOrientation = newOrientation; updateOrientationListenerLp(); } } } public boolean performHapticFeedbackLw(WindowState win, int effectId, boolean always) { final boolean hapticsDisabled = Settings.System.getInt(mContext.getContentResolver(), Settings.System.HAPTIC_FEEDBACK_ENABLED, 0) == 0; if (!always && (hapticsDisabled || mKeyguardMediator.isShowingAndNotHidden())) { return false; } switch (effectId) { case HapticFeedbackConstants.LONG_PRESS: mVibrator.vibrate(mLongPressVibePattern, -1); return true; case HapticFeedbackConstants.VIRTUAL_KEY: mVibrator.vibrate(mVirtualKeyVibePattern, -1); return true; case HapticFeedbackConstants.SAFE_MODE_DISABLED: mVibrator.vibrate(mSafeModeDisabledVibePattern, -1); return true; case HapticFeedbackConstants.SAFE_MODE_ENABLED: mVibrator.vibrate(mSafeModeEnabledVibePattern, -1); return true; } return false; } public void keyFeedbackFromInput(KeyEvent event) { if (event.getAction() == KeyEvent.ACTION_DOWN && (event.getFlags()&KeyEvent.FLAG_VIRTUAL_HARD_KEY) != 0) { performHapticFeedbackLw(null, HapticFeedbackConstants.VIRTUAL_KEY, false); } } public void screenOnStoppedLw() { if (!mKeyguardMediator.isShowingAndNotHidden() && mPowerManager.isScreenOn()) { long curTime = SystemClock.uptimeMillis(); mPowerManager.userActivity(curTime, false, 
LocalPowerManager.OTHER_EVENT); } } public boolean allowKeyRepeat() { // disable key repeat when screen is off return mScreenOn; } }
false
true
public int interceptKeyTq(RawInputEvent event, boolean screenIsOn) { int result = ACTION_PASS_TO_USER; final boolean isWakeKey = isWakeKeyTq(event); final boolean keyguardShowing = keyguardIsShowingTq(); if (false) { Log.d(TAG, "interceptKeyTq event=" + event + " keycode=" + event.keycode + " screenIsOn=" + screenIsOn + " keyguardShowing=" + keyguardShowing); } if (keyguardShowing) { if (screenIsOn) { // when the screen is on, always give the event to the keyguard result |= ACTION_PASS_TO_USER; } else { // otherwise, don't pass it to the user result &= ~ACTION_PASS_TO_USER; final boolean isKeyDown = (event.type == RawInputEvent.EV_KEY) && (event.value != 0); if (isWakeKey && isKeyDown) { // tell the mediator about a wake key, it may decide to // turn on the screen depending on whether the key is // appropriate. if (!mKeyguardMediator.onWakeKeyWhenKeyguardShowingTq(event.keycode) && (event.keycode == KeyEvent.KEYCODE_VOLUME_DOWN || event.keycode == KeyEvent.KEYCODE_VOLUME_UP)) { if (isInCall()) { // if the keyguard didn't wake the device, we are in call, and // it is a volume key, turn on the screen so that the user // can more easily adjust the in call volume. mKeyguardMediator.pokeWakelock(); } else if (isMusicActive()) { // when keyguard is showing and screen off, we need // to handle the volume key for music here handleVolumeKey(AudioManager.STREAM_MUSIC, event.keycode); } } } } } else if (!screenIsOn) { // If we are in-call with screen off and keyguard is not showing, // then handle the volume key ourselves. // This is necessary because the phone app will disable the keyguard // when the proximity sensor is in use. if (isInCall() && event.type == RawInputEvent.EV_KEY && (event.keycode == KeyEvent.KEYCODE_VOLUME_DOWN || event.keycode == KeyEvent.KEYCODE_VOLUME_UP)) { result &= ~ACTION_PASS_TO_USER; handleVolumeKey(AudioManager.STREAM_VOICE_CALL, event.keycode); } if (isWakeKey) { // a wake key has a sole purpose of waking the device; don't pass // it to the user result |= ACTION_POKE_USER_ACTIVITY; result &= ~ACTION_PASS_TO_USER; } } int type = event.type; int code = event.keycode; boolean down = event.value != 0; if (type == RawInputEvent.EV_KEY) { if (code == KeyEvent.KEYCODE_ENDCALL || code == KeyEvent.KEYCODE_POWER) { if (down) { boolean handled = false; // key repeats are generated by the window manager, and we don't see them // here, so unless the driver is doing something it shouldn't be, we know // this is the real press event. ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { try { if (code == KeyEvent.KEYCODE_ENDCALL) { handled = phoneServ.endCall(); } else if (code == KeyEvent.KEYCODE_POWER && phoneServ.isRinging()) { // Pressing power during incoming call should silence the ringer phoneServ.silenceRinger(); handled = true; } } catch (RemoteException ex) { Log.w(TAG, "ITelephony threw RemoteException" + ex); } } else { Log.w(TAG, "!!! 
Unable to find ITelephony interface !!!"); } // power button should turn off screen in addition to hanging up the phone if ((handled && code != KeyEvent.KEYCODE_POWER) || !screenIsOn) { mShouldTurnOffOnKeyUp = false; } else { // only try to turn off the screen if we didn't already hang up mShouldTurnOffOnKeyUp = true; mHandler.postDelayed(mPowerLongPress, ViewConfiguration.getGlobalActionKeyTimeout()); result &= ~ACTION_PASS_TO_USER; } } else { mHandler.removeCallbacks(mPowerLongPress); if (mShouldTurnOffOnKeyUp) { mShouldTurnOffOnKeyUp = false; boolean gohome, sleeps; if (code == KeyEvent.KEYCODE_ENDCALL) { gohome = (mEndcallBehavior & ENDCALL_HOME) != 0; sleeps = (mEndcallBehavior & ENDCALL_SLEEPS) != 0; } else { gohome = false; sleeps = true; } if (keyguardShowing || (sleeps && !gohome) || (gohome && !goHome() && sleeps)) { // they must already be on the keyguad or home screen, // go to sleep instead Log.d(TAG, "I'm tired mEndcallBehavior=0x" + Integer.toHexString(mEndcallBehavior)); result &= ~ACTION_POKE_USER_ACTIVITY; result |= ACTION_GO_TO_SLEEP; } result &= ~ACTION_PASS_TO_USER; } } } else if (isMediaKey(code)) { // This key needs to be handled even if the screen is off. // If others need to be handled while it's off, this is a reasonable // pattern to follow. if ((result & ACTION_PASS_TO_USER) == 0) { // Only do this if we would otherwise not pass it to the user. In that // case, the PhoneWindow class will do the same thing, except it will // only do it if the showing app doesn't process the key on its own. KeyEvent keyEvent = new KeyEvent(event.when, event.when, down ? KeyEvent.ACTION_DOWN : KeyEvent.ACTION_UP, code, 0); mBroadcastWakeLock.acquire(); mHandler.post(new PassHeadsetKey(keyEvent)); } } else if (code == KeyEvent.KEYCODE_CALL) { // If an incoming call is ringing, answer it! // (We handle this key here, rather than in the InCallScreen, to make // sure we'll respond to the key even if the InCallScreen hasn't come to // the foreground yet.) // We answer the call on the DOWN event, to agree with // the "fallback" behavior in the InCallScreen. if (down) { try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { if (phoneServ.isRinging()) { Log.i(TAG, "interceptKeyTq:" + " CALL key-down while ringing: Answer the call!"); phoneServ.answerRingingCall(); // And *don't* pass this key thru to the current activity // (which is presumably the InCallScreen.) result &= ~ACTION_PASS_TO_USER; } } else { Log.w(TAG, "CALL button: Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "CALL button: RemoteException from getPhoneInterface()", ex); } } } else if ((code == KeyEvent.KEYCODE_VOLUME_UP) || (code == KeyEvent.KEYCODE_VOLUME_DOWN)) { // If an incoming call is ringing, either VOLUME key means // "silence ringer". We handle these keys here, rather than // in the InCallScreen, to make sure we'll respond to them // even if the InCallScreen hasn't come to the foreground yet. // Look for the DOWN event here, to agree with the "fallback" // behavior in the InCallScreen. if (down) { try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { if (phoneServ.isRinging()) { Log.i(TAG, "interceptKeyTq:" + " VOLUME key-down while ringing: Silence ringer!"); // Silence the ringer. (It's safe to call this // even if the ringer has already been silenced.) phoneServ.silenceRinger(); // And *don't* pass this key thru to the current activity // (which is probably the InCallScreen.) 
result &= ~ACTION_PASS_TO_USER; } } else { Log.w(TAG, "VOLUME button: Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "VOLUME button: RemoteException from getPhoneInterface()", ex); } } } } return result; }
public int interceptKeyTq(RawInputEvent event, boolean screenIsOn) { int result = ACTION_PASS_TO_USER; final boolean isWakeKey = isWakeKeyTq(event); // If screen is off then we treat the case where the keyguard is open but hidden // the same as if it were open and in front. // This will prevent any keys other than the power button from waking the screen // when the keyguard is hidden by another activity. final boolean keyguardActive = (screenIsOn ? mKeyguardMediator.isShowingAndNotHidden() : mKeyguardMediator.isShowing()); if (false) { Log.d(TAG, "interceptKeyTq event=" + event + " keycode=" + event.keycode + " screenIsOn=" + screenIsOn + " keyguardActive=" + keyguardActive); } if (keyguardActive) { if (screenIsOn) { // when the screen is on, always give the event to the keyguard result |= ACTION_PASS_TO_USER; } else { // otherwise, don't pass it to the user result &= ~ACTION_PASS_TO_USER; final boolean isKeyDown = (event.type == RawInputEvent.EV_KEY) && (event.value != 0); if (isWakeKey && isKeyDown) { // tell the mediator about a wake key, it may decide to // turn on the screen depending on whether the key is // appropriate. if (!mKeyguardMediator.onWakeKeyWhenKeyguardShowingTq(event.keycode) && (event.keycode == KeyEvent.KEYCODE_VOLUME_DOWN || event.keycode == KeyEvent.KEYCODE_VOLUME_UP)) { if (isInCall()) { // if the keyguard didn't wake the device, we are in call, and // it is a volume key, turn on the screen so that the user // can more easily adjust the in call volume. mKeyguardMediator.pokeWakelock(); } else if (isMusicActive()) { // when keyguard is showing and screen off, we need // to handle the volume key for music here handleVolumeKey(AudioManager.STREAM_MUSIC, event.keycode); } } } } } else if (!screenIsOn) { // If we are in-call with screen off and keyguard is not showing, // then handle the volume key ourselves. // This is necessary because the phone app will disable the keyguard // when the proximity sensor is in use. if (isInCall() && event.type == RawInputEvent.EV_KEY && (event.keycode == KeyEvent.KEYCODE_VOLUME_DOWN || event.keycode == KeyEvent.KEYCODE_VOLUME_UP)) { result &= ~ACTION_PASS_TO_USER; handleVolumeKey(AudioManager.STREAM_VOICE_CALL, event.keycode); } if (isWakeKey) { // a wake key has a sole purpose of waking the device; don't pass // it to the user result |= ACTION_POKE_USER_ACTIVITY; result &= ~ACTION_PASS_TO_USER; } } int type = event.type; int code = event.keycode; boolean down = event.value != 0; if (type == RawInputEvent.EV_KEY) { if (code == KeyEvent.KEYCODE_ENDCALL || code == KeyEvent.KEYCODE_POWER) { if (down) { boolean handled = false; // key repeats are generated by the window manager, and we don't see them // here, so unless the driver is doing something it shouldn't be, we know // this is the real press event. ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { try { if (code == KeyEvent.KEYCODE_ENDCALL) { handled = phoneServ.endCall(); } else if (code == KeyEvent.KEYCODE_POWER && phoneServ.isRinging()) { // Pressing power during incoming call should silence the ringer phoneServ.silenceRinger(); handled = true; } } catch (RemoteException ex) { Log.w(TAG, "ITelephony threw RemoteException" + ex); } } else { Log.w(TAG, "!!! 
Unable to find ITelephony interface !!!"); } // power button should turn off screen in addition to hanging up the phone if ((handled && code != KeyEvent.KEYCODE_POWER) || !screenIsOn) { mShouldTurnOffOnKeyUp = false; } else { // only try to turn off the screen if we didn't already hang up mShouldTurnOffOnKeyUp = true; mHandler.postDelayed(mPowerLongPress, ViewConfiguration.getGlobalActionKeyTimeout()); result &= ~ACTION_PASS_TO_USER; } } else { mHandler.removeCallbacks(mPowerLongPress); if (mShouldTurnOffOnKeyUp) { mShouldTurnOffOnKeyUp = false; boolean gohome, sleeps; if (code == KeyEvent.KEYCODE_ENDCALL) { gohome = (mEndcallBehavior & ENDCALL_HOME) != 0; sleeps = (mEndcallBehavior & ENDCALL_SLEEPS) != 0; } else { gohome = false; sleeps = true; } if (keyguardActive || (sleeps && !gohome) || (gohome && !goHome() && sleeps)) { // they must already be on the keyguad or home screen, // go to sleep instead Log.d(TAG, "I'm tired mEndcallBehavior=0x" + Integer.toHexString(mEndcallBehavior)); result &= ~ACTION_POKE_USER_ACTIVITY; result |= ACTION_GO_TO_SLEEP; } result &= ~ACTION_PASS_TO_USER; } } } else if (isMediaKey(code)) { // This key needs to be handled even if the screen is off. // If others need to be handled while it's off, this is a reasonable // pattern to follow. if ((result & ACTION_PASS_TO_USER) == 0) { // Only do this if we would otherwise not pass it to the user. In that // case, the PhoneWindow class will do the same thing, except it will // only do it if the showing app doesn't process the key on its own. KeyEvent keyEvent = new KeyEvent(event.when, event.when, down ? KeyEvent.ACTION_DOWN : KeyEvent.ACTION_UP, code, 0); mBroadcastWakeLock.acquire(); mHandler.post(new PassHeadsetKey(keyEvent)); } } else if (code == KeyEvent.KEYCODE_CALL) { // If an incoming call is ringing, answer it! // (We handle this key here, rather than in the InCallScreen, to make // sure we'll respond to the key even if the InCallScreen hasn't come to // the foreground yet.) // We answer the call on the DOWN event, to agree with // the "fallback" behavior in the InCallScreen. if (down) { try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { if (phoneServ.isRinging()) { Log.i(TAG, "interceptKeyTq:" + " CALL key-down while ringing: Answer the call!"); phoneServ.answerRingingCall(); // And *don't* pass this key thru to the current activity // (which is presumably the InCallScreen.) result &= ~ACTION_PASS_TO_USER; } } else { Log.w(TAG, "CALL button: Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "CALL button: RemoteException from getPhoneInterface()", ex); } } } else if ((code == KeyEvent.KEYCODE_VOLUME_UP) || (code == KeyEvent.KEYCODE_VOLUME_DOWN)) { // If an incoming call is ringing, either VOLUME key means // "silence ringer". We handle these keys here, rather than // in the InCallScreen, to make sure we'll respond to them // even if the InCallScreen hasn't come to the foreground yet. // Look for the DOWN event here, to agree with the "fallback" // behavior in the InCallScreen. if (down) { try { ITelephony phoneServ = getPhoneInterface(); if (phoneServ != null) { if (phoneServ.isRinging()) { Log.i(TAG, "interceptKeyTq:" + " VOLUME key-down while ringing: Silence ringer!"); // Silence the ringer. (It's safe to call this // even if the ringer has already been silenced.) phoneServ.silenceRinger(); // And *don't* pass this key thru to the current activity // (which is probably the InCallScreen.) 
result &= ~ACTION_PASS_TO_USER; } } else { Log.w(TAG, "VOLUME button: Unable to find ITelephony interface"); } } catch (RemoteException ex) { Log.w(TAG, "VOLUME button: RemoteException from getPhoneInterface()", ex); } } } } return result; }
diff --git a/G12/src/MonsterMash/LoginPage.java b/G12/src/MonsterMash/LoginPage.java
index 2f5fdda..29f4f00 100644
--- a/G12/src/MonsterMash/LoginPage.java
+++ b/G12/src/MonsterMash/LoginPage.java
@@ -1,93 +1,93 @@
 /*
  * To change this template, choose Tools | Templates
  * and open the template in the editor.
  */
 import data.Player;
 import database.PersistenceManager;
 import java.io.IOException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.http.HttpSession;
 
 /**
  *
  * @author sjk4
  */
 public class LoginPage extends HttpServlet {
 
     /**
      * Encode password using MD5.
      * @param md5 password
      * @return encoded password
      */
     public String MD5(String md5) {
         try {
             java.security.MessageDigest md = java.security.MessageDigest.getInstance("MD5");
             byte[] array = md.digest(md5.getBytes());
             StringBuffer sb = new StringBuffer();
             for (int i = 0; i < array.length; ++i) {
                 sb.append(Integer.toHexString((array[i] & 0xFF) | 0x100).substring(1,3));
             }
             return sb.toString();
         } catch (java.security.NoSuchAlgorithmException e) {
         }
         return null;
     }
 
     /**
      * Handles the HTTP
      * <code>GET</code> method.
      *
      * @param request servlet request
      * @param response servlet response
      * @throws ServletException if a servlet-specific error occurs
      * @throws IOException if an I/O error occurs
      */
     @Override
     protected void doGet(HttpServletRequest request, HttpServletResponse response)
             throws ServletException, IOException {
         HttpSession session = request.getSession(true);
         if(session != null && session.getAttribute("user") != null){
             // If user logged, redirect to main page
             response.sendRedirect("main");
         }else{
             // If not load login page
             request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
         }
     }
 
     /**
      * Handles the HTTP
      * <code>POST</code> method.
      *
      * @param request servlet request
      * @param response servlet response
      * @throws ServletException if a servlet-specific error occurs
      * @throws IOException if an I/O error occurs
      */
     @Override
     protected void doPost(HttpServletRequest request, HttpServletResponse response)
             throws ServletException, IOException {
         String email = request.getParameter("email");
         String password = request.getParameter("password");
         if(email.length() < 1 || password.length() < 1){
-            request.setAttribute("errorMessage", "Please fill in both fields.");
+            request.setAttribute("errorMessage", "Please fill in both your email and password.");
             request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
         }else{
             PersistenceManager pm = new PersistenceManager();
             password = this.MD5(password);
             Player selected = pm.doLogin(email, password);
             if(selected != null){
                 // If player exists save object to the session called "user"
                 HttpSession session = request.getSession(true);
                 session.setAttribute("user", selected);
                 response.sendRedirect("main");
             }else{
                 // If null, there's no player with this email and password
                 request.setAttribute("errorMessage", "Password or email address is incorrect.");
                 request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
             }
         }
     }
 }
true
true
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    String email = request.getParameter("email");
    String password = request.getParameter("password");
    if(email.length() < 1 || password.length() < 1){
        request.setAttribute("errorMessage", "Please fill in both fields.");
        request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
    }else{
        PersistenceManager pm = new PersistenceManager();
        password = this.MD5(password);
        Player selected = pm.doLogin(email, password);
        if(selected != null){
            // If player exists save object to the session called "user"
            HttpSession session = request.getSession(true);
            session.setAttribute("user", selected);
            response.sendRedirect("main");
        }else{
            // If null, there's no player with this email and password
            request.setAttribute("errorMessage", "Password or email address is incorrect.");
            request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
        }
    }
}
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    String email = request.getParameter("email");
    String password = request.getParameter("password");
    if(email.length() < 1 || password.length() < 1){
        request.setAttribute("errorMessage", "Please fill in both your email and password.");
        request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
    }else{
        PersistenceManager pm = new PersistenceManager();
        password = this.MD5(password);
        Player selected = pm.doLogin(email, password);
        if(selected != null){
            // If player exists save object to the session called "user"
            HttpSession session = request.getSession(true);
            session.setAttribute("user", selected);
            response.sendRedirect("main");
        }else{
            // If null, there's no player with this email and password
            request.setAttribute("errorMessage", "Password or email address is incorrect.");
            request.getRequestDispatcher("/WEB-INF/login_page.jsp").forward(request, response);
        }
    }
}
diff --git a/src/main/java/org/geoserver/shell/CoverageStoreCommands.java b/src/main/java/org/geoserver/shell/CoverageStoreCommands.java index c4e6154..0382ccc 100644 --- a/src/main/java/org/geoserver/shell/CoverageStoreCommands.java +++ b/src/main/java/org/geoserver/shell/CoverageStoreCommands.java @@ -1,155 +1,159 @@ package org.geoserver.shell; import it.geosolutions.geoserver.rest.GeoServerRESTReader; import it.geosolutions.geoserver.rest.HTTPUtils; import it.geosolutions.geoserver.rest.decoder.RESTCoverageStoreList; import it.geosolutions.geoserver.rest.decoder.utils.JDOMBuilder; import org.jdom.Element; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.shell.core.CommandMarker; import org.springframework.shell.core.annotation.CliAvailabilityIndicator; import org.springframework.shell.core.annotation.CliCommand; import org.springframework.shell.core.annotation.CliOption; import org.springframework.shell.support.util.OsUtils; import org.springframework.stereotype.Component; import java.io.File; import java.util.Collections; import java.util.List; @Component public class CoverageStoreCommands implements CommandMarker { @Autowired private Geoserver geoserver; public void setGeoserver(Geoserver gs) { this.geoserver = gs; } @CliAvailabilityIndicator({"coverage store list", "coverage store get", "coverage store create", "coverage store modify", "coverage store delete", "coverage store upload"}) public boolean isCommandAvailable() { return geoserver.isSet(); } @CliCommand(value = "coverage store list", help = "List coverage store.") public String list( @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace ) throws Exception { GeoServerRESTReader reader = new GeoServerRESTReader(geoserver.getUrl(), geoserver.getUser(), geoserver.getPassword()); RESTCoverageStoreList list = reader.getCoverageStores(workspace); List<String> names = list.getNames(); Collections.sort(names); StringBuilder builder = new StringBuilder(); for (String name : names) { builder.append(name).append(OsUtils.LINE_SEPARATOR); } return builder.toString(); } @CliCommand(value = "coverage store get", help = "Get a coverage store.") public String get( @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace, @CliOption(key = "coveragestore", mandatory = true, help = "The coveragestore") String coveragestore ) throws Exception { String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace) + "/coveragestores/" + URLUtil.encode(coveragestore) + ".xml"; String xml = HTTPUtils.get(url, geoserver.getUser(), geoserver.getPassword()); Element coverageStoreElement = JDOMBuilder.buildElement(xml); String name = coverageStoreElement.getChildText("name"); String type = coverageStoreElement.getChildText("type"); String enabled = coverageStoreElement.getChildText("enabled"); String covUrl = coverageStoreElement.getChildText("url"); // @TODO RESTCoverageStore doesn't have access to type, enabled /*GeoServerRESTReader reader = new GeoServerRESTReader(geoserver.getUrl(), geoserver.getUser(), geoserver.getPassword()); RESTCoverageStore store = reader.getCoverageStore(workspace, coveragestore);*/ String TAB = " "; StringBuilder builder = new StringBuilder(); builder.append(name).append(OsUtils.LINE_SEPARATOR); builder.append(TAB).append("Type: ").append(type).append(OsUtils.LINE_SEPARATOR); builder.append(TAB).append("URL: ").append(covUrl).append(OsUtils.LINE_SEPARATOR); builder.append(TAB).append("Enabled: 
").append(enabled).append(OsUtils.LINE_SEPARATOR); return builder.toString(); } @CliCommand(value = "coverage store delete", help = "Delete a coverage store.") public boolean delete( @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace, @CliOption(key = "coveragestore", mandatory = true, help = "The coveragestore") String coveragestore, @CliOption(key = "recurse", mandatory = false, help = "Whether to delete all associated layers", unspecifiedDefaultValue = "false", specifiedDefaultValue = "false") boolean recurse ) throws Exception { String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace) + "/coveragestores/" + URLUtil.encode(coveragestore) + ".xml?recurse=" + recurse; return HTTPUtils.delete(url, geoserver.getUser(), geoserver.getPassword()); } @CliCommand(value = "coverage store modify", help = "Modify a coverage store.") public boolean modify( @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace, @CliOption(key = "coveragestore", mandatory = true, help = "The coveragestore") String coveragestore, @CliOption(key = "name", mandatory = false, help = "The name") String name, @CliOption(key = "type", mandatory = false, help = "The type") String type, @CliOption(key = "url", mandatory = false, help = "The file url") String fileUrl, @CliOption(key = "enabled", mandatory = false, help = "The enabled flag") String enabled ) throws Exception { String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace) + "/coveragestores/" + URLUtil.encode(coveragestore) + ".xml"; Element element = new Element("coverageStore"); if (name != null) element.addContent(new Element("name").setText(name)); if (type != null) element.addContent(new Element("type").setText(type)); if (fileUrl != null) element.addContent(new Element("url").setText(fileUrl)); if (enabled != null) element.addContent(new Element("enabled").setText(enabled)); element.addContent(new Element("workspace").addContent(new Element("name").setText(workspace))); String content = JDOMUtil.toString(element); String response = HTTPUtils.putXml(url, content, geoserver.getUser(), geoserver.getPassword()); return response != null; } @CliCommand(value = "coverage store create", help = "Create a coverage store.") public boolean create( @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace, @CliOption(key = "name", mandatory = true, help = "The name") String name, @CliOption(key = "type", mandatory = true, help = "The type") String type, @CliOption(key = "url", mandatory = true, help = "The file url") String fileUrl, @CliOption(key = "enabled", mandatory = false, unspecifiedDefaultValue = "true", help = "The enabled flag") boolean enabled ) throws Exception { String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace) + "/coveragestores.xml"; Element element = new Element("coverageStore"); element.addContent(new Element("name").setText(name)); element.addContent(new Element("type").setText(type)); element.addContent(new Element("url").setText(fileUrl)); element.addContent(new Element("enabled").setText(String.valueOf(enabled))); element.addContent(new Element("workspace").addContent(new Element("name").setText(workspace))); String content = JDOMUtil.toString(element); String response = HTTPUtils.postXml(url, content, geoserver.getUser(), geoserver.getPassword()); return response != null; } @CliCommand(value = "coverage store upload", help = "Upload a file to create coverage store.") 
public boolean upload( @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace, @CliOption(key = "coveragestore", mandatory = true, help = "The coveragestore") String coveragestore, @CliOption(key = "file", mandatory = true, help = "The file") File file, @CliOption(key = "type", mandatory = true, help = "The type (geotiff, worldimage, or imagemosaic)") String type, @CliOption(key = "configure", mandatory = false, help = "How to configure (first, none, all)", unspecifiedDefaultValue = "first") String configure, @CliOption(key = "coverage", mandatory = false, help = "The name of the coverage") String coverageName, @CliOption(key = "recalculate", mandatory = false, help = "How to recalculate bbox (nativebbox,latlonbbox)") String recalculate ) throws Exception { String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace) + "/coveragestores/" + URLUtil.encode(coveragestore) + "/file." + type + "?configure=" + configure; if (coverageName != null) { url += "&coverageName=" + coverageName; } if (recalculate != null) { url += "&recalculate=" + recalculate; } String contentType; if (type.equalsIgnoreCase("geotiff")) { contentType = "image/tiff"; - } else if (type.equalsIgnoreCase("worldimage")) { - contentType = "image/" + file.getName().substring(file.getName().lastIndexOf(".")); } else { contentType = "application/zip"; } String response = HTTPUtils.put(url, file, contentType, geoserver.getUser(), geoserver.getPassword()); + if (geoserver.isVerbose()) { + System.out.println("URL: " + url); + System.out.println("Content Type: " + contentType); + System.out.println("File: " + file.getAbsolutePath()); + System.out.println("Response: " + response); + } return response != null; } }
false
true
public boolean upload(
        @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace,
        @CliOption(key = "coveragestore", mandatory = true, help = "The coveragestore") String coveragestore,
        @CliOption(key = "file", mandatory = true, help = "The file") File file,
        @CliOption(key = "type", mandatory = true, help = "The type (geotiff, worldimage, or imagemosaic)") String type,
        @CliOption(key = "configure", mandatory = false, help = "How to configure (first, none, all)", unspecifiedDefaultValue = "first") String configure,
        @CliOption(key = "coverage", mandatory = false, help = "The name of the coverage") String coverageName,
        @CliOption(key = "recalculate", mandatory = false, help = "How to recalculate bbox (nativebbox,latlonbbox)") String recalculate
) throws Exception {
    String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace)
            + "/coveragestores/" + URLUtil.encode(coveragestore) + "/file." + type + "?configure=" + configure;
    if (coverageName != null) {
        url += "&coverageName=" + coverageName;
    }
    if (recalculate != null) {
        url += "&recalculate=" + recalculate;
    }
    String contentType;
    if (type.equalsIgnoreCase("geotiff")) {
        contentType = "image/tiff";
    } else if (type.equalsIgnoreCase("worldimage")) {
        contentType = "image/" + file.getName().substring(file.getName().lastIndexOf("."));
    } else {
        contentType = "application/zip";
    }
    String response = HTTPUtils.put(url, file, contentType, geoserver.getUser(), geoserver.getPassword());
    return response != null;
}
public boolean upload(
        @CliOption(key = "workspace", mandatory = true, help = "The workspace") String workspace,
        @CliOption(key = "coveragestore", mandatory = true, help = "The coveragestore") String coveragestore,
        @CliOption(key = "file", mandatory = true, help = "The file") File file,
        @CliOption(key = "type", mandatory = true, help = "The type (geotiff, worldimage, or imagemosaic)") String type,
        @CliOption(key = "configure", mandatory = false, help = "How to configure (first, none, all)", unspecifiedDefaultValue = "first") String configure,
        @CliOption(key = "coverage", mandatory = false, help = "The name of the coverage") String coverageName,
        @CliOption(key = "recalculate", mandatory = false, help = "How to recalculate bbox (nativebbox,latlonbbox)") String recalculate
) throws Exception {
    String url = geoserver.getUrl() + "/rest/workspaces/" + URLUtil.encode(workspace)
            + "/coveragestores/" + URLUtil.encode(coveragestore) + "/file." + type + "?configure=" + configure;
    if (coverageName != null) {
        url += "&coverageName=" + coverageName;
    }
    if (recalculate != null) {
        url += "&recalculate=" + recalculate;
    }
    String contentType;
    if (type.equalsIgnoreCase("geotiff")) {
        contentType = "image/tiff";
    } else {
        contentType = "application/zip";
    }
    String response = HTTPUtils.put(url, file, contentType, geoserver.getUser(), geoserver.getPassword());
    if (geoserver.isVerbose()) {
        System.out.println("URL: " + url);
        System.out.println("Content Type: " + contentType);
        System.out.println("File: " + file.getAbsolutePath());
        System.out.println("Response: " + response);
    }
    return response != null;
}
diff --git a/src/com/android/camera/ui/CameraRootView.java b/src/com/android/camera/ui/CameraRootView.java index 0cff14480..e49ac59a6 100644 --- a/src/com/android/camera/ui/CameraRootView.java +++ b/src/com/android/camera/ui/CameraRootView.java @@ -1,114 +1,116 @@ /* * Copyright (C) 2013 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.camera.ui; import android.app.Activity; import android.content.Context; import android.content.res.Configuration; import android.graphics.Rect; import android.util.AttributeSet; import android.view.Gravity; import android.view.View; import android.widget.FrameLayout; import android.widget.RelativeLayout; import com.android.camera.Util; import com.android.gallery3d.R; public class CameraRootView extends RelativeLayout { private int mTopMargin = 0; private int mBottomMargin = 0; private int mLeftMargin = 0; private int mRightMargin = 0; private int mOffset = 0; private Rect mCurrentInsets; public CameraRootView(Context context, AttributeSet attrs) { super(context, attrs); // Layout the window as if we did not need navigation bar setSystemUiVisibility(View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN | View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION); } @Override protected boolean fitSystemWindows(Rect insets) { super.fitSystemWindows(insets); mCurrentInsets = insets; // insets include status bar, navigation bar, etc // In this case, we are only concerned with the size of nav bar if (mOffset > 0) return true; if (insets.bottom > 0) { mOffset = insets.bottom; } else if (insets.right > 0) { mOffset = insets.right; } return true; } public void onLayout(boolean changed, int l, int t, int r, int b) { int rotation = Util.getDisplayRotation((Activity) getContext()); // all the layout code assumes camera device orientation to be portrait // adjust rotation for landscape int orientation = getResources().getConfiguration().orientation; int camOrientation = (rotation % 180 == 0) ? Configuration.ORIENTATION_PORTRAIT : Configuration.ORIENTATION_LANDSCAPE; if (camOrientation != orientation) { rotation = (rotation + 90) % 360; } // calculate margins int left = 0; int right = 0; int bottom = 0; int top = 0; switch (rotation) { case 0: bottom += mOffset; break; case 90: right += mOffset; break; case 180: top += mOffset; break; case 270: left += mOffset; break; } - if (mCurrentInsets.right > 0) { - // navigation bar on the right - right = right > 0 ? right : mCurrentInsets.right; - } else { - // navigation bar on the bottom - bottom = bottom > 0 ? bottom : mCurrentInsets.bottom; + if (mCurrentInsets != null) { + if (mCurrentInsets.right > 0) { + // navigation bar on the right + right = right > 0 ? right : mCurrentInsets.right; + } else { + // navigation bar on the bottom + bottom = bottom > 0 ? 
bottom : mCurrentInsets.bottom; + } } for (int i = 0; i < getChildCount(); i++) { View v = getChildAt(i); if (v instanceof CameraControls) { // Lay out camera controls to fill the short side of the screen // so that they stay in place during rotation if (rotation % 180 == 0) { v.layout(l, t + top, r, b - bottom); } else { v.layout(l + left, t, r - right, b); } } else { v.layout(l + left, t + top, r - right, b - bottom); } } } }
true
true
public void onLayout(boolean changed, int l, int t, int r, int b) {
    int rotation = Util.getDisplayRotation((Activity) getContext());
    // all the layout code assumes camera device orientation to be portrait
    // adjust rotation for landscape
    int orientation = getResources().getConfiguration().orientation;
    int camOrientation = (rotation % 180 == 0) ? Configuration.ORIENTATION_PORTRAIT
            : Configuration.ORIENTATION_LANDSCAPE;
    if (camOrientation != orientation) {
        rotation = (rotation + 90) % 360;
    }
    // calculate margins
    int left = 0;
    int right = 0;
    int bottom = 0;
    int top = 0;
    switch (rotation) {
        case 0: bottom += mOffset; break;
        case 90: right += mOffset; break;
        case 180: top += mOffset; break;
        case 270: left += mOffset; break;
    }
    if (mCurrentInsets.right > 0) {
        // navigation bar on the right
        right = right > 0 ? right : mCurrentInsets.right;
    } else {
        // navigation bar on the bottom
        bottom = bottom > 0 ? bottom : mCurrentInsets.bottom;
    }
    for (int i = 0; i < getChildCount(); i++) {
        View v = getChildAt(i);
        if (v instanceof CameraControls) {
            // Lay out camera controls to fill the short side of the screen
            // so that they stay in place during rotation
            if (rotation % 180 == 0) {
                v.layout(l, t + top, r, b - bottom);
            } else {
                v.layout(l + left, t, r - right, b);
            }
        } else {
            v.layout(l + left, t + top, r - right, b - bottom);
        }
    }
}
public void onLayout(boolean changed, int l, int t, int r, int b) {
    int rotation = Util.getDisplayRotation((Activity) getContext());
    // all the layout code assumes camera device orientation to be portrait
    // adjust rotation for landscape
    int orientation = getResources().getConfiguration().orientation;
    int camOrientation = (rotation % 180 == 0) ? Configuration.ORIENTATION_PORTRAIT
            : Configuration.ORIENTATION_LANDSCAPE;
    if (camOrientation != orientation) {
        rotation = (rotation + 90) % 360;
    }
    // calculate margins
    int left = 0;
    int right = 0;
    int bottom = 0;
    int top = 0;
    switch (rotation) {
        case 0: bottom += mOffset; break;
        case 90: right += mOffset; break;
        case 180: top += mOffset; break;
        case 270: left += mOffset; break;
    }
    if (mCurrentInsets != null) {
        if (mCurrentInsets.right > 0) {
            // navigation bar on the right
            right = right > 0 ? right : mCurrentInsets.right;
        } else {
            // navigation bar on the bottom
            bottom = bottom > 0 ? bottom : mCurrentInsets.bottom;
        }
    }
    for (int i = 0; i < getChildCount(); i++) {
        View v = getChildAt(i);
        if (v instanceof CameraControls) {
            // Lay out camera controls to fill the short side of the screen
            // so that they stay in place during rotation
            if (rotation % 180 == 0) {
                v.layout(l, t + top, r, b - bottom);
            } else {
                v.layout(l + left, t, r - right, b);
            }
        } else {
            v.layout(l + left, t + top, r - right, b - bottom);
        }
    }
}
diff --git a/src/me/cnaude/plugin/WolfColors/WCMain.java b/src/me/cnaude/plugin/WolfColors/WCMain.java old mode 100755 new mode 100644 index bccf9a9..739ef5f --- a/src/me/cnaude/plugin/WolfColors/WCMain.java +++ b/src/me/cnaude/plugin/WolfColors/WCMain.java @@ -1,100 +1,104 @@ /* * To change this template, choose Tools | Templates * and open the template in the editor. */ package me.cnaude.plugin.WolfColors; import java.io.File; import java.util.logging.Level; import java.util.logging.Logger; import org.bukkit.craftbukkit.entity.CraftWolf; import org.bukkit.entity.AnimalTamer; import org.bukkit.entity.Entity; import org.bukkit.entity.EntityType; import org.bukkit.entity.Player; import org.bukkit.entity.Wolf; import org.bukkit.event.EventHandler; import org.bukkit.event.EventPriority; import org.bukkit.event.Listener; import org.bukkit.event.player.PlayerInteractEntityEvent; import org.bukkit.inventory.ItemStack; import org.bukkit.plugin.java.JavaPlugin; /** * * @author cnaude */ public class WCMain extends JavaPlugin implements Listener { public static final String PLUGIN_NAME = "WolfColors"; public static final String LOG_HEADER = "[" + PLUGIN_NAME + "]"; static final Logger log = Logger.getLogger("Minecraft"); private File pluginFolder; private File configFile; @Override public void onEnable() { pluginFolder = getDataFolder(); configFile = new File(pluginFolder, "config.yml"); createConfig(); this.getConfig().options().copyDefaults(true); saveConfig(); loadConfig(); getServer().getPluginManager().registerEvents(this, this); } @EventHandler(priority = EventPriority.NORMAL) public void onPlayerInteractEntity(PlayerInteractEntityEvent event) { Entity e = event.getRightClicked(); Player p = event.getPlayer(); EntityType et = e.getType(); if (et.equals(EntityType.WOLF)) { if (p.hasPermission("wolfcolors.wolfcolors")) { ItemStack item = p.getInventory().getItemInHand(); + // Check if wolf is tamed if (((Wolf)e).isTamed()) { + // Check if player owns this tamed wolf if (((Wolf)e).getOwner() == (AnimalTamer)p) { + // Check if player holding dye item if (item.getTypeId() == 351) { + // Do magic ((CraftWolf)e).getHandle().setCollarColor((byte) (15 - p.getInventory().getItemInHand().getDurability())); if (item.getAmount() == 1) { event.getPlayer().getInventory().remove(item); } else { item.setAmount(item.getAmount() - 1); } } } } } } } private void createConfig() { if (!pluginFolder.exists()) { try { pluginFolder.mkdir(); } catch (Exception e) { logInfo("ERROR: " + e.getMessage()); } } if (!configFile.exists()) { try { configFile.createNewFile(); } catch (Exception e) { logInfo("ERROR: " + e.getMessage()); } } } private void loadConfig() { } public void logInfo(String _message) { log.log(Level.INFO, String.format("%s %s", LOG_HEADER, _message)); } public void logError(String _message) { log.log(Level.SEVERE, String.format("%s %s", LOG_HEADER, _message)); } }
false
true
public void onPlayerInteractEntity(PlayerInteractEntityEvent event) {
    Entity e = event.getRightClicked();
    Player p = event.getPlayer();
    EntityType et = e.getType();
    if (et.equals(EntityType.WOLF)) {
        if (p.hasPermission("wolfcolors.wolfcolors")) {
            ItemStack item = p.getInventory().getItemInHand();
            if (((Wolf)e).isTamed()) {
                if (((Wolf)e).getOwner() == (AnimalTamer)p) {
                    if (item.getTypeId() == 351) {
                        ((CraftWolf)e).getHandle().setCollarColor((byte) (15 - p.getInventory().getItemInHand().getDurability()));
                        if (item.getAmount() == 1) {
                            event.getPlayer().getInventory().remove(item);
                        } else {
                            item.setAmount(item.getAmount() - 1);
                        }
                    }
                }
            }
        }
    }
}
public void onPlayerInteractEntity(PlayerInteractEntityEvent event) {
    Entity e = event.getRightClicked();
    Player p = event.getPlayer();
    EntityType et = e.getType();
    if (et.equals(EntityType.WOLF)) {
        if (p.hasPermission("wolfcolors.wolfcolors")) {
            ItemStack item = p.getInventory().getItemInHand();
            // Check if wolf is tamed
            if (((Wolf)e).isTamed()) {
                // Check if player owns this tamed wolf
                if (((Wolf)e).getOwner() == (AnimalTamer)p) {
                    // Check if player holding dye item
                    if (item.getTypeId() == 351) {
                        // Do magic
                        ((CraftWolf)e).getHandle().setCollarColor((byte) (15 - p.getInventory().getItemInHand().getDurability()));
                        if (item.getAmount() == 1) {
                            event.getPlayer().getInventory().remove(item);
                        } else {
                            item.setAmount(item.getAmount() - 1);
                        }
                    }
                }
            }
        }
    }
}
diff --git a/src/main/java/org/jasig/ssp/web/api/AppointmentController.java b/src/main/java/org/jasig/ssp/web/api/AppointmentController.java index a823957ac..db7544200 100644 --- a/src/main/java/org/jasig/ssp/web/api/AppointmentController.java +++ b/src/main/java/org/jasig/ssp/web/api/AppointmentController.java @@ -1,77 +1,77 @@ package org.jasig.ssp.web.api; import java.util.UUID; import org.jasig.ssp.factory.AppointmentTOFactory; import org.jasig.ssp.factory.TOFactory; import org.jasig.ssp.model.Appointment; import org.jasig.ssp.service.AppointmentService; import org.jasig.ssp.service.ObjectNotFoundException; import org.jasig.ssp.transferobject.AppointmentTO; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.bind.annotation.ResponseBody; /** * Appointment controller */ @Controller @RequestMapping("/1/person/{personId}/appointment") public class AppointmentController extends AbstractPersonAssocController<Appointment, AppointmentTO> { protected AppointmentController() { super(Appointment.class, AppointmentTO.class); } private static final Logger LOGGER = LoggerFactory .getLogger(AppointmentController.class); @Autowired protected transient AppointmentService service; @Autowired protected transient AppointmentTOFactory factory; @Override protected AppointmentService getService() { return service; } @Override protected TOFactory<AppointmentTO, Appointment> getFactory() { return factory; } @Override protected Logger getLogger() { return LOGGER; } @Override public String permissionBaseName() { return "APPOINTMENT"; } @RequestMapping(value = "/current", method = RequestMethod.GET) public @ResponseBody AppointmentTO getCurrentAppointmentForPerson( - final @PathVariable UUID id) throws ObjectNotFoundException { + final @PathVariable UUID personId) throws ObjectNotFoundException { checkPermissionForOp("READ"); final Appointment appt = service.getCurrentAppointmentForPerson( - personService.get(id)); + personService.get(personId)); if (appt == null) { return null; } return factory.from(appt); } }
false
true
AppointmentTO getCurrentAppointmentForPerson(
        final @PathVariable UUID id) throws ObjectNotFoundException {
    checkPermissionForOp("READ");
    final Appointment appt = service.getCurrentAppointmentForPerson(
            personService.get(id));
    if (appt == null) {
        return null;
    }
    return factory.from(appt);
}
AppointmentTO getCurrentAppointmentForPerson(
        final @PathVariable UUID personId) throws ObjectNotFoundException {
    checkPermissionForOp("READ");
    final Appointment appt = service.getCurrentAppointmentForPerson(
            personService.get(personId));
    if (appt == null) {
        return null;
    }
    return factory.from(appt);
}
diff --git a/maqetta.core.server/src/maqetta/core/server/DavinciReviewServlet.java b/maqetta.core.server/src/maqetta/core/server/DavinciReviewServlet.java index 41011145a..f5cf76cf4 100644 --- a/maqetta.core.server/src/maqetta/core/server/DavinciReviewServlet.java +++ b/maqetta.core.server/src/maqetta/core/server/DavinciReviewServlet.java @@ -1,229 +1,229 @@ package maqetta.core.server; import java.io.IOException; import java.net.URI; import java.net.URL; import java.net.URLEncoder; import java.util.List; import javax.servlet.ServletException; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import maqetta.core.server.DavinciPageServlet; import maqetta.core.server.user.ReviewManager; import org.davinci.server.internal.Activator; import org.davinci.server.review.Constants; import org.davinci.server.review.ReviewObject; import org.davinci.server.review.Version; import org.davinci.server.review.cache.ReviewCacheManager; import org.davinci.server.review.user.IDesignerUser; import org.davinci.server.user.IUser; import org.davinci.server.util.JSONWriter; import org.eclipse.core.runtime.IPath; import org.eclipse.core.runtime.Path; import org.maqetta.server.IDavinciServerConstants; import org.maqetta.server.IVResource; import org.maqetta.server.ServerManager; import org.maqetta.server.VURL; @SuppressWarnings("serial") public class DavinciReviewServlet extends DavinciPageServlet { private ReviewManager reviewManager; protected String revieweeName; protected String noView; @Override public void initialize() { super.initialize(); reviewManager = ReviewManager.getReviewManager(); } @Override protected void service(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { super.service(req, resp); if ( !ReviewCacheManager.$.isAlive() ) ReviewCacheManager.$.start(); } public String getLoginUrl(HttpServletRequest req) { String loginUrl = serverManager.getDavinciProperty("loginUrl"); String params = ""; if ( null == loginUrl ) { loginUrl = req.getContextPath(); } // Ensure loginUrl is not null String pseparator = loginUrl.indexOf("?") > 0 ? "&" : "?"; if ( revieweeName != null ) { params = pseparator + "revieweeuser=" + revieweeName; } return loginUrl + params; } @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { if ( serverManager == null ) { initialize(); } revieweeName = req.getParameter("revieweeuser"); noView = req.getParameter("noview"); String contextString = req.getContextPath(); String pathInfo = req.getPathInfo(); IUser user = (IUser) req.getSession().getAttribute(IDavinciServerConstants.SESSION_USER); if ( ServerManager.DEBUG_IO_TO_CONSOLE ) { System.out.println("Review Servlet request: " + pathInfo + ", logged in= " + (user != null ? 
user.getUserName() : "guest")); } if ( user == null ) { req.getSession().setAttribute(IDavinciServerConstants.REDIRECT_TO, req.getRequestURL().toString()); String requestVersion = req.getParameter("version"); if(requestVersion!=null && !requestVersion.equals("")){ Cookie versionCookie = new Cookie(Constants.REVIEW_VERSION, requestVersion); /* have to set the path to delete it later from the client */ - versionCookie.setPath("/"); + versionCookie.setPath("/maqetta/"); resp.addCookie(versionCookie); } resp.sendRedirect(this.getLoginUrl(req)); return; } if ( pathInfo == null || pathInfo.equals("") ) { ReviewObject reviewObject = (ReviewObject) req.getSession().getAttribute(Constants.REVIEW_INFO); if ( reviewObject == null ) { // Because the requested URL is /review the empty review object // means we do not have a designer name: Error. resp.sendRedirect(this.getLoginUrl(req)); return; } else { resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_DESIGNER, reviewObject.getDesignerName())); resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_DESIGNER_EMAIL, reviewObject.getDesignerEmail())); if ( reviewObject.getCommentId() != null ) { resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_CMTID, reviewObject.getCommentId())); } if ( reviewObject.getVersion() != null ) { Cookie versionCookie = new Cookie(Constants.REVIEW_VERSION, reviewObject.getVersion()); /* have to set the path to delete it later from the client */ versionCookie.setPath("/maqetta/"); resp.addCookie(versionCookie); } // writeReviewPage(req, resp, "review.html"); // writeMainPage(req, resp); resp.sendRedirect("/maqetta/"); } } else { IPath path = new Path(pathInfo); String prefix = path.segment(0); if ( prefix == null ) { resp.sendRedirect(contextString + "/review"); return; } if ( prefix.equals(IDavinciServerConstants.APP_URL.substring(1)) || prefix.equals(Constants.CMD_URL.substring(1)) ) { // Forward to DavinciPageServlet such as "/app/img/1.jpg" or "cmd/getUserInfo" req.getRequestDispatcher(pathInfo).forward(req, resp); return; } if ( handleReviewRequest(req, resp, path) || handleLibraryRequest(req, resp, path, user) ) { return; } // Check if it is a valid user name. // If it is a valid user name, do login // Else, error. IUser designer = userManager.getUser(prefix); if ( designer == null ) { resp.sendRedirect(this.getLoginUrl(req)); return; } else { ReviewObject reviewObject = new ReviewObject(prefix); reviewObject.setDesignerEmail(designer.getPerson().getEmail()); if ( path.segmentCount() > 2 ) { // Token = 20100101/project1/folder1/sample1.html/default String commentId = path.segment(path.segmentCount() - 1); String fileName = path.removeLastSegments(1).removeFirstSegments(1).toPortableString(); reviewObject.setCommentId(commentId); } reviewObject.setVersion(req.getParameter("version")); req.getSession().setAttribute(Constants.REVIEW_INFO, reviewObject); resp.sendRedirect(contextString + "/review"); return; } } } @Override protected boolean handleLibraryRequest(HttpServletRequest req, HttpServletResponse resp, IPath path, IUser user) throws ServletException, IOException { // Remove the following URL prefix // /user/heguyi/ws/workspace/.review/snapshot/20100101/project1/lib/dojo/dojo.js // to // project1/lib/dojo/dojo.js String version = null; String ownerId = null; String projectName = null; if ( isValidReviewPath(path) ) { ownerId = path.segment(1); version = path.segment(6); projectName = path.segment(7); path = path.removeFirstSegments(7); // So that each snapshot can be mapped to its virtual lib path correctly. 
path = ReviewManager.adjustPath(path, ownerId, version, projectName); } return super.handleLibraryRequest(req, resp, path, user); } protected boolean handleReviewRequest(HttpServletRequest req, HttpServletResponse resp, IPath path) throws ServletException, IOException { // Get the requested resources from the designer's folder // Remove the following URL prefix // /user/heguyi/ws/workspace/.review/snapshot/20100101/project1/folder1/sample1.html // to // /.review/snapshot/20100101/project1/folder1/sample1.html if ( isValidReviewPath(path) ) { String designerName = path.segment(1); path = path.removeFirstSegments(4); IVResource vr = reviewManager.getDesignerUser(designerName).getResource(path); if ( vr != null ) { writePage(req, resp, vr, true); return true; } } return false; } protected void writeReviewPage(HttpServletRequest req, HttpServletResponse resp, String path) throws ServletException, IOException { URL resourceURL = Activator.getActivator().getOtherBundle("maqetta.core.client").getEntry("/WebContent/" + path); VURL v = new VURL(resourceURL); writePage(req, resp, v, false); } private boolean isValidReviewPath(IPath path) { String designerName = path.segment(1); // Verify the user if ( designerName == null ) { return false; } IUser user = userManager.getUser(designerName); return user != null && path.segmentCount() > 8 && path.segment(4).equals(".review") && path.segment(5).equals("snapshot") && path.segment(0).equals("user") && path.segment(2).equals("ws") && path.segment(3).equals("workspace"); } @Override public void destroy() { ReviewCacheManager.$.markStop(); ReviewCacheManager.$.destroyAllReview(); } }
true
true
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { if ( serverManager == null ) { initialize(); } revieweeName = req.getParameter("revieweeuser"); noView = req.getParameter("noview"); String contextString = req.getContextPath(); String pathInfo = req.getPathInfo(); IUser user = (IUser) req.getSession().getAttribute(IDavinciServerConstants.SESSION_USER); if ( ServerManager.DEBUG_IO_TO_CONSOLE ) { System.out.println("Review Servlet request: " + pathInfo + ", logged in= " + (user != null ? user.getUserName() : "guest")); } if ( user == null ) { req.getSession().setAttribute(IDavinciServerConstants.REDIRECT_TO, req.getRequestURL().toString()); String requestVersion = req.getParameter("version"); if(requestVersion!=null && !requestVersion.equals("")){ Cookie versionCookie = new Cookie(Constants.REVIEW_VERSION, requestVersion); /* have to set the path to delete it later from the client */ versionCookie.setPath("/"); resp.addCookie(versionCookie); } resp.sendRedirect(this.getLoginUrl(req)); return; } if ( pathInfo == null || pathInfo.equals("") ) { ReviewObject reviewObject = (ReviewObject) req.getSession().getAttribute(Constants.REVIEW_INFO); if ( reviewObject == null ) { // Because the requested URL is /review the empty review object // means we do not have a designer name: Error. resp.sendRedirect(this.getLoginUrl(req)); return; } else { resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_DESIGNER, reviewObject.getDesignerName())); resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_DESIGNER_EMAIL, reviewObject.getDesignerEmail())); if ( reviewObject.getCommentId() != null ) { resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_CMTID, reviewObject.getCommentId())); } if ( reviewObject.getVersion() != null ) { Cookie versionCookie = new Cookie(Constants.REVIEW_VERSION, reviewObject.getVersion()); /* have to set the path to delete it later from the client */ versionCookie.setPath("/maqetta/"); resp.addCookie(versionCookie); } // writeReviewPage(req, resp, "review.html"); // writeMainPage(req, resp); resp.sendRedirect("/maqetta/"); } } else { IPath path = new Path(pathInfo); String prefix = path.segment(0); if ( prefix == null ) { resp.sendRedirect(contextString + "/review"); return; } if ( prefix.equals(IDavinciServerConstants.APP_URL.substring(1)) || prefix.equals(Constants.CMD_URL.substring(1)) ) { // Forward to DavinciPageServlet such as "/app/img/1.jpg" or "cmd/getUserInfo" req.getRequestDispatcher(pathInfo).forward(req, resp); return; } if ( handleReviewRequest(req, resp, path) || handleLibraryRequest(req, resp, path, user) ) { return; } // Check if it is a valid user name. // If it is a valid user name, do login // Else, error. IUser designer = userManager.getUser(prefix); if ( designer == null ) { resp.sendRedirect(this.getLoginUrl(req)); return; } else { ReviewObject reviewObject = new ReviewObject(prefix); reviewObject.setDesignerEmail(designer.getPerson().getEmail()); if ( path.segmentCount() > 2 ) { // Token = 20100101/project1/folder1/sample1.html/default String commentId = path.segment(path.segmentCount() - 1); String fileName = path.removeLastSegments(1).removeFirstSegments(1).toPortableString(); reviewObject.setCommentId(commentId); } reviewObject.setVersion(req.getParameter("version")); req.getSession().setAttribute(Constants.REVIEW_INFO, reviewObject); resp.sendRedirect(contextString + "/review"); return; } } }
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { if ( serverManager == null ) { initialize(); } revieweeName = req.getParameter("revieweeuser"); noView = req.getParameter("noview"); String contextString = req.getContextPath(); String pathInfo = req.getPathInfo(); IUser user = (IUser) req.getSession().getAttribute(IDavinciServerConstants.SESSION_USER); if ( ServerManager.DEBUG_IO_TO_CONSOLE ) { System.out.println("Review Servlet request: " + pathInfo + ", logged in= " + (user != null ? user.getUserName() : "guest")); } if ( user == null ) { req.getSession().setAttribute(IDavinciServerConstants.REDIRECT_TO, req.getRequestURL().toString()); String requestVersion = req.getParameter("version"); if(requestVersion!=null && !requestVersion.equals("")){ Cookie versionCookie = new Cookie(Constants.REVIEW_VERSION, requestVersion); /* have to set the path to delete it later from the client */ versionCookie.setPath("/maqetta/"); resp.addCookie(versionCookie); } resp.sendRedirect(this.getLoginUrl(req)); return; } if ( pathInfo == null || pathInfo.equals("") ) { ReviewObject reviewObject = (ReviewObject) req.getSession().getAttribute(Constants.REVIEW_INFO); if ( reviewObject == null ) { // Because the requested URL is /review the empty review object // means we do not have a designer name: Error. resp.sendRedirect(this.getLoginUrl(req)); return; } else { resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_DESIGNER, reviewObject.getDesignerName())); resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_DESIGNER_EMAIL, reviewObject.getDesignerEmail())); if ( reviewObject.getCommentId() != null ) { resp.addCookie(new Cookie(Constants.REVIEW_COOKIE_CMTID, reviewObject.getCommentId())); } if ( reviewObject.getVersion() != null ) { Cookie versionCookie = new Cookie(Constants.REVIEW_VERSION, reviewObject.getVersion()); /* have to set the path to delete it later from the client */ versionCookie.setPath("/maqetta/"); resp.addCookie(versionCookie); } // writeReviewPage(req, resp, "review.html"); // writeMainPage(req, resp); resp.sendRedirect("/maqetta/"); } } else { IPath path = new Path(pathInfo); String prefix = path.segment(0); if ( prefix == null ) { resp.sendRedirect(contextString + "/review"); return; } if ( prefix.equals(IDavinciServerConstants.APP_URL.substring(1)) || prefix.equals(Constants.CMD_URL.substring(1)) ) { // Forward to DavinciPageServlet such as "/app/img/1.jpg" or "cmd/getUserInfo" req.getRequestDispatcher(pathInfo).forward(req, resp); return; } if ( handleReviewRequest(req, resp, path) || handleLibraryRequest(req, resp, path, user) ) { return; } // Check if it is a valid user name. // If it is a valid user name, do login // Else, error. IUser designer = userManager.getUser(prefix); if ( designer == null ) { resp.sendRedirect(this.getLoginUrl(req)); return; } else { ReviewObject reviewObject = new ReviewObject(prefix); reviewObject.setDesignerEmail(designer.getPerson().getEmail()); if ( path.segmentCount() > 2 ) { // Token = 20100101/project1/folder1/sample1.html/default String commentId = path.segment(path.segmentCount() - 1); String fileName = path.removeLastSegments(1).removeFirstSegments(1).toPortableString(); reviewObject.setCommentId(commentId); } reviewObject.setVersion(req.getParameter("version")); req.getSession().setAttribute(Constants.REVIEW_INFO, reviewObject); resp.sendRedirect(contextString + "/review"); return; } } }
diff --git a/src/com/lugcheck/AddItemActivity.java b/src/com/lugcheck/AddItemActivity.java index 3bd56ea..e208260 100644 --- a/src/com/lugcheck/AddItemActivity.java +++ b/src/com/lugcheck/AddItemActivity.java @@ -1,263 +1,261 @@ /* NOTICE for Luggage & Suitcase Checklist, an Android app: Copyright (C) 2012 EBAK Mobile This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.lugcheck; import java.util.ArrayList; import java.util.Locale; import android.app.Activity; import android.app.AlertDialog; import android.content.DialogInterface; import android.content.Intent; import android.database.Cursor; import android.database.sqlite.SQLiteDatabase; import android.graphics.Color; import android.os.Bundle; import android.util.Log; import android.util.TypedValue; import android.view.View; import android.view.ViewGroup; import android.view.ViewGroup.LayoutParams; import android.widget.Button; import android.widget.EditText; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.RelativeLayout; import android.widget.TextView; public class AddItemActivity extends Activity { private int suitcaseId; private SQLiteDatabase db; private ArrayList<String> insertList; //inserts this list into QuickAdd table private float density; @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); density = this.getResources().getDisplayMetrics().density; setContentView(R.layout.activity_add_item); db = openOrCreateDatabase("data.db", SQLiteDatabase.CREATE_IF_NECESSARY, null); db.setVersion(1); db.setLocale(Locale.getDefault()); insertList = new ArrayList<String>(); Bundle extras = getIntent().getExtras(); suitcaseId = extras.getInt("suitcase_id"); Log.w("Suitcase id is ", " " + suitcaseId); setTitle("Quick Add"); Cursor c = db.rawQuery("SELECT * from QuickAdd", null); if (c.getCount() <= 0) {// if there is nothing in the QuickAdd Table addIntoArrayList(); // add all the items into InserList. Then we shove it into the DB for (int i = 0; i < insertList.size(); i++) { String tempName = insertList.get(i); //Log.w("tempName is ", tempName); String INSERT_STATEMENT = "INSERT INTO QuickAdd (name) Values ('" + tempName + "')"; db.execSQL(INSERT_STATEMENT); // insert into trip_table db } } createLayoutsFromDB(); c.close(); } public void createLayoutsFromDB() { /* Code Below fetches trips from trip_table and creates a layout*/ Cursor c = db.rawQuery("SELECT * from QuickAdd ORDER BY name", null); c.moveToFirst(); while (c.isAfterLast() == false) { TextView hw = new TextView(this); final String text = c.getString(c.getColumnIndex("name")); hw.setText(text); hw.setTextSize(TypedValue.COMPLEX_UNIT_SP, 16); ImageView im = new ImageView(this); im.setImageResource(R.drawable.opensuitcase); // FROM STACKOVERFLOW! 
int width = (int) (58 * density); int height = (int) (50 * density); im.setLayoutParams(new LayoutParams(width, height)); int pad = (int) (5 * density); im.setPadding(pad, pad, 0, 0); // END int txtPadding = (int) (20 * density); hw.setPadding(0, txtPadding, 0, 0); Button addButton = new Button(this); addButton.setText("Add"); RelativeLayout relativeLayoutAdd = new RelativeLayout(this); // put the add button on this relative layout to push to the right LinearLayout newTab = new LinearLayout(this); newTab.setOrientation(LinearLayout.HORIZONTAL); RelativeLayout.LayoutParams paramRight = new RelativeLayout.LayoutParams( LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT); paramRight.addRule(RelativeLayout.ALIGN_PARENT_RIGHT, RelativeLayout.TRUE); newTab.addView(im); newTab.addView(hw); relativeLayoutAdd.addView(addButton, paramRight); newTab.addView(relativeLayoutAdd);//add the relative layout onto the newTab layout newTab.setBackgroundColor(Color.WHITE); LinearLayout tripContainer = (LinearLayout) findViewById(R.id.add_item_container); tripContainer.addView(newTab); View ruler = new View(this); ruler.setBackgroundColor(Color.BLACK); // this code draws the black lines tripContainer.addView(ruler, new ViewGroup.LayoutParams( ViewGroup.LayoutParams.MATCH_PARENT, 2)); c.moveToNext(); addButton.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { final EditText quantityEditText = new EditText(AddItemActivity.this); quantityEditText.setHint("Quantity"); AlertDialog.Builder builder = new AlertDialog.Builder(AddItemActivity.this); builder.setMessage("Please enter a quantity for " + text).setCancelable(false) .setView(quantityEditText) .setPositiveButton("Add", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) {// when they click "add" after entering quantity String quantity = quantityEditText.getText().toString(); boolean isDupe = false;//code below checks for dupes in database Cursor c = db.rawQuery("SELECT * from Item where suitcase_id='" + suitcaseId + "'", null); c.moveToFirst(); while (c.isAfterLast() == false) { String itemName = c.getString(c.getColumnIndex("item_name")); c.moveToNext(); if (text.equals(itemName)) { isDupe = true; break; } } c.close(); if (!quantity.matches("\\d+")) { AlertDialog dupe = new AlertDialog.Builder( AddItemActivity.this).create(); dupe.setMessage("Please enter a numeric value for 'Quantity'"); dupe.setButton("Ok", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); dupe.show(); } else { if (isDupe == true) {//if there is a duplicate AlertDialog dupe = new AlertDialog.Builder( AddItemActivity.this).create(); dupe.setTitle("Duplicate Found"); dupe.setMessage("Item already exists. 
Please use that item instead"); dupe.setButton("Ok", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); dupe.show(); } else { /* Andrew's super-duper important insert statement */ String INSERT_STATEMENT = new StringBuilder( "INSERT INTO Item (item_name, quantity, suitcase_id, is_slashed) Values ('") .append(text).append("', '").append(quantity) .append("','").append(suitcaseId) .append("','0')").toString(); db.execSQL(INSERT_STATEMENT); Intent resultIntent = new Intent(); resultIntent.putExtra("suitcase_id", suitcaseId); setResult(RESULT_OK, resultIntent); finish(); - /*startActivity(resultIntent); - AddItemActivity.this.finish();*/ } } } }).setNegativeButton("Cancel", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { dialog.cancel(); } }); AlertDialog alert = builder.create(); alert.show(); } }); } } public void addIntoArrayList() { insertList.add("Shoes"); insertList.add("Underwear"); insertList.add("Jacket"); insertList.add("Sweater"); insertList.add("Pajama"); insertList.add("Swimsuit"); insertList.add("T-Shirt"); insertList.add("Socks"); insertList.add("Dresses"); insertList.add("Blouse"); insertList.add("Umbrella"); insertList.add("Scarf"); insertList.add("Gloves"); insertList.add("Hat"); insertList.add("Cap"); insertList.add("Pants"); insertList.add("Boots"); insertList.add("Flip Flops / Sandals"); insertList.add("Slippers"); insertList.add("Belt"); insertList.add("Toilet Paper"); insertList.add("Sunscreen"); insertList.add("Lip Balm"); insertList.add("First Aid Kit"); insertList.add("Sunglasses"); insertList.add("Maps"); insertList.add("Computer"); insertList.add("Tablet"); insertList.add("Phone"); insertList.add("Toothbrush"); insertList.add("Toothpaste"); insertList.add("Comb"); insertList.add("Shampoo"); insertList.add("Nail Clippers"); insertList.add("Towel"); insertList.add("Camera"); insertList.add("Passport"); insertList.add("Travel Ticket"); insertList.add("Fanny Pack"); } public boolean isInteger(String s) { boolean result = false; try { Integer.parseInt("-1234"); result = true; } catch (NumberFormatException nfe) { // no need to handle the exception } return result; } }
true
true
public void createLayoutsFromDB() { /* Code Below fetches trips from trip_table and creates a layout*/ Cursor c = db.rawQuery("SELECT * from QuickAdd ORDER BY name", null); c.moveToFirst(); while (c.isAfterLast() == false) { TextView hw = new TextView(this); final String text = c.getString(c.getColumnIndex("name")); hw.setText(text); hw.setTextSize(TypedValue.COMPLEX_UNIT_SP, 16); ImageView im = new ImageView(this); im.setImageResource(R.drawable.opensuitcase); // FROM STACKOVERFLOW! int width = (int) (58 * density); int height = (int) (50 * density); im.setLayoutParams(new LayoutParams(width, height)); int pad = (int) (5 * density); im.setPadding(pad, pad, 0, 0); // END int txtPadding = (int) (20 * density); hw.setPadding(0, txtPadding, 0, 0); Button addButton = new Button(this); addButton.setText("Add"); RelativeLayout relativeLayoutAdd = new RelativeLayout(this); // put the add button on this relative layout to push to the right LinearLayout newTab = new LinearLayout(this); newTab.setOrientation(LinearLayout.HORIZONTAL); RelativeLayout.LayoutParams paramRight = new RelativeLayout.LayoutParams( LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT); paramRight.addRule(RelativeLayout.ALIGN_PARENT_RIGHT, RelativeLayout.TRUE); newTab.addView(im); newTab.addView(hw); relativeLayoutAdd.addView(addButton, paramRight); newTab.addView(relativeLayoutAdd);//add the relative layout onto the newTab layout newTab.setBackgroundColor(Color.WHITE); LinearLayout tripContainer = (LinearLayout) findViewById(R.id.add_item_container); tripContainer.addView(newTab); View ruler = new View(this); ruler.setBackgroundColor(Color.BLACK); // this code draws the black lines tripContainer.addView(ruler, new ViewGroup.LayoutParams( ViewGroup.LayoutParams.MATCH_PARENT, 2)); c.moveToNext(); addButton.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { final EditText quantityEditText = new EditText(AddItemActivity.this); quantityEditText.setHint("Quantity"); AlertDialog.Builder builder = new AlertDialog.Builder(AddItemActivity.this); builder.setMessage("Please enter a quantity for " + text).setCancelable(false) .setView(quantityEditText) .setPositiveButton("Add", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) {// when they click "add" after entering quantity String quantity = quantityEditText.getText().toString(); boolean isDupe = false;//code below checks for dupes in database Cursor c = db.rawQuery("SELECT * from Item where suitcase_id='" + suitcaseId + "'", null); c.moveToFirst(); while (c.isAfterLast() == false) { String itemName = c.getString(c.getColumnIndex("item_name")); c.moveToNext(); if (text.equals(itemName)) { isDupe = true; break; } } c.close(); if (!quantity.matches("\\d+")) { AlertDialog dupe = new AlertDialog.Builder( AddItemActivity.this).create(); dupe.setMessage("Please enter a numeric value for 'Quantity'"); dupe.setButton("Ok", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); dupe.show(); } else { if (isDupe == true) {//if there is a duplicate AlertDialog dupe = new AlertDialog.Builder( AddItemActivity.this).create(); dupe.setTitle("Duplicate Found"); dupe.setMessage("Item already exists. 
Please use that item instead"); dupe.setButton("Ok", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); dupe.show(); } else { /* Andrew's super-duper important insert statement */ String INSERT_STATEMENT = new StringBuilder( "INSERT INTO Item (item_name, quantity, suitcase_id, is_slashed) Values ('") .append(text).append("', '").append(quantity) .append("','").append(suitcaseId) .append("','0')").toString(); db.execSQL(INSERT_STATEMENT); Intent resultIntent = new Intent(); resultIntent.putExtra("suitcase_id", suitcaseId); setResult(RESULT_OK, resultIntent); finish(); /*startActivity(resultIntent); AddItemActivity.this.finish();*/ } } } }).setNegativeButton("Cancel", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { dialog.cancel(); } }); AlertDialog alert = builder.create(); alert.show(); } }); } }
public void createLayoutsFromDB() { /* Code Below fetches trips from trip_table and creates a layout*/ Cursor c = db.rawQuery("SELECT * from QuickAdd ORDER BY name", null); c.moveToFirst(); while (c.isAfterLast() == false) { TextView hw = new TextView(this); final String text = c.getString(c.getColumnIndex("name")); hw.setText(text); hw.setTextSize(TypedValue.COMPLEX_UNIT_SP, 16); ImageView im = new ImageView(this); im.setImageResource(R.drawable.opensuitcase); // FROM STACKOVERFLOW! int width = (int) (58 * density); int height = (int) (50 * density); im.setLayoutParams(new LayoutParams(width, height)); int pad = (int) (5 * density); im.setPadding(pad, pad, 0, 0); // END int txtPadding = (int) (20 * density); hw.setPadding(0, txtPadding, 0, 0); Button addButton = new Button(this); addButton.setText("Add"); RelativeLayout relativeLayoutAdd = new RelativeLayout(this); // put the add button on this relative layout to push to the right LinearLayout newTab = new LinearLayout(this); newTab.setOrientation(LinearLayout.HORIZONTAL); RelativeLayout.LayoutParams paramRight = new RelativeLayout.LayoutParams( LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT); paramRight.addRule(RelativeLayout.ALIGN_PARENT_RIGHT, RelativeLayout.TRUE); newTab.addView(im); newTab.addView(hw); relativeLayoutAdd.addView(addButton, paramRight); newTab.addView(relativeLayoutAdd);//add the relative layout onto the newTab layout newTab.setBackgroundColor(Color.WHITE); LinearLayout tripContainer = (LinearLayout) findViewById(R.id.add_item_container); tripContainer.addView(newTab); View ruler = new View(this); ruler.setBackgroundColor(Color.BLACK); // this code draws the black lines tripContainer.addView(ruler, new ViewGroup.LayoutParams( ViewGroup.LayoutParams.MATCH_PARENT, 2)); c.moveToNext(); addButton.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { final EditText quantityEditText = new EditText(AddItemActivity.this); quantityEditText.setHint("Quantity"); AlertDialog.Builder builder = new AlertDialog.Builder(AddItemActivity.this); builder.setMessage("Please enter a quantity for " + text).setCancelable(false) .setView(quantityEditText) .setPositiveButton("Add", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) {// when they click "add" after entering quantity String quantity = quantityEditText.getText().toString(); boolean isDupe = false;//code below checks for dupes in database Cursor c = db.rawQuery("SELECT * from Item where suitcase_id='" + suitcaseId + "'", null); c.moveToFirst(); while (c.isAfterLast() == false) { String itemName = c.getString(c.getColumnIndex("item_name")); c.moveToNext(); if (text.equals(itemName)) { isDupe = true; break; } } c.close(); if (!quantity.matches("\\d+")) { AlertDialog dupe = new AlertDialog.Builder( AddItemActivity.this).create(); dupe.setMessage("Please enter a numeric value for 'Quantity'"); dupe.setButton("Ok", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); dupe.show(); } else { if (isDupe == true) {//if there is a duplicate AlertDialog dupe = new AlertDialog.Builder( AddItemActivity.this).create(); dupe.setTitle("Duplicate Found"); dupe.setMessage("Item already exists. 
Please use that item instead"); dupe.setButton("Ok", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }); dupe.show(); } else { /* Andrew's super-duper important insert statement */ String INSERT_STATEMENT = new StringBuilder( "INSERT INTO Item (item_name, quantity, suitcase_id, is_slashed) Values ('") .append(text).append("', '").append(quantity) .append("','").append(suitcaseId) .append("','0')").toString(); db.execSQL(INSERT_STATEMENT); Intent resultIntent = new Intent(); resultIntent.putExtra("suitcase_id", suitcaseId); setResult(RESULT_OK, resultIntent); finish(); } } } }).setNegativeButton("Cancel", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int id) { dialog.cancel(); } }); AlertDialog alert = builder.create(); alert.show(); } }); } }
diff --git a/src/com/github/triarry/PvPRestore/PvPRestorePlayerListener.java b/src/com/github/triarry/PvPRestore/PvPRestorePlayerListener.java index 04e7c6c..3d4cb47 100644 --- a/src/com/github/triarry/PvPRestore/PvPRestorePlayerListener.java +++ b/src/com/github/triarry/PvPRestore/PvPRestorePlayerListener.java @@ -1,204 +1,203 @@ package com.github.triarry.PvPRestore; import java.text.DecimalFormat; import java.util.HashMap; import net.milkbowl.vault.economy.EconomyResponse; import org.bukkit.Bukkit; import org.bukkit.ChatColor; import org.bukkit.entity.Player; import org.bukkit.event.EventHandler; import org.bukkit.event.Listener; import org.bukkit.event.entity.PlayerDeathEvent; import org.bukkit.event.player.PlayerKickEvent; import org.bukkit.event.player.PlayerMoveEvent; import org.bukkit.event.player.PlayerQuitEvent; import org.bukkit.event.player.PlayerRespawnEvent; import org.bukkit.inventory.ItemStack; import de.Keyle.MyPet.entity.types.CraftMyPet; @SuppressWarnings("unused") public class PvPRestorePlayerListener implements Listener { private PvPRestore plugin; public PvPRestorePlayerListener(PvPRestore plugin) { this.plugin = plugin; } public HashMap<Player , ItemStack[]> items = new HashMap<Player , ItemStack[]>(); public HashMap<Player , ItemStack[]> armor = new HashMap<Player , ItemStack[]>(); @EventHandler public void onPlayerDeath(PlayerDeathEvent event) { Player player = null; if (event.getEntity() instanceof Player) { player = event.getEntity(); } Player killer = player.getKiller(); if (killer != null || event.getEntity() instanceof CraftMyPet) { if (player.hasPermission("pvprestore.keep") && plugin.getConfig().getBoolean("keep-inventory") == true && plugin.getConfig().getBoolean("keep-xp") == true) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); - if (plugin.getConfig().getBoolean("vault.enabled") == true) { + if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } - player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else if ((player.hasPermission("pvprestore.keep.xp") || player.hasPermission("pvprestore.keep")) && plugin.getConfig().getBoolean("keep-xp") == true) { if (player.hasPermission("pvprestore.keep.inventory")) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - 
plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } - if (plugin.getConfig().getBoolean("vault.enabled") == true) { + if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } - if (plugin.getConfig().getBoolean("vault.enabled") == true) { + if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP was saved!"); } event.setDroppedExp(0); } } else if ((player.hasPermission("pvprestore.keep.inventory") || player.hasPermission("pvprestore.keep")) && plugin.getConfig().getBoolean("keep-inventory") == true) { if (player.hasPermission("pvprestore.keep.xp")) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } - if (plugin.getConfig().getBoolean("vault.enabled") == true) { + if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else { player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory has been saved."); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their inventory was 
saved!"); } - if (plugin.getConfig().getBoolean("vault.enabled") == true) { + if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } } else { player.sendMessage(ChatColor.RED + "Your death was not player related, so your inventory and XP have dropped where you died."); } } else { player.sendMessage(ChatColor.RED + "Your death was not player related, so your inventory and XP have dropped where you died."); } } @EventHandler public void onPlayerRespawn(PlayerRespawnEvent event) { if(items.containsKey(event.getPlayer())){ event.getPlayer().getInventory().clear(); event.getPlayer().getInventory().setContents(items.get(event.getPlayer())); items.remove(event.getPlayer()); } if(armor.containsKey(event.getPlayer()) && armor.size() != 0) { event.getPlayer().getInventory().setArmorContents(armor.get(event.getPlayer())); armor.remove(event.getPlayer()); } } @EventHandler public void onPlayerQuit(PlayerQuitEvent event) { if (event.getPlayer().isDead()) { if(items.containsKey(event.getPlayer())){ event.getPlayer().getInventory().clear(); event.getPlayer().getInventory().setContents(items.get(event.getPlayer())); items.remove(event.getPlayer()); } if(armor.containsKey(event.getPlayer()) && armor.size() != 0) { event.getPlayer().getInventory().setArmorContents(armor.get(event.getPlayer())); armor.remove(event.getPlayer()); } } } public void onPlayerKick(PlayerKickEvent event) { if (event.getPlayer().isDead()) { if(items.containsKey(event.getPlayer())){ event.getPlayer().getInventory().clear(); event.getPlayer().getInventory().setContents(items.get(event.getPlayer())); items.remove(event.getPlayer()); } if(armor.containsKey(event.getPlayer()) && armor.size() != 0) { event.getPlayer().getInventory().setArmorContents(armor.get(event.getPlayer())); armor.remove(event.getPlayer()); } } } @SuppressWarnings("static-access") public void moneySteal(PlayerDeathEvent event) { Player player = null; if (event.getEntity() instanceof Player) { player = event.getEntity(); } Player killer = player.getKiller(); if (killer != null) { double r = plugin.econ.getBalance(player.getName()) * (plugin.getConfig().getInt("vault.money-to-steal") / 100.0); System.out.println(r); System.out.println(plugin.getConfig().getInt("vault.money-to-steal")); plugin.econ.depositPlayer(killer.getName(), r); plugin.econ.withdrawPlayer(player.getName(), r); DecimalFormat dFormat = new DecimalFormat(); String d = dFormat.format(r); killer.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "You stole " + ChatColor.RED + d + " " + plugin.econ.currencyNamePlural() + ChatColor.GREEN + " from " + ChatColor.RED + player.getName()); } } }
false
true
public void onPlayerDeath(PlayerDeathEvent event) { Player player = null; if (event.getEntity() instanceof Player) { player = event.getEntity(); } Player killer = player.getKiller(); if (killer != null || event.getEntity() instanceof CraftMyPet) { if (player.hasPermission("pvprestore.keep") && plugin.getConfig().getBoolean("keep-inventory") == true && plugin.getConfig().getBoolean("keep-xp") == true) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); if (plugin.getConfig().getBoolean("vault.enabled") == true) { moneySteal(event); } event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else if ((player.hasPermission("pvprestore.keep.xp") || player.hasPermission("pvprestore.keep")) && plugin.getConfig().getBoolean("keep-xp") == true) { if (player.hasPermission("pvprestore.keep.inventory")) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } if (plugin.getConfig().getBoolean("vault.enabled") == true) { moneySteal(event); } event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } if (plugin.getConfig().getBoolean("vault.enabled") == true) { moneySteal(event); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed 
by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP was saved!"); } event.setDroppedExp(0); } } else if ((player.hasPermission("pvprestore.keep.inventory") || player.hasPermission("pvprestore.keep")) && plugin.getConfig().getBoolean("keep-inventory") == true) { if (player.hasPermission("pvprestore.keep.xp")) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } if (plugin.getConfig().getBoolean("vault.enabled") == true) { moneySteal(event); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else { player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory has been saved."); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their inventory was saved!"); } if (plugin.getConfig().getBoolean("vault.enabled") == true) { moneySteal(event); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } } else { player.sendMessage(ChatColor.RED + "Your death was not player related, so your inventory and XP have dropped where you died."); } } else { player.sendMessage(ChatColor.RED + "Your death was not player related, so your inventory and XP have dropped where you died."); } }
public void onPlayerDeath(PlayerDeathEvent event) { Player player = null; if (event.getEntity() instanceof Player) { player = event.getEntity(); } Player killer = player.getKiller(); if (killer != null || event.getEntity() instanceof CraftMyPet) { if (player.hasPermission("pvprestore.keep") && plugin.getConfig().getBoolean("keep-inventory") == true && plugin.getConfig().getBoolean("keep-xp") == true) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else if ((player.hasPermission("pvprestore.keep.xp") || player.hasPermission("pvprestore.keep")) && plugin.getConfig().getBoolean("keep-xp") == true) { if (player.hasPermission("pvprestore.keep.inventory")) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP was saved!"); } event.setDroppedExp(0); } } else if 
((player.hasPermission("pvprestore.keep.inventory") || player.hasPermission("pvprestore.keep")) && plugin.getConfig().getBoolean("keep-inventory") == true) { if (player.hasPermission("pvprestore.keep.xp")) { event.setKeepLevel(true); if (plugin.getConfig().getInt("xp-to-remove") < 100 && plugin.getConfig().getInt("xp-to-remove") >= 0) { player.setLevel((int) (player.getLevel() * ((100.0 - plugin.getConfig().getInt("xp-to-remove")) / 100.0))); } if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory and " + (100 - plugin.getConfig().getInt("xp-to-remove")) + "% of your XP has been saved."); event.setDroppedExp(0); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their XP and inventory was saved!"); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } else { player.sendMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.GREEN + "Your death was player related, so your inventory has been saved."); if (plugin.getConfig().getBoolean("death-message") == true) { event.setDeathMessage(ChatColor.YELLOW + "[PVP_Restore] " + ChatColor.RED + player.getName() + ChatColor.GREEN + " was killed by " + ChatColor.RED + killer.getName() + ChatColor.GREEN + ", and their inventory was saved!"); } if (plugin.getConfig().getBoolean("vault.enabled") == true && killer != null) { moneySteal(event); } ItemStack[] content = player.getInventory().getContents(); ItemStack[] content_armor = player.getInventory().getArmorContents(); armor.put(player, content_armor); items.put(player, content); player.getInventory().clear(); event.getDrops().clear(); } } else { player.sendMessage(ChatColor.RED + "Your death was not player related, so your inventory and XP have dropped where you died."); } } else { player.sendMessage(ChatColor.RED + "Your death was not player related, so your inventory and XP have dropped where you died."); } }
diff --git a/eclipse/server/plugins/com.liferay.ide.eclipse.server.tomcat.core/src/com/liferay/ide/eclipse/server/tomcat/core/PortalTomcatServer.java b/eclipse/server/plugins/com.liferay.ide.eclipse.server.tomcat.core/src/com/liferay/ide/eclipse/server/tomcat/core/PortalTomcatServer.java index 4e72be84d..395dd6f49 100644 --- a/eclipse/server/plugins/com.liferay.ide.eclipse.server.tomcat.core/src/com/liferay/ide/eclipse/server/tomcat/core/PortalTomcatServer.java +++ b/eclipse/server/plugins/com.liferay.ide.eclipse.server.tomcat.core/src/com/liferay/ide/eclipse/server/tomcat/core/PortalTomcatServer.java @@ -1,252 +1,252 @@ /******************************************************************************* * Copyright (c) 2003, 2011 IBM Corporation and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * which accompanies this distribution, and is available at * http://www.eclipse.org/legal/epl-v10.html * * Contributors: * IBM Corporation - Initial API and implementation * Greg Amerson <[email protected]> *******************************************************************************/ package com.liferay.ide.eclipse.server.tomcat.core; import com.liferay.ide.eclipse.core.util.CoreUtil; import com.liferay.ide.eclipse.project.core.util.ProjectUtil; import com.liferay.ide.eclipse.server.core.IPortalServer; import com.liferay.ide.eclipse.server.tomcat.core.util.PortalTomcatUtil; import java.net.MalformedURLException; import java.net.URL; import org.eclipse.core.resources.IFolder; import org.eclipse.core.resources.IProject; import org.eclipse.core.runtime.CoreException; import org.eclipse.core.runtime.IPath; import org.eclipse.core.runtime.IProgressMonitor; import org.eclipse.core.runtime.IStatus; import org.eclipse.core.runtime.Status; import org.eclipse.jst.server.tomcat.core.internal.ITomcatVersionHandler; import org.eclipse.jst.server.tomcat.core.internal.Messages; import org.eclipse.jst.server.tomcat.core.internal.TomcatConfiguration; import org.eclipse.jst.server.tomcat.core.internal.TomcatPlugin; import org.eclipse.jst.server.tomcat.core.internal.TomcatServer; import org.eclipse.osgi.util.NLS; import org.eclipse.wst.server.core.IModule; import org.eclipse.wst.server.core.IRuntime; import org.eclipse.wst.server.core.ServerUtil; @SuppressWarnings("restriction") public class PortalTomcatServer extends TomcatServer implements IPortalTomcatConstants, IPortalTomcatServer, IPortalTomcatServerWorkingCopy, IPortalServer { public PortalTomcatServer() { super(); } public String getAutoDeployDirectory() { return getAttribute(PROPERTY_AUTO_DEPLOY_DIR, "../deploy"); } public String getAutoDeployInterval() { return getAttribute(PROPERTY_AUTO_DEPLOY_INTERVAL, IPortalTomcatConstants.DEFAULT_AUTO_DEPLOY_INTERVAL); } public String getMemoryArgs() { return getAttribute(PROPERTY_MEMORY_ARGS, IPortalTomcatConstants.DEFAULT_MEMORY_ARGS); } public URL getPortalHomeUrl() { try { TomcatConfiguration config = getTomcatConfiguration(); if (config == null) return null; String url = "http://" + getServer().getHost(); int port = config.getMainPort().getPort(); port = ServerUtil.getMonitoredPort(getServer(), port, "web"); if (port != 80) url += ":" + port; return new URL(url); } catch (Exception ex) { return null; } } public IPortalTomcatConfiguration getPortalTomcatConfiguration() throws CoreException { return (IPortalTomcatConfiguration) getTomcatConfiguration(); } @Override public TomcatConfiguration getTomcatConfiguration() 
throws CoreException { if (configuration == null) { IFolder folder = getServer().getServerConfiguration(); if (folder == null || !folder.exists()) { String path = null; if (folder != null) { path = folder.getFullPath().toOSString(); IProject project = folder.getProject(); if (project != null && project.exists() && !project.isOpen()) throw new CoreException(new Status(IStatus.ERROR, TomcatPlugin.PLUGIN_ID, 0, NLS.bind( Messages.errorConfigurationProjectClosed, path, project.getName()), null)); } throw new CoreException(new Status(IStatus.ERROR, TomcatPlugin.PLUGIN_ID, 0, NLS.bind( Messages.errorNoConfiguration, path), null)); } String id = getServer().getServerType().getId(); if (id.indexOf("60") > 0) { configuration = new PortalTomcat60Configuration(folder); } try { ((IPortalTomcatConfiguration) configuration).load(folder, null); } catch (CoreException ce) { // ignore configuration = null; throw ce; } } return configuration; } @Override public ITomcatVersionHandler getTomcatVersionHandler() { ITomcatVersionHandler handler = super.getTomcatVersionHandler(); if (handler instanceof PortalTomcat60Handler) { ((PortalTomcat60Handler) handler).setCurrentServer(getServer()); } return handler; } public String getUserTimezone() { return getAttribute(PROPERTY_USER_TIMEZONE, IPortalTomcatConstants.DEFAULT_USER_TIMEZONE); } public URL getWebServicesListURL() { try { return new URL(getPortalHomeUrl(), "/tunnel-web/axis"); } catch (MalformedURLException e) { PortalTomcatPlugin.logError("Unable to get web services list URL", e); } return null; } @Override public void importRuntimeConfiguration(IRuntime runtime, IProgressMonitor monitor) throws CoreException { if (runtime == null) { configuration = null; return; } IPath path = runtime.getLocation().append("conf"); String id = getServer().getServerType().getId(); IFolder folder = getServer().getServerConfiguration(); if (id.indexOf("60") > 0) { configuration = new PortalTomcat60Configuration(folder); } if (path.toFile().exists()) { try { configuration.importFromPath(path, isTestEnvironment(), monitor); } catch (CoreException ce) { // ignore configuration = null; throw ce; } } } @Override public void modifyModules(IModule[] add, IModule[] remove, IProgressMonitor monitor) throws CoreException { // check if we are adding ext plugin then we need to turn off auto publishing if we are removing ext plugin // then we can re-enable publishing if it was previously set boolean addingExt = false; boolean removingExt = false; if (!CoreUtil.isNullOrEmpty(add)) { for (IModule m : add) { if (m.getProject() != null && ProjectUtil.isExtProject(m.getProject())) { addingExt = true; break; } } } else if (!CoreUtil.isNullOrEmpty(remove)) { for (IModule m : remove) { if (m.getProject() != null && ProjectUtil.isExtProject(m.getProject())) { removingExt = true; break; } } } // if (addingExt && !removingExt) { // int existingSetting = // getServer().getAttribute(Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_RESOURCE); // // if (existingSetting != Server.AUTO_PUBLISH_DISABLE) { // PortalTomcatUtil.displayToggleMessage( // "The Ext plugin Automatic publishing has been set to disabled since an Ext plugin has been added. 
This setting will be restored once the Ext plugin is removed.", // PortalTomcatPlugin.PREFERENCES_ADDED_EXT_PLUGIN_TOGGLE_KEY); // } // // IServerWorkingCopy wc = getServer().createWorkingCopy(); // wc.setAttribute(Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_DISABLE); // wc.setAttribute("last-" + Server.PROP_AUTO_PUBLISH_SETTING, existingSetting); // wc.save(true, monitor); // } if (!addingExt && removingExt) { PortalTomcatUtil.displayToggleMessage( - "Removing the Ext plugin will only update the metadata, it will not actually restore any changes made by the Ext plugin. To restore the server to its original state, use the \"Clean App Server\" action available in the project context menu.", + "Removing the Ext plugin will only update the metadata; it will not actually restore any changes made by the Ext plugin. To restore the server to its original state, use the \"Clean App Server\" action available in the project context menu.", PortalTomcatPlugin.PREFERENCES_REMOVE_EXT_PLUGIN_TOGGLE_KEY); } // int lastSetting = // getServer().getAttribute("last-" + Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_RESOURCE); // IServerWorkingCopy wc = getServer().createWorkingCopy(); // wc.setAttribute(Server.PROP_AUTO_PUBLISH_SETTING, lastSetting); // wc.save(true, monitor); // } super.modifyModules(add, remove, monitor); } public void setAutoDeployDirectory(String dir) { setAttribute(PROPERTY_AUTO_DEPLOY_DIR, dir); } public void setAutoDeployInterval(String interval) { setAttribute(PROPERTY_AUTO_DEPLOY_INTERVAL, interval); } @Override public void setDefaults(IProgressMonitor monitor) { super.setDefaults(monitor); setTestEnvironment(false); setDeployDirectory(IPortalTomcatConstants.DEFAULT_DEPLOYDIR); setSaveSeparateContextFiles(true); } public void setMemoryArgs(String memoryArgs) { setAttribute(PROPERTY_MEMORY_ARGS, memoryArgs); } public void setUserTimezone(String userTimezone) { setAttribute(PROPERTY_USER_TIMEZONE, userTimezone); } }
true
true
public void modifyModules(IModule[] add, IModule[] remove, IProgressMonitor monitor) throws CoreException { // check if we are adding ext plugin then we need to turn off auto publishing if we are removing ext plugin // then we can re-enable publishing if it was previously set boolean addingExt = false; boolean removingExt = false; if (!CoreUtil.isNullOrEmpty(add)) { for (IModule m : add) { if (m.getProject() != null && ProjectUtil.isExtProject(m.getProject())) { addingExt = true; break; } } } else if (!CoreUtil.isNullOrEmpty(remove)) { for (IModule m : remove) { if (m.getProject() != null && ProjectUtil.isExtProject(m.getProject())) { removingExt = true; break; } } } // if (addingExt && !removingExt) { // int existingSetting = // getServer().getAttribute(Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_RESOURCE); // // if (existingSetting != Server.AUTO_PUBLISH_DISABLE) { // PortalTomcatUtil.displayToggleMessage( // "The Ext plugin Automatic publishing has been set to disabled since an Ext plugin has been added. This setting will be restored once the Ext plugin is removed.", // PortalTomcatPlugin.PREFERENCES_ADDED_EXT_PLUGIN_TOGGLE_KEY); // } // // IServerWorkingCopy wc = getServer().createWorkingCopy(); // wc.setAttribute(Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_DISABLE); // wc.setAttribute("last-" + Server.PROP_AUTO_PUBLISH_SETTING, existingSetting); // wc.save(true, monitor); // } if (!addingExt && removingExt) { PortalTomcatUtil.displayToggleMessage( "Removing the Ext plugin will only update the metadata, it will not actually restore any changes made by the Ext plugin. To restore the server to its original state, use the \"Clean App Server\" action available in the project context menu.", PortalTomcatPlugin.PREFERENCES_REMOVE_EXT_PLUGIN_TOGGLE_KEY); } // int lastSetting = // getServer().getAttribute("last-" + Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_RESOURCE); // IServerWorkingCopy wc = getServer().createWorkingCopy(); // wc.setAttribute(Server.PROP_AUTO_PUBLISH_SETTING, lastSetting); // wc.save(true, monitor); // } super.modifyModules(add, remove, monitor); }
public void modifyModules(IModule[] add, IModule[] remove, IProgressMonitor monitor) throws CoreException { // check if we are adding ext plugin then we need to turn off auto publishing if we are removing ext plugin // then we can re-enable publishing if it was previously set boolean addingExt = false; boolean removingExt = false; if (!CoreUtil.isNullOrEmpty(add)) { for (IModule m : add) { if (m.getProject() != null && ProjectUtil.isExtProject(m.getProject())) { addingExt = true; break; } } } else if (!CoreUtil.isNullOrEmpty(remove)) { for (IModule m : remove) { if (m.getProject() != null && ProjectUtil.isExtProject(m.getProject())) { removingExt = true; break; } } } // if (addingExt && !removingExt) { // int existingSetting = // getServer().getAttribute(Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_RESOURCE); // // if (existingSetting != Server.AUTO_PUBLISH_DISABLE) { // PortalTomcatUtil.displayToggleMessage( // "The Ext plugin Automatic publishing has been set to disabled since an Ext plugin has been added. This setting will be restored once the Ext plugin is removed.", // PortalTomcatPlugin.PREFERENCES_ADDED_EXT_PLUGIN_TOGGLE_KEY); // } // // IServerWorkingCopy wc = getServer().createWorkingCopy(); // wc.setAttribute(Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_DISABLE); // wc.setAttribute("last-" + Server.PROP_AUTO_PUBLISH_SETTING, existingSetting); // wc.save(true, monitor); // } if (!addingExt && removingExt) { PortalTomcatUtil.displayToggleMessage( "Removing the Ext plugin will only update the metadata; it will not actually restore any changes made by the Ext plugin. To restore the server to its original state, use the \"Clean App Server\" action available in the project context menu.", PortalTomcatPlugin.PREFERENCES_REMOVE_EXT_PLUGIN_TOGGLE_KEY); } // int lastSetting = // getServer().getAttribute("last-" + Server.PROP_AUTO_PUBLISH_SETTING, Server.AUTO_PUBLISH_RESOURCE); // IServerWorkingCopy wc = getServer().createWorkingCopy(); // wc.setAttribute(Server.PROP_AUTO_PUBLISH_SETTING, lastSetting); // wc.save(true, monitor); // } super.modifyModules(add, remove, monitor); }